//! This module implements the [WebAssembly `SIMD128` ISA].
//!
//! [WebAssembly `SIMD128` ISA]:
//! https://github.com/WebAssembly/simd/blob/master/proposals/simd/SIMD.md
6 #![unstable(feature = "wasm_simd", issue = "74372")]
7 #![allow(non_camel_case_types)]
8 #![allow(unused_imports)]
11 core_arch
::{simd::*, simd_llvm::*}
,
18 use stdarch_test
::assert_instr
;
21 /// WASM-specific 128-bit wide SIMD vector type.
22 // N.B., internals here are arbitrary.
23 pub struct v128(i32, i32, i32, i32);
26 #[allow(non_camel_case_types)]
27 #[unstable(feature = "stdsimd_internal", issue = "none")]
28 pub(crate) trait v128Ext
: Sized
{
29 unsafe fn as_v128(self) -> v128
;
32 #[target_feature(enable = "simd128")]
33 unsafe fn as_u8x16(self) -> u8x16
{
34 transmute(self.as_v128())
38 #[target_feature(enable = "simd128")]
39 unsafe fn as_u16x8(self) -> u16x8
{
40 transmute(self.as_v128())
44 #[target_feature(enable = "simd128")]
45 unsafe fn as_u32x4(self) -> u32x4
{
46 transmute(self.as_v128())
50 #[target_feature(enable = "simd128")]
51 unsafe fn as_u64x2(self) -> u64x2
{
52 transmute(self.as_v128())
56 #[target_feature(enable = "simd128")]
57 unsafe fn as_i8x16(self) -> i8x16
{
58 transmute(self.as_v128())
62 #[target_feature(enable = "simd128")]
63 unsafe fn as_i16x8(self) -> i16x8
{
64 transmute(self.as_v128())
68 #[target_feature(enable = "simd128")]
69 unsafe fn as_i32x4(self) -> i32x4
{
70 transmute(self.as_v128())
74 #[target_feature(enable = "simd128")]
75 unsafe fn as_i64x2(self) -> i64x2
{
76 transmute(self.as_v128())
80 #[target_feature(enable = "simd128")]
81 unsafe fn as_f32x4(self) -> f32x4
{
82 transmute(self.as_v128())
86 #[target_feature(enable = "simd128")]
87 unsafe fn as_f64x2(self) -> f64x2
{
88 transmute(self.as_v128())
92 impl v128Ext
for v128
{
94 #[target_feature(enable = "simd128")]
95 unsafe fn as_v128(self) -> Self {
100 #[allow(improper_ctypes)]
102 #[link_name = "llvm.wasm.anytrue.v16i8"]
103 fn llvm_i8x16_any_true(x
: i8x16
) -> i32;
104 #[link_name = "llvm.wasm.alltrue.v16i8"]
105 fn llvm_i8x16_all_true(x
: i8x16
) -> i32;
106 #[link_name = "llvm.sadd.sat.v16i8"]
107 fn llvm_i8x16_add_saturate_s(a
: i8x16
, b
: i8x16
) -> i8x16
;
108 #[link_name = "llvm.uadd.sat.v16i8"]
109 fn llvm_i8x16_add_saturate_u(a
: i8x16
, b
: i8x16
) -> i8x16
;
110 #[link_name = "llvm.wasm.sub.saturate.signed.v16i8"]
111 fn llvm_i8x16_sub_saturate_s(a
: i8x16
, b
: i8x16
) -> i8x16
;
112 #[link_name = "llvm.wasm.sub.saturate.unsigned.v16i8"]
113 fn llvm_i8x16_sub_saturate_u(a
: i8x16
, b
: i8x16
) -> i8x16
;
115 #[link_name = "llvm.wasm.anytrue.v8i16"]
116 fn llvm_i16x8_any_true(x
: i16x8
) -> i32;
117 #[link_name = "llvm.wasm.alltrue.v8i16"]
118 fn llvm_i16x8_all_true(x
: i16x8
) -> i32;
119 #[link_name = "llvm.sadd.sat.v8i16"]
120 fn llvm_i16x8_add_saturate_s(a
: i16x8
, b
: i16x8
) -> i16x8
;
121 #[link_name = "llvm.uadd.sat.v8i16"]
122 fn llvm_i16x8_add_saturate_u(a
: i16x8
, b
: i16x8
) -> i16x8
;
123 #[link_name = "llvm.wasm.sub.saturate.signed.v8i16"]
124 fn llvm_i16x8_sub_saturate_s(a
: i16x8
, b
: i16x8
) -> i16x8
;
125 #[link_name = "llvm.wasm.sub.saturate.unsigned.v8i16"]
126 fn llvm_i16x8_sub_saturate_u(a
: i16x8
, b
: i16x8
) -> i16x8
;
128 #[link_name = "llvm.wasm.anytrue.v4i32"]
129 fn llvm_i32x4_any_true(x
: i32x4
) -> i32;
130 #[link_name = "llvm.wasm.alltrue.v4i32"]
131 fn llvm_i32x4_all_true(x
: i32x4
) -> i32;
133 #[link_name = "llvm.fabs.v4f32"]
134 fn llvm_f32x4_abs(x
: f32x4
) -> f32x4
;
135 #[link_name = "llvm.sqrt.v4f32"]
136 fn llvm_f32x4_sqrt(x
: f32x4
) -> f32x4
;
137 #[link_name = "llvm.minimum.v4f32"]
138 fn llvm_f32x4_min(x
: f32x4
, y
: f32x4
) -> f32x4
;
139 #[link_name = "llvm.maximum.v4f32"]
140 fn llvm_f32x4_max(x
: f32x4
, y
: f32x4
) -> f32x4
;
141 #[link_name = "llvm.fabs.v2f64"]
142 fn llvm_f64x2_abs(x
: f64x2
) -> f64x2
;
143 #[link_name = "llvm.sqrt.v2f64"]
144 fn llvm_f64x2_sqrt(x
: f64x2
) -> f64x2
;
145 #[link_name = "llvm.minimum.v2f64"]
146 fn llvm_f64x2_min(x
: f64x2
, y
: f64x2
) -> f64x2
;
147 #[link_name = "llvm.maximum.v2f64"]
148 fn llvm_f64x2_max(x
: f64x2
, y
: f64x2
) -> f64x2
;
150 #[link_name = "llvm.wasm.bitselect.v16i8"]
151 fn llvm_bitselect(a
: i8x16
, b
: i8x16
, c
: i8x16
) -> i8x16
;
152 #[link_name = "llvm.wasm.swizzle"]
153 fn llvm_swizzle(a
: i8x16
, b
: i8x16
) -> i8x16
;
155 #[link_name = "llvm.wasm.bitmask.v16i8"]
156 fn llvm_bitmask_i8x16(a
: i8x16
) -> i32;
157 #[link_name = "llvm.wasm.narrow.signed.v16i8.v8i16"]
158 fn llvm_narrow_i8x16_s(a
: i16x8
, b
: i16x8
) -> i8x16
;
159 #[link_name = "llvm.wasm.narrow.unsigned.v16i8.v8i16"]
160 fn llvm_narrow_i8x16_u(a
: i16x8
, b
: i16x8
) -> i8x16
;
161 #[link_name = "llvm.wasm.avgr.unsigned.v16i8"]
162 fn llvm_avgr_u_i8x16(a
: i8x16
, b
: i8x16
) -> i8x16
;
164 #[link_name = "llvm.wasm.bitmask.v8i16"]
165 fn llvm_bitmask_i16x8(a
: i16x8
) -> i32;
166 #[link_name = "llvm.wasm.narrow.signed.v8i16.v8i16"]
167 fn llvm_narrow_i16x8_s(a
: i32x4
, b
: i32x4
) -> i16x8
;
168 #[link_name = "llvm.wasm.narrow.unsigned.v8i16.v8i16"]
169 fn llvm_narrow_i16x8_u(a
: i32x4
, b
: i32x4
) -> i16x8
;
170 #[link_name = "llvm.wasm.avgr.unsigned.v8i16"]
171 fn llvm_avgr_u_i16x8(a
: i16x8
, b
: i16x8
) -> i16x8
;
172 #[link_name = "llvm.wasm.widen.low.signed.v8i16.v16i8"]
173 fn llvm_widen_low_i16x8_s(a
: i8x16
) -> i16x8
;
174 #[link_name = "llvm.wasm.widen.high.signed.v8i16.v16i8"]
175 fn llvm_widen_high_i16x8_s(a
: i8x16
) -> i16x8
;
176 #[link_name = "llvm.wasm.widen.low.unsigned.v8i16.v16i8"]
177 fn llvm_widen_low_i16x8_u(a
: i8x16
) -> i16x8
;
178 #[link_name = "llvm.wasm.widen.high.unsigned.v8i16.v16i8"]
179 fn llvm_widen_high_i16x8_u(a
: i8x16
) -> i16x8
;
181 #[link_name = "llvm.wasm.bitmask.v4i32"]
182 fn llvm_bitmask_i32x4(a
: i32x4
) -> i32;
183 #[link_name = "llvm.wasm.avgr.unsigned.v4i32"]
184 fn llvm_avgr_u_i32x4(a
: i32x4
, b
: i32x4
) -> i32x4
;
185 #[link_name = "llvm.wasm.widen.low.signed.v4i32.v8i16"]
186 fn llvm_widen_low_i32x4_s(a
: i16x8
) -> i32x4
;
187 #[link_name = "llvm.wasm.widen.high.signed.v4i32.v8i16"]
188 fn llvm_widen_high_i32x4_s(a
: i16x8
) -> i32x4
;
189 #[link_name = "llvm.wasm.widen.low.unsigned.v4i32.v8i16"]
190 fn llvm_widen_low_i32x4_u(a
: i16x8
) -> i32x4
;
191 #[link_name = "llvm.wasm.widen.high.unsigned.v4i32.v8i16"]
192 fn llvm_widen_high_i32x4_u(a
: i16x8
) -> i32x4
;
195 /// Loads a `v128` vector from the given heap address.
197 #[cfg_attr(test, assert_instr(v128.load))]
198 #[target_feature(enable = "simd128")]
199 pub unsafe fn v128_load(m
: *const v128
) -> v128
{
203 /// Load eight 8-bit integers and sign extend each one to a 16-bit lane
205 #[cfg_attr(all(test, all_simd), assert_instr(i16x8.load8x8_s))]
206 #[target_feature(enable = "simd128")]
207 pub unsafe fn i16x8_load8x8_s(m
: *const i8) -> v128
{
208 transmute(simd_cast
::<_
, i16x8
>(*(m
as *const i8x8
)))
211 /// Load eight 8-bit integers and zero extend each one to a 16-bit lane
213 #[cfg_attr(all(test, all_simd), assert_instr(i16x8.load8x8_u))]
214 #[target_feature(enable = "simd128")]
215 pub unsafe fn i16x8_load8x8_u(m
: *const u8) -> v128
{
216 transmute(simd_cast
::<_
, u16x8
>(*(m
as *const u8x8
)))
219 /// Load four 16-bit integers and sign extend each one to a 32-bit lane
221 #[cfg_attr(all(test, all_simd), assert_instr(i32x4.load16x4_s))]
222 #[target_feature(enable = "simd128")]
223 pub unsafe fn i32x4_load16x4_s(m
: *const i16) -> v128
{
224 transmute(simd_cast
::<_
, i32x4
>(*(m
as *const i16x4
)))
227 /// Load four 16-bit integers and zero extend each one to a 32-bit lane
229 #[cfg_attr(all(test, all_simd), assert_instr(i32x4.load16x4_u))]
230 #[target_feature(enable = "simd128")]
231 pub unsafe fn i32x4_load16x4_u(m
: *const u16) -> v128
{
232 transmute(simd_cast
::<_
, u32x4
>(*(m
as *const u16x4
)))
235 /// Load two 32-bit integers and sign extend each one to a 64-bit lane
237 #[cfg_attr(all(test, all_simd), assert_instr(i64x2.load32x2_s))]
238 #[target_feature(enable = "simd128")]
239 pub unsafe fn i64x2_load32x2_s(m
: *const i32) -> v128
{
240 transmute(simd_cast
::<_
, i64x2
>(*(m
as *const i32x2
)))
243 /// Load two 32-bit integers and zero extend each one to a 64-bit lane
245 #[cfg_attr(all(test, all_simd), assert_instr(i64x2.load32x2_u))]
246 #[target_feature(enable = "simd128")]
247 pub unsafe fn i64x2_load32x2_u(m
: *const u32) -> v128
{
248 transmute(simd_cast
::<_
, u64x2
>(*(m
as *const u32x2
)))
251 /// Load a single element and splat to all lanes of a v128 vector.
253 #[cfg_attr(all(test, all_simd), assert_instr(v8x16.load_splat))]
254 #[target_feature(enable = "simd128")]
255 pub unsafe fn v8x16_load_splat(m
: *const u8) -> v128
{
257 transmute(u8x16(v
, v
, v
, v
, v
, v
, v
, v
, v
, v
, v
, v
, v
, v
, v
, v
))
260 /// Load a single element and splat to all lanes of a v128 vector.
262 #[cfg_attr(all(test, all_simd), assert_instr(v16x8.load_splat))]
263 #[target_feature(enable = "simd128")]
264 pub unsafe fn v16x8_load_splat(m
: *const u16) -> v128
{
266 transmute(u16x8(v
, v
, v
, v
, v
, v
, v
, v
))
269 /// Load a single element and splat to all lanes of a v128 vector.
271 #[cfg_attr(all(test, all_simd), assert_instr(v32x4.load_splat))]
272 #[target_feature(enable = "simd128")]
273 pub unsafe fn v32x4_load_splat(m
: *const u32) -> v128
{
275 transmute(u32x4(v
, v
, v
, v
))
278 /// Load a single element and splat to all lanes of a v128 vector.
280 #[cfg_attr(all(test, all_simd), assert_instr(v64x2.load_splat))]
281 #[target_feature(enable = "simd128")]
282 pub unsafe fn v64x2_load_splat(m
: *const u64) -> v128
{
284 transmute(u64x2(v
, v
))
287 /// Stores a `v128` vector to the given heap address.
289 #[cfg_attr(test, assert_instr(v128.store))]
290 #[target_feature(enable = "simd128")]
291 pub unsafe fn v128_store(m
: *mut v128
, a
: v128
) {
295 /// Materializes a constant SIMD value from the immediate operands.
297 /// This function generates a `v128.const` instruction as if the generated
298 /// vector was interpreted as sixteen 8-bit integers.
300 #[target_feature(enable = "simd128")]
323 pub const unsafe fn i8x16_const(
342 a0
, a1
, a2
, a3
, a4
, a5
, a6
, a7
, a8
, a9
, a10
, a11
, a12
, a13
, a14
, a15
,
346 /// Materializes a constant SIMD value from the immediate operands.
348 /// This function generates a `v128.const` instruction as if the generated
349 /// vector was interpreted as eight 16-bit integers.
351 #[target_feature(enable = "simd128")]
366 pub const unsafe fn i16x8_const(
376 transmute(i16x8(a0
, a1
, a2
, a3
, a4
, a5
, a6
, a7
))
379 /// Materializes a constant SIMD value from the immediate operands.
381 /// This function generates a `v128.const` instruction as if the generated
382 /// vector was interpreted as four 32-bit integers.
384 #[target_feature(enable = "simd128")]
385 #[cfg_attr(all(test, all_simd), assert_instr(v128.const, a0 = 0, a1 = 1, a2 = 2, a3 = 3))]
386 pub const unsafe fn i32x4_const(a0
: i32, a1
: i32, a2
: i32, a3
: i32) -> v128
{
387 transmute(i32x4(a0
, a1
, a2
, a3
))
390 /// Materializes a constant SIMD value from the immediate operands.
392 /// This function generates a `v128.const` instruction as if the generated
393 /// vector was interpreted as two 64-bit integers.
395 #[target_feature(enable = "simd128")]
396 #[cfg_attr(all(test, all_simd), assert_instr(v128.const, a0 = 0, a1 = 1))]
397 pub const unsafe fn i64x2_const(a0
: i64, a1
: i64) -> v128
{
398 transmute(i64x2(a0
, a1
))
401 /// Materializes a constant SIMD value from the immediate operands.
403 /// This function generates a `v128.const` instruction as if the generated
404 /// vector was interpreted as four 32-bit floats.
406 #[target_feature(enable = "simd128")]
407 #[cfg_attr(all(test, all_simd), assert_instr(v128.const, a0 = 0.0, a1 = 1.0, a2 = 2.0, a3 = 3.0))]
408 pub const unsafe fn f32x4_const(a0
: f32, a1
: f32, a2
: f32, a3
: f32) -> v128
{
409 transmute(f32x4(a0
, a1
, a2
, a3
))
412 /// Materializes a constant SIMD value from the immediate operands.
414 /// This function generates a `v128.const` instruction as if the generated
415 /// vector was interpreted as two 64-bit floats.
417 #[target_feature(enable = "simd128")]
418 #[cfg_attr(all(test, all_simd), assert_instr(v128.const, a0 = 0.0, a1 = 1.0))]
419 pub const unsafe fn f64x2_const(a0
: f64, a1
: f64) -> v128
{
420 transmute(f64x2(a0
, a1
))
423 /// Returns a new vector with lanes selected from the lanes of the two input
424 /// vectors `$a` and `$b` specified in the 16 immediate operands.
426 /// The `$a` and `$b` expressions must have type `v128`, and this macro
427 /// generates a wasm instruction that is encoded with 16 bytes providing the
428 /// indices of the elements to return. The indices `i` in range [0, 15] select
429 /// the `i`-th element of `a`. The indices in range [16, 31] select the `i -
430 /// 16`-th element of `b`.
432 /// Note that this is a macro due to the codegen requirements of all of the
433 /// index expressions `$i*` must be constant. A compiler error will be
434 /// generated if any of the expressions are not constant.
436 /// All indexes `$i*` must have the type `u32`.
438 #[target_feature(enable = "simd128")]
439 pub unsafe fn v8x16_shuffle
<
460 let shuf
= simd_shuffle16
::<u8x16
, u8x16
>(
464 I0
as u32, I1
as u32, I2
as u32, I3
as u32, I4
as u32, I5
as u32, I6
as u32, I7
as u32,
465 I8
as u32, I9
as u32, I10
as u32, I11
as u32, I12
as u32, I13
as u32, I14
as u32,
// Instruction-selection check for `v8x16_shuffle` (test-only helper).
#[cfg(test)]
#[assert_instr(v8x16.shuffle)]
#[target_feature(enable = "simd128")]
unsafe fn v8x16_shuffle_test(a: v128, b: v128) -> v128 {
    v8x16_shuffle::<0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30>(a, b)
}
479 /// Same as [`v8x16_shuffle`], except operates as if the inputs were eight
480 /// 16-bit integers, only taking 8 indices to shuffle.
482 /// Indices in the range [0, 7] select from `a` while [8, 15] select from `b`.
483 /// Note that this will generate the `v8x16.shuffle` instruction, since there
484 /// is no native `v16x8.shuffle` instruction (there is no need for one since
485 /// `v8x16.shuffle` suffices).
487 #[target_feature(enable = "simd128")]
488 pub unsafe fn v16x8_shuffle
<
501 let shuf
= simd_shuffle8
::<u16x8
, u16x8
>(
505 I0
as u32, I1
as u32, I2
as u32, I3
as u32, I4
as u32, I5
as u32, I6
as u32, I7
as u32,
// Instruction-selection check for `v16x8_shuffle` (test-only helper).
#[cfg(test)]
#[assert_instr(v8x16.shuffle)]
#[target_feature(enable = "simd128")]
unsafe fn v16x8_shuffle_test(a: v128, b: v128) -> v128 {
    v16x8_shuffle::<0, 2, 4, 6, 8, 10, 12, 14>(a, b)
}
518 /// Same as [`v8x16_shuffle`], except operates as if the inputs were four
519 /// 32-bit integers, only taking 4 indices to shuffle.
521 /// Indices in the range [0, 3] select from `a` while [4, 7] select from `b`.
522 /// Note that this will generate the `v8x16.shuffle` instruction, since there
523 /// is no native `v32x4.shuffle` instruction (there is no need for one since
524 /// `v8x16.shuffle` suffices).
526 #[target_feature(enable = "simd128")]
527 pub unsafe fn v32x4_shuffle
<const I0
: usize, const I1
: usize, const I2
: usize, const I3
: usize>(
531 let shuf
= simd_shuffle4
::<u32x4
, u32x4
>(
534 [I0
as u32, I1
as u32, I2
as u32, I3
as u32],
// Instruction-selection check for `v32x4_shuffle` (test-only helper).
#[cfg(test)]
#[assert_instr(v8x16.shuffle)]
#[target_feature(enable = "simd128")]
unsafe fn v32x4_shuffle_test(a: v128, b: v128) -> v128 {
    v32x4_shuffle::<0, 2, 4, 6>(a, b)
}
546 /// Same as [`v8x16_shuffle`], except operates as if the inputs were two
547 /// 64-bit integers, only taking 2 indices to shuffle.
549 /// Indices in the range [0, 1] select from `a` while [2, 3] select from `b`.
550 /// Note that this will generate the `v8x16.shuffle` instruction, since there
551 /// is no native `v64x2.shuffle` instruction (there is no need for one since
552 /// `v8x16.shuffle` suffices).
554 #[target_feature(enable = "simd128")]
555 pub unsafe fn v64x2_shuffle
<const I0
: usize, const I1
: usize>(a
: v128
, b
: v128
) -> v128
{
556 let shuf
= simd_shuffle2
::<u64x2
, u64x2
>(a
.as_u64x2(), b
.as_u64x2(), [I0
as u32, I1
as u32]);
// Instruction-selection check for `v64x2_shuffle` (test-only helper).
#[cfg(test)]
#[assert_instr(v8x16.shuffle)]
#[target_feature(enable = "simd128")]
unsafe fn v64x2_shuffle_test(a: v128, b: v128) -> v128 {
    v64x2_shuffle::<0, 2>(a, b)
}
567 /// Returns a new vector with lanes selected from the lanes of the first input
568 /// vector `a` specified in the second input vector `s`.
570 /// The indices `i` in range [0, 15] select the `i`-th element of `a`. For
571 /// indices outside of the range the resulting lane is 0.
573 #[cfg_attr(test, assert_instr(v8x16.swizzle))]
574 #[target_feature(enable = "simd128")]
575 pub unsafe fn v8x16_swizzle(a
: v128
, s
: v128
) -> v128
{
576 transmute(llvm_swizzle(transmute(a
), transmute(s
)))
579 /// Creates a vector with identical lanes.
581 /// Constructs a vector with `x` replicated to all 16 lanes.
583 #[cfg_attr(test, assert_instr(i8x16.splat))]
584 #[target_feature(enable = "simd128")]
585 pub unsafe fn i8x16_splat(a
: i8) -> v128
{
586 transmute(i8x16
::splat(a
))
589 /// Creates a vector with identical lanes.
591 /// Construct a vector with `x` replicated to all 8 lanes.
593 #[cfg_attr(test, assert_instr(i16x8.splat))]
594 #[target_feature(enable = "simd128")]
595 pub unsafe fn i16x8_splat(a
: i16) -> v128
{
596 transmute(i16x8
::splat(a
))
599 /// Creates a vector with identical lanes.
601 /// Constructs a vector with `x` replicated to all 4 lanes.
603 #[cfg_attr(test, assert_instr(i32x4.splat))]
604 #[target_feature(enable = "simd128")]
605 pub unsafe fn i32x4_splat(a
: i32) -> v128
{
606 transmute(i32x4
::splat(a
))
609 /// Creates a vector with identical lanes.
611 /// Construct a vector with `x` replicated to all 2 lanes.
613 #[cfg_attr(test, assert_instr(i64x2.splat))]
614 #[target_feature(enable = "simd128")]
615 pub unsafe fn i64x2_splat(a
: i64) -> v128
{
616 transmute(i64x2
::splat(a
))
619 /// Creates a vector with identical lanes.
621 /// Constructs a vector with `x` replicated to all 4 lanes.
623 #[cfg_attr(test, assert_instr(f32x4.splat))]
624 #[target_feature(enable = "simd128")]
625 pub unsafe fn f32x4_splat(a
: f32) -> v128
{
626 transmute(f32x4
::splat(a
))
629 /// Creates a vector with identical lanes.
631 /// Constructs a vector with `x` replicated to all 2 lanes.
633 #[cfg_attr(test, assert_instr(f64x2.splat))]
634 #[target_feature(enable = "simd128")]
635 pub unsafe fn f64x2_splat(a
: f64) -> v128
{
636 transmute(f64x2
::splat(a
))
639 /// Extracts a lane from a 128-bit vector interpreted as 16 packed i8 numbers.
641 /// Extracts the scalar value of lane specified in the immediate mode operand
642 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
644 #[target_feature(enable = "simd128")]
645 pub unsafe fn i8x16_extract_lane
<const N
: usize>(a
: v128
) -> i8 {
646 simd_extract(a
.as_i8x16(), N
as u32)
// Instruction-selection check: sign-extending i8 lane extract (test-only).
#[cfg(test)]
#[assert_instr(i8x16.extract_lane_s)]
#[target_feature(enable = "simd128")]
unsafe fn i8x16_extract_lane_s(a: v128) -> i32 {
    i8x16_extract_lane::<0>(a) as i32
}
// Instruction-selection check: zero-extending i8 lane extract (test-only).
// The `as u8 as u32` double cast zero-extends rather than sign-extends.
#[cfg(test)]
#[assert_instr(i8x16.extract_lane_u)]
#[target_feature(enable = "simd128")]
unsafe fn i8x16_extract_lane_u(a: v128) -> u32 {
    i8x16_extract_lane::<0>(a) as u8 as u32
}
663 /// Replaces a lane from a 128-bit vector interpreted as 16 packed i8 numbers.
665 /// Replaces the scalar value of lane specified in the immediate mode operand
666 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
668 #[target_feature(enable = "simd128")]
669 pub unsafe fn i8x16_replace_lane
<const N
: usize>(a
: v128
, val
: i8) -> v128
{
670 transmute(simd_insert(a
.as_i8x16(), N
as u32, val
))
// Instruction-selection check for `i8x16_replace_lane` (test-only helper).
#[cfg(test)]
#[assert_instr(i8x16.replace_lane)]
#[target_feature(enable = "simd128")]
unsafe fn i8x16_replace_lane_test(a: v128, val: i8) -> v128 {
    i8x16_replace_lane::<0>(a, val)
}
680 /// Extracts a lane from a 128-bit vector interpreted as 8 packed i16 numbers.
682 /// Extracts a the scalar value of lane specified in the immediate mode operand
683 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
685 #[target_feature(enable = "simd128")]
686 pub unsafe fn i16x8_extract_lane
<const N
: usize>(a
: v128
) -> i16 {
687 simd_extract(a
.as_i16x8(), N
as u32)
// Instruction-selection check: sign-extending i16 lane extract (test-only).
#[cfg(test)]
#[assert_instr(i16x8.extract_lane_s)]
#[target_feature(enable = "simd128")]
unsafe fn i16x8_extract_lane_s(a: v128) -> i32 {
    i16x8_extract_lane::<0>(a) as i32
}
// Instruction-selection check: zero-extending i16 lane extract (test-only).
// The `as u16 as u32` double cast zero-extends rather than sign-extends.
#[cfg(test)]
#[assert_instr(i16x8.extract_lane_u)]
#[target_feature(enable = "simd128")]
unsafe fn i16x8_extract_lane_u(a: v128) -> u32 {
    i16x8_extract_lane::<0>(a) as u16 as u32
}
704 /// Replaces a lane from a 128-bit vector interpreted as 8 packed i16 numbers.
706 /// Replaces the scalar value of lane specified in the immediate mode operand
707 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
709 #[target_feature(enable = "simd128")]
710 pub unsafe fn i16x8_replace_lane
<const N
: usize>(a
: v128
, val
: i16) -> v128
{
711 transmute(simd_insert(a
.as_i16x8(), N
as u32, val
))
// Instruction-selection check for `i16x8_replace_lane` (test-only helper).
#[cfg(test)]
#[assert_instr(i16x8.replace_lane)]
#[target_feature(enable = "simd128")]
unsafe fn i16x8_replace_lane_test(a: v128, val: i16) -> v128 {
    i16x8_replace_lane::<0>(a, val)
}
721 /// Extracts a lane from a 128-bit vector interpreted as 4 packed i32 numbers.
723 /// Extracts the scalar value of lane specified in the immediate mode operand
724 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
726 #[target_feature(enable = "simd128")]
727 pub unsafe fn i32x4_extract_lane
<const N
: usize>(a
: v128
) -> i32 {
728 simd_extract(a
.as_i32x4(), N
as u32)
// Instruction-selection check for `i32x4_extract_lane` (test-only helper).
#[cfg(test)]
#[assert_instr(i32x4.extract_lane)]
#[target_feature(enable = "simd128")]
unsafe fn i32x4_extract_lane_test(a: v128) -> i32 {
    i32x4_extract_lane::<0>(a)
}
738 /// Replaces a lane from a 128-bit vector interpreted as 4 packed i32 numbers.
740 /// Replaces the scalar value of lane specified in the immediate mode operand
741 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
743 #[target_feature(enable = "simd128")]
744 pub unsafe fn i32x4_replace_lane
<const N
: usize>(a
: v128
, val
: i32) -> v128
{
745 transmute(simd_insert(a
.as_i32x4(), N
as u32, val
))
// Instruction-selection check for `i32x4_replace_lane` (test-only helper).
#[cfg(test)]
#[assert_instr(i32x4.replace_lane)]
#[target_feature(enable = "simd128")]
unsafe fn i32x4_replace_lane_test(a: v128, val: i32) -> v128 {
    i32x4_replace_lane::<0>(a, val)
}
755 /// Extracts a lane from a 128-bit vector interpreted as 2 packed i64 numbers.
757 /// Extracts the scalar value of lane specified in the immediate mode operand
758 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
760 #[target_feature(enable = "simd128")]
761 pub unsafe fn i64x2_extract_lane
<const N
: usize>(a
: v128
) -> i64 {
762 simd_extract(a
.as_i64x2(), N
as u32)
// Instruction-selection check for `i64x2_extract_lane` (test-only helper).
#[cfg(test)]
#[assert_instr(i64x2.extract_lane)]
#[target_feature(enable = "simd128")]
unsafe fn i64x2_extract_lane_test(a: v128) -> i64 {
    i64x2_extract_lane::<0>(a)
}
772 /// Replaces a lane from a 128-bit vector interpreted as 2 packed i64 numbers.
774 /// Replaces the scalar value of lane specified in the immediate mode operand
775 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
777 #[target_feature(enable = "simd128")]
778 pub unsafe fn i64x2_replace_lane
<const N
: usize>(a
: v128
, val
: i64) -> v128
{
779 transmute(simd_insert(a
.as_i64x2(), N
as u32, val
))
// Instruction-selection check for `i64x2_replace_lane` (test-only helper).
#[cfg(test)]
#[assert_instr(i64x2.replace_lane)]
#[target_feature(enable = "simd128")]
unsafe fn i64x2_replace_lane_test(a: v128, val: i64) -> v128 {
    i64x2_replace_lane::<0>(a, val)
}
789 /// Extracts a lane from a 128-bit vector interpreted as 4 packed f32 numbers.
791 /// Extracts the scalar value of lane specified fn the immediate mode operand
792 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
794 #[target_feature(enable = "simd128")]
795 pub unsafe fn f32x4_extract_lane
<const N
: usize>(a
: v128
) -> f32 {
796 simd_extract(a
.as_f32x4(), N
as u32)
// Instruction-selection check for `f32x4_extract_lane` (test-only helper).
#[cfg(test)]
#[assert_instr(f32x4.extract_lane)]
#[target_feature(enable = "simd128")]
unsafe fn f32x4_extract_lane_test(a: v128) -> f32 {
    f32x4_extract_lane::<0>(a)
}
806 /// Replaces a lane from a 128-bit vector interpreted as 4 packed f32 numbers.
808 /// Replaces the scalar value of lane specified fn the immediate mode operand
809 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
811 #[target_feature(enable = "simd128")]
812 pub unsafe fn f32x4_replace_lane
<const N
: usize>(a
: v128
, val
: f32) -> v128
{
813 transmute(simd_insert(a
.as_f32x4(), N
as u32, val
))
// Instruction-selection check for `f32x4_replace_lane` (test-only helper).
#[cfg(test)]
#[assert_instr(f32x4.replace_lane)]
#[target_feature(enable = "simd128")]
unsafe fn f32x4_replace_lane_test(a: v128, val: f32) -> v128 {
    f32x4_replace_lane::<0>(a, val)
}
823 /// Extracts a lane from a 128-bit vector interpreted as 2 packed f64 numbers.
825 /// Extracts the scalar value of lane specified fn the immediate mode operand
826 /// `N` from `a`. If `N` fs out of bounds then it is a compile time error.
828 #[target_feature(enable = "simd128")]
829 pub unsafe fn f64x2_extract_lane
<const N
: usize>(a
: v128
) -> f64 {
830 simd_extract(a
.as_f64x2(), N
as u32)
// Instruction-selection check for `f64x2_extract_lane` (test-only helper).
#[cfg(test)]
#[assert_instr(f64x2.extract_lane)]
#[target_feature(enable = "simd128")]
unsafe fn f64x2_extract_lane_test(a: v128) -> f64 {
    f64x2_extract_lane::<0>(a)
}
840 /// Replaces a lane from a 128-bit vector interpreted as 2 packed f64 numbers.
842 /// Replaces the scalar value of lane specified in the immediate mode operand
843 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
845 #[target_feature(enable = "simd128")]
846 pub unsafe fn f64x2_replace_lane
<const N
: usize>(a
: v128
, val
: f64) -> v128
{
847 transmute(simd_insert(a
.as_f64x2(), N
as u32, val
))
// Instruction-selection check for `f64x2_replace_lane` (test-only helper).
#[cfg(test)]
#[assert_instr(f64x2.replace_lane)]
#[target_feature(enable = "simd128")]
unsafe fn f64x2_replace_lane_test(a: v128, val: f64) -> v128 {
    f64x2_replace_lane::<0>(a, val)
}
857 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
860 /// Returns a new vector where each lane is all ones if the pairwise elements
861 /// were equal, or all zeros if the elements were not equal.
863 #[cfg_attr(test, assert_instr(i8x16.eq))]
864 #[target_feature(enable = "simd128")]
865 pub unsafe fn i8x16_eq(a
: v128
, b
: v128
) -> v128
{
866 transmute(simd_eq
::<_
, i8x16
>(a
.as_i8x16(), b
.as_i8x16()))
869 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
872 /// Returns a new vector where each lane is all ones if the pairwise elements
873 /// were not equal, or all zeros if the elements were equal.
875 #[cfg_attr(test, assert_instr(i8x16.ne))]
876 #[target_feature(enable = "simd128")]
877 pub unsafe fn i8x16_ne(a
: v128
, b
: v128
) -> v128
{
878 transmute(simd_ne
::<_
, i8x16
>(a
.as_i8x16(), b
.as_i8x16()))
881 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
884 /// Returns a new vector where each lane is all ones if the pairwise left
885 /// element is less than the pairwise right element, or all zeros otherwise.
887 #[cfg_attr(test, assert_instr(i8x16.lt_s))]
888 #[target_feature(enable = "simd128")]
889 pub unsafe fn i8x16_lt_s(a
: v128
, b
: v128
) -> v128
{
890 transmute(simd_lt
::<_
, i8x16
>(a
.as_i8x16(), b
.as_i8x16()))
893 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
894 /// unsigned integers.
896 /// Returns a new vector where each lane is all ones if the pairwise left
897 /// element is less than the pairwise right element, or all zeros otherwise.
899 #[cfg_attr(test, assert_instr(i8x16.lt_u))]
900 #[target_feature(enable = "simd128")]
901 pub unsafe fn i8x16_lt_u(a
: v128
, b
: v128
) -> v128
{
902 transmute(simd_lt
::<_
, i8x16
>(a
.as_u8x16(), b
.as_u8x16()))
905 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
908 /// Returns a new vector where each lane is all ones if the pairwise left
909 /// element is greater than the pairwise right element, or all zeros otherwise.
911 #[cfg_attr(test, assert_instr(i8x16.gt_s))]
912 #[target_feature(enable = "simd128")]
913 pub unsafe fn i8x16_gt_s(a
: v128
, b
: v128
) -> v128
{
914 transmute(simd_gt
::<_
, i8x16
>(a
.as_i8x16(), b
.as_i8x16()))
917 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
918 /// unsigned integers.
920 /// Returns a new vector where each lane is all ones if the pairwise left
921 /// element is greater than the pairwise right element, or all zeros otherwise.
923 #[cfg_attr(test, assert_instr(i8x16.gt_u))]
924 #[target_feature(enable = "simd128")]
925 pub unsafe fn i8x16_gt_u(a
: v128
, b
: v128
) -> v128
{
926 transmute(simd_gt
::<_
, i8x16
>(a
.as_u8x16(), b
.as_u8x16()))
929 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
932 /// Returns a new vector where each lane is all ones if the pairwise left
933 /// element is less than the pairwise right element, or all zeros otherwise.
935 #[cfg_attr(test, assert_instr(i8x16.le_s))]
936 #[target_feature(enable = "simd128")]
937 pub unsafe fn i8x16_le_s(a
: v128
, b
: v128
) -> v128
{
938 transmute(simd_le
::<_
, i8x16
>(a
.as_i8x16(), b
.as_i8x16()))
941 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
942 /// unsigned integers.
944 /// Returns a new vector where each lane is all ones if the pairwise left
945 /// element is less than the pairwise right element, or all zeros otherwise.
947 #[cfg_attr(test, assert_instr(i8x16.le_u))]
948 #[target_feature(enable = "simd128")]
949 pub unsafe fn i8x16_le_u(a
: v128
, b
: v128
) -> v128
{
950 transmute(simd_le
::<_
, i8x16
>(a
.as_u8x16(), b
.as_u8x16()))
953 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
956 /// Returns a new vector where each lane is all ones if the pairwise left
957 /// element is greater than the pairwise right element, or all zeros otherwise.
959 #[cfg_attr(test, assert_instr(i8x16.ge_s))]
960 #[target_feature(enable = "simd128")]
961 pub unsafe fn i8x16_ge_s(a
: v128
, b
: v128
) -> v128
{
962 transmute(simd_ge
::<_
, i8x16
>(a
.as_i8x16(), b
.as_i8x16()))
965 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
966 /// unsigned integers.
968 /// Returns a new vector where each lane is all ones if the pairwise left
969 /// element is greater than the pairwise right element, or all zeros otherwise.
971 #[cfg_attr(test, assert_instr(i8x16.ge_u))]
972 #[target_feature(enable = "simd128")]
973 pub unsafe fn i8x16_ge_u(a
: v128
, b
: v128
) -> v128
{
974 transmute(simd_ge
::<_
, i8x16
>(a
.as_u8x16(), b
.as_u8x16()))
977 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
980 /// Returns a new vector where each lane is all ones if the pairwise elements
981 /// were equal, or all zeros if the elements were not equal.
983 #[cfg_attr(test, assert_instr(i16x8.eq))]
984 #[target_feature(enable = "simd128")]
985 pub unsafe fn i16x8_eq(a
: v128
, b
: v128
) -> v128
{
986 transmute(simd_eq
::<_
, i16x8
>(a
.as_i16x8(), b
.as_i16x8()))
989 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
992 /// Returns a new vector where each lane is all ones if the pairwise elements
993 /// were not equal, or all zeros if the elements were equal.
995 #[cfg_attr(test, assert_instr(i16x8.ne))]
996 #[target_feature(enable = "simd128")]
997 pub unsafe fn i16x8_ne(a
: v128
, b
: v128
) -> v128
{
998 transmute(simd_ne
::<_
, i16x8
>(a
.as_i16x8(), b
.as_i16x8()))
1001 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
1002 /// signed integers.
1004 /// Returns a new vector where each lane is all ones if the pairwise left
1005 /// element is less than the pairwise right element, or all zeros otherwise.
1007 #[cfg_attr(test, assert_instr(i16x8.lt_s))]
1008 #[target_feature(enable = "simd128")]
1009 pub unsafe fn i16x8_lt_s(a
: v128
, b
: v128
) -> v128
{
1010 transmute(simd_lt
::<_
, i16x8
>(a
.as_i16x8(), b
.as_i16x8()))
1013 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
1014 /// unsigned integers.
1016 /// Returns a new vector where each lane is all ones if the pairwise left
1017 /// element is less than the pairwise right element, or all zeros otherwise.
1019 #[cfg_attr(test, assert_instr(i16x8.lt_u))]
1020 #[target_feature(enable = "simd128")]
1021 pub unsafe fn i16x8_lt_u(a
: v128
, b
: v128
) -> v128
{
1022 transmute(simd_lt
::<_
, i16x8
>(a
.as_u16x8(), b
.as_u16x8()))
1025 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
1026 /// signed integers.
1028 /// Returns a new vector where each lane is all ones if the pairwise left
1029 /// element is greater than the pairwise right element, or all zeros otherwise.
1031 #[cfg_attr(test, assert_instr(i16x8.gt_s))]
1032 #[target_feature(enable = "simd128")]
1033 pub unsafe fn i16x8_gt_s(a
: v128
, b
: v128
) -> v128
{
1034 transmute(simd_gt
::<_
, i16x8
>(a
.as_i16x8(), b
.as_i16x8()))
1037 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
1038 /// unsigned integers.
1040 /// Returns a new vector where each lane is all ones if the pairwise left
1041 /// element is greater than the pairwise right element, or all zeros otherwise.
1043 #[cfg_attr(test, assert_instr(i16x8.gt_u))]
1044 #[target_feature(enable = "simd128")]
1045 pub unsafe fn i16x8_gt_u(a
: v128
, b
: v128
) -> v128
{
1046 transmute(simd_gt
::<_
, i16x8
>(a
.as_u16x8(), b
.as_u16x8()))
1049 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
1050 /// signed integers.
1052 /// Returns a new vector where each lane is all ones if the pairwise left
1053 /// element is less than the pairwise right element, or all zeros otherwise.
1055 #[cfg_attr(test, assert_instr(i16x8.le_s))]
1056 #[target_feature(enable = "simd128")]
1057 pub unsafe fn i16x8_le_s(a
: v128
, b
: v128
) -> v128
{
1058 transmute(simd_le
::<_
, i16x8
>(a
.as_i16x8(), b
.as_i16x8()))
1061 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
1062 /// unsigned integers.
1064 /// Returns a new vector where each lane is all ones if the pairwise left
1065 /// element is less than the pairwise right element, or all zeros otherwise.
1067 #[cfg_attr(test, assert_instr(i16x8.le_u))]
1068 #[target_feature(enable = "simd128")]
1069 pub unsafe fn i16x8_le_u(a
: v128
, b
: v128
) -> v128
{
1070 transmute(simd_le
::<_
, i16x8
>(a
.as_u16x8(), b
.as_u16x8()))
1073 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
1074 /// signed integers.
1076 /// Returns a new vector where each lane is all ones if the pairwise left
1077 /// element is greater than the pairwise right element, or all zeros otherwise.
1079 #[cfg_attr(test, assert_instr(i16x8.ge_s))]
1080 #[target_feature(enable = "simd128")]
1081 pub unsafe fn i16x8_ge_s(a
: v128
, b
: v128
) -> v128
{
1082 transmute(simd_ge
::<_
, i16x8
>(a
.as_i16x8(), b
.as_i16x8()))
1085 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
1086 /// unsigned integers.
1088 /// Returns a new vector where each lane is all ones if the pairwise left
1089 /// element is greater than the pairwise right element, or all zeros otherwise.
1091 #[cfg_attr(test, assert_instr(i16x8.ge_u))]
1092 #[target_feature(enable = "simd128")]
1093 pub unsafe fn i16x8_ge_u(a
: v128
, b
: v128
) -> v128
{
1094 transmute(simd_ge
::<_
, i16x8
>(a
.as_u16x8(), b
.as_u16x8()))
1097 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1100 /// Returns a new vector where each lane is all ones if the pairwise elements
1101 /// were equal, or all zeros if the elements were not equal.
1103 #[cfg_attr(test, assert_instr(i32x4.eq))]
1104 #[target_feature(enable = "simd128")]
1105 pub unsafe fn i32x4_eq(a
: v128
, b
: v128
) -> v128
{
1106 transmute(simd_eq
::<_
, i32x4
>(a
.as_i32x4(), b
.as_i32x4()))
1109 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1112 /// Returns a new vector where each lane is all ones if the pairwise elements
1113 /// were not equal, or all zeros if the elements were equal.
1115 #[cfg_attr(test, assert_instr(i32x4.ne))]
1116 #[target_feature(enable = "simd128")]
1117 pub unsafe fn i32x4_ne(a
: v128
, b
: v128
) -> v128
{
1118 transmute(simd_ne
::<_
, i32x4
>(a
.as_i32x4(), b
.as_i32x4()))
1121 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1122 /// signed integers.
1124 /// Returns a new vector where each lane is all ones if the pairwise left
1125 /// element is less than the pairwise right element, or all zeros otherwise.
1127 #[cfg_attr(test, assert_instr(i32x4.lt_s))]
1128 #[target_feature(enable = "simd128")]
1129 pub unsafe fn i32x4_lt_s(a
: v128
, b
: v128
) -> v128
{
1130 transmute(simd_lt
::<_
, i32x4
>(a
.as_i32x4(), b
.as_i32x4()))
1133 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1134 /// unsigned integers.
1136 /// Returns a new vector where each lane is all ones if the pairwise left
1137 /// element is less than the pairwise right element, or all zeros otherwise.
1139 #[cfg_attr(test, assert_instr(i32x4.lt_u))]
1140 #[target_feature(enable = "simd128")]
1141 pub unsafe fn i32x4_lt_u(a
: v128
, b
: v128
) -> v128
{
1142 transmute(simd_lt
::<_
, i32x4
>(a
.as_u32x4(), b
.as_u32x4()))
1145 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1146 /// signed integers.
1148 /// Returns a new vector where each lane is all ones if the pairwise left
1149 /// element is greater than the pairwise right element, or all zeros otherwise.
1151 #[cfg_attr(test, assert_instr(i32x4.gt_s))]
1152 #[target_feature(enable = "simd128")]
1153 pub unsafe fn i32x4_gt_s(a
: v128
, b
: v128
) -> v128
{
1154 transmute(simd_gt
::<_
, i32x4
>(a
.as_i32x4(), b
.as_i32x4()))
1157 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1158 /// unsigned integers.
1160 /// Returns a new vector where each lane is all ones if the pairwise left
1161 /// element is greater than the pairwise right element, or all zeros otherwise.
1163 #[cfg_attr(test, assert_instr(i32x4.gt_u))]
1164 #[target_feature(enable = "simd128")]
1165 pub unsafe fn i32x4_gt_u(a
: v128
, b
: v128
) -> v128
{
1166 transmute(simd_gt
::<_
, i32x4
>(a
.as_u32x4(), b
.as_u32x4()))
1169 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1170 /// signed integers.
1172 /// Returns a new vector where each lane is all ones if the pairwise left
1173 /// element is less than the pairwise right element, or all zeros otherwise.
1175 #[cfg_attr(test, assert_instr(i32x4.le_s))]
1176 #[target_feature(enable = "simd128")]
1177 pub unsafe fn i32x4_le_s(a
: v128
, b
: v128
) -> v128
{
1178 transmute(simd_le
::<_
, i32x4
>(a
.as_i32x4(), b
.as_i32x4()))
1181 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1182 /// unsigned integers.
1184 /// Returns a new vector where each lane is all ones if the pairwise left
1185 /// element is less than the pairwise right element, or all zeros otherwise.
1187 #[cfg_attr(test, assert_instr(i32x4.le_u))]
1188 #[target_feature(enable = "simd128")]
1189 pub unsafe fn i32x4_le_u(a
: v128
, b
: v128
) -> v128
{
1190 transmute(simd_le
::<_
, i32x4
>(a
.as_u32x4(), b
.as_u32x4()))
1193 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1194 /// signed integers.
1196 /// Returns a new vector where each lane is all ones if the pairwise left
1197 /// element is greater than the pairwise right element, or all zeros otherwise.
1199 #[cfg_attr(test, assert_instr(i32x4.ge_s))]
1200 #[target_feature(enable = "simd128")]
1201 pub unsafe fn i32x4_ge_s(a
: v128
, b
: v128
) -> v128
{
1202 transmute(simd_ge
::<_
, i32x4
>(a
.as_i32x4(), b
.as_i32x4()))
1205 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1206 /// unsigned integers.
1208 /// Returns a new vector where each lane is all ones if the pairwise left
1209 /// element is greater than the pairwise right element, or all zeros otherwise.
1211 #[cfg_attr(test, assert_instr(i32x4.ge_u))]
1212 #[target_feature(enable = "simd128")]
1213 pub unsafe fn i32x4_ge_u(a
: v128
, b
: v128
) -> v128
{
1214 transmute(simd_ge
::<_
, i32x4
>(a
.as_u32x4(), b
.as_u32x4()))
1217 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1218 /// floating point numbers.
1220 /// Returns a new vector where each lane is all ones if the pairwise elements
1221 /// were equal, or all zeros if the elements were not equal.
1223 #[cfg_attr(test, assert_instr(f32x4.eq))]
1224 #[target_feature(enable = "simd128")]
1225 pub unsafe fn f32x4_eq(a
: v128
, b
: v128
) -> v128
{
1226 transmute(simd_eq
::<_
, i32x4
>(a
.as_f32x4(), b
.as_f32x4()))
1229 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1230 /// floating point numbers.
1232 /// Returns a new vector where each lane is all ones if the pairwise elements
1233 /// were not equal, or all zeros if the elements were equal.
1235 #[cfg_attr(test, assert_instr(f32x4.ne))]
1236 #[target_feature(enable = "simd128")]
1237 pub unsafe fn f32x4_ne(a
: v128
, b
: v128
) -> v128
{
1238 transmute(simd_ne
::<_
, i32x4
>(a
.as_f32x4(), b
.as_f32x4()))
1241 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1242 /// floating point numbers.
1244 /// Returns a new vector where each lane is all ones if the pairwise left
1245 /// element is less than the pairwise right element, or all zeros otherwise.
1247 #[cfg_attr(test, assert_instr(f32x4.lt))]
1248 #[target_feature(enable = "simd128")]
1249 pub unsafe fn f32x4_lt(a
: v128
, b
: v128
) -> v128
{
1250 transmute(simd_lt
::<_
, i32x4
>(a
.as_f32x4(), b
.as_f32x4()))
1253 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1254 /// floating point numbers.
1256 /// Returns a new vector where each lane is all ones if the pairwise left
1257 /// element is greater than the pairwise right element, or all zeros otherwise.
1259 #[cfg_attr(test, assert_instr(f32x4.gt))]
1260 #[target_feature(enable = "simd128")]
1261 pub unsafe fn f32x4_gt(a
: v128
, b
: v128
) -> v128
{
1262 transmute(simd_gt
::<_
, i32x4
>(a
.as_f32x4(), b
.as_f32x4()))
1265 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1266 /// floating point numbers.
1268 /// Returns a new vector where each lane is all ones if the pairwise left
1269 /// element is less than the pairwise right element, or all zeros otherwise.
1271 #[cfg_attr(test, assert_instr(f32x4.le))]
1272 #[target_feature(enable = "simd128")]
1273 pub unsafe fn f32x4_le(a
: v128
, b
: v128
) -> v128
{
1274 transmute(simd_le
::<_
, i32x4
>(a
.as_f32x4(), b
.as_f32x4()))
1277 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1278 /// floating point numbers.
1280 /// Returns a new vector where each lane is all ones if the pairwise left
1281 /// element is greater than the pairwise right element, or all zeros otherwise.
1283 #[cfg_attr(test, assert_instr(f32x4.ge))]
1284 #[target_feature(enable = "simd128")]
1285 pub unsafe fn f32x4_ge(a
: v128
, b
: v128
) -> v128
{
1286 transmute(simd_ge
::<_
, i32x4
>(a
.as_f32x4(), b
.as_f32x4()))
1289 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
1290 /// floating point numbers.
1292 /// Returns a new vector where each lane is all ones if the pairwise elements
1293 /// were equal, or all zeros if the elements were not equal.
1295 #[cfg_attr(test, assert_instr(f64x2.eq))]
1296 #[target_feature(enable = "simd128")]
1297 pub unsafe fn f64x2_eq(a
: v128
, b
: v128
) -> v128
{
1298 transmute(simd_eq
::<_
, i64x2
>(a
.as_f64x2(), b
.as_f64x2()))
1301 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
1302 /// floating point numbers.
1304 /// Returns a new vector where each lane is all ones if the pairwise elements
1305 /// were not equal, or all zeros if the elements were equal.
1307 #[cfg_attr(test, assert_instr(f64x2.ne))]
1308 #[target_feature(enable = "simd128")]
1309 pub unsafe fn f64x2_ne(a
: v128
, b
: v128
) -> v128
{
1310 transmute(simd_ne
::<_
, i64x2
>(a
.as_f64x2(), b
.as_f64x2()))
1313 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
1314 /// floating point numbers.
1316 /// Returns a new vector where each lane is all ones if the pairwise left
1317 /// element is less than the pairwise right element, or all zeros otherwise.
1319 #[cfg_attr(test, assert_instr(f64x2.lt))]
1320 #[target_feature(enable = "simd128")]
1321 pub unsafe fn f64x2_lt(a
: v128
, b
: v128
) -> v128
{
1322 transmute(simd_lt
::<_
, i64x2
>(a
.as_f64x2(), b
.as_f64x2()))
1325 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
1326 /// floating point numbers.
1328 /// Returns a new vector where each lane is all ones if the pairwise left
1329 /// element is greater than the pairwise right element, or all zeros otherwise.
1331 #[cfg_attr(test, assert_instr(f64x2.gt))]
1332 #[target_feature(enable = "simd128")]
1333 pub unsafe fn f64x2_gt(a
: v128
, b
: v128
) -> v128
{
1334 transmute(simd_gt
::<_
, i64x2
>(a
.as_f64x2(), b
.as_f64x2()))
1337 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
1338 /// floating point numbers.
1340 /// Returns a new vector where each lane is all ones if the pairwise left
1341 /// element is less than the pairwise right element, or all zeros otherwise.
1343 #[cfg_attr(test, assert_instr(f64x2.le))]
1344 #[target_feature(enable = "simd128")]
1345 pub unsafe fn f64x2_le(a
: v128
, b
: v128
) -> v128
{
1346 transmute(simd_le
::<_
, i64x2
>(a
.as_f64x2(), b
.as_f64x2()))
1349 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
1350 /// floating point numbers.
1352 /// Returns a new vector where each lane is all ones if the pairwise left
1353 /// element is greater than the pairwise right element, or all zeros otherwise.
1355 #[cfg_attr(test, assert_instr(f64x2.ge))]
1356 #[target_feature(enable = "simd128")]
1357 pub unsafe fn f64x2_ge(a
: v128
, b
: v128
) -> v128
{
1358 transmute(simd_ge
::<_
, i64x2
>(a
.as_f64x2(), b
.as_f64x2()))
1361 /// Flips each bit of the 128-bit input vector.
1363 #[cfg_attr(test, assert_instr(v128.not))]
1364 #[target_feature(enable = "simd128")]
1365 pub unsafe fn v128_not(a
: v128
) -> v128
{
1366 transmute(simd_xor(a
.as_i64x2(), i64x2(!0, !0)))
1369 /// Performs a bitwise and of the two input 128-bit vectors, returning the
1370 /// resulting vector.
1372 #[cfg_attr(test, assert_instr(v128.and))]
1373 #[target_feature(enable = "simd128")]
1374 pub unsafe fn v128_and(a
: v128
, b
: v128
) -> v128
{
1375 transmute(simd_and(a
.as_i64x2(), b
.as_i64x2()))
1378 /// Bitwise AND of bits of `a` and the logical inverse of bits of `b`.
1380 /// This operation is equivalent to `v128.and(a, v128.not(b))`
1382 #[cfg_attr(all(test, all_simd), assert_instr(v128.andnot))]
1383 #[target_feature(enable = "simd128")]
1384 pub unsafe fn v128_andnot(a
: v128
, b
: v128
) -> v128
{
1387 simd_xor(b
.as_i64x2(), i64x2(-1, -1)),
1391 /// Performs a bitwise or of the two input 128-bit vectors, returning the
1392 /// resulting vector.
1394 #[cfg_attr(test, assert_instr(v128.or))]
1395 #[target_feature(enable = "simd128")]
1396 pub unsafe fn v128_or(a
: v128
, b
: v128
) -> v128
{
1397 transmute(simd_or(a
.as_i64x2(), b
.as_i64x2()))
1400 /// Performs a bitwise xor of the two input 128-bit vectors, returning the
1401 /// resulting vector.
1403 #[cfg_attr(test, assert_instr(v128.xor))]
1404 #[target_feature(enable = "simd128")]
1405 pub unsafe fn v128_xor(a
: v128
, b
: v128
) -> v128
{
1406 transmute(simd_xor(a
.as_i64x2(), b
.as_i64x2()))
1409 /// Use the bitmask in `c` to select bits from `v1` when 1 and `v2` when 0.
1411 #[cfg_attr(test, assert_instr(v128.bitselect))]
1412 #[target_feature(enable = "simd128")]
1413 pub unsafe fn v128_bitselect(v1
: v128
, v2
: v128
, c
: v128
) -> v128
{
1414 transmute(llvm_bitselect(v1
.as_i8x16(), v2
.as_i8x16(), c
.as_i8x16()))
1417 /// Lane-wise wrapping absolute value.
1419 // #[cfg_attr(test, assert_instr(i8x16.abs))] // FIXME support not in our LLVM yet
1420 #[target_feature(enable = "simd128")]
1421 pub unsafe fn i8x16_abs(a
: v128
) -> v128
{
1422 let a
= transmute
::<_
, i8x16
>(a
);
1423 let zero
= i8x16
::splat(0);
1424 transmute(simd_select
::<m8x16
, i8x16
>(
1431 /// Negates a 128-bit vectors intepreted as sixteen 8-bit signed integers
1433 #[cfg_attr(test, assert_instr(i8x16.neg))]
1434 #[target_feature(enable = "simd128")]
1435 pub unsafe fn i8x16_neg(a
: v128
) -> v128
{
1436 transmute(simd_mul(a
.as_i8x16(), i8x16
::splat(-1)))
1439 /// Returns 1 if any lane is nonzero or 0 if all lanes are zero.
1441 #[cfg_attr(test, assert_instr(i8x16.any_true))]
1442 #[target_feature(enable = "simd128")]
1443 pub unsafe fn i8x16_any_true(a
: v128
) -> i32 {
1444 llvm_i8x16_any_true(a
.as_i8x16())
1447 /// Returns 1 if all lanes are nonzero or 0 if any lane is nonzero.
1449 #[cfg_attr(test, assert_instr(i8x16.all_true))]
1450 #[target_feature(enable = "simd128")]
1451 pub unsafe fn i8x16_all_true(a
: v128
) -> i32 {
1452 llvm_i8x16_all_true(a
.as_i8x16())
1455 // FIXME: not available in our LLVM yet
1456 // /// Extracts the high bit for each lane in `a` and produce a scalar mask with
1457 // /// all bits concatenated.
1459 // #[cfg_attr(test, assert_instr(i8x16.all_true))]
1460 // pub unsafe fn i8x16_bitmask(a: v128) -> i32 {
1461 // llvm_bitmask_i8x16(transmute(a))
1464 /// Converts two input vectors into a smaller lane vector by narrowing each
1467 /// Signed saturation to 0x7f or 0x80 is used and the input lanes are always
1468 /// interpreted as signed integers.
1470 #[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_s))]
1471 #[target_feature(enable = "simd128")]
1472 pub unsafe fn i8x16_narrow_i16x8_s(a
: v128
, b
: v128
) -> v128
{
1473 transmute(llvm_narrow_i8x16_s(transmute(a
), transmute(b
)))
1476 /// Converts two input vectors into a smaller lane vector by narrowing each
1479 /// Signed saturation to 0x00 or 0xff is used and the input lanes are always
1480 /// interpreted as signed integers.
1482 #[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_u))]
1483 #[target_feature(enable = "simd128")]
1484 pub unsafe fn i8x16_narrow_i16x8_u(a
: v128
, b
: v128
) -> v128
{
1485 transmute(llvm_narrow_i8x16_u(transmute(a
), transmute(b
)))
1488 /// Shifts each lane to the left by the specified number of bits.
1490 /// Only the low bits of the shift amount are used if the shift amount is
1491 /// greater than the lane width.
1493 #[cfg_attr(test, assert_instr(i8x16.shl))]
1494 #[target_feature(enable = "simd128")]
1495 pub unsafe fn i8x16_shl(a
: v128
, amt
: u32) -> v128
{
1496 transmute(simd_shl(a
.as_i8x16(), i8x16
::splat(amt
as i8)))
1499 /// Shifts each lane to the right by the specified number of bits, sign
1502 /// Only the low bits of the shift amount are used if the shift amount is
1503 /// greater than the lane width.
1505 #[cfg_attr(test, assert_instr(i8x16.shr_s))]
1506 #[target_feature(enable = "simd128")]
1507 pub unsafe fn i8x16_shr_s(a
: v128
, amt
: u32) -> v128
{
1508 transmute(simd_shr(a
.as_i8x16(), i8x16
::splat(amt
as i8)))
1511 /// Shifts each lane to the right by the specified number of bits, shifting in
1514 /// Only the low bits of the shift amount are used if the shift amount is
1515 /// greater than the lane width.
1517 #[cfg_attr(test, assert_instr(i8x16.shr_u))]
1518 #[target_feature(enable = "simd128")]
1519 pub unsafe fn i8x16_shr_u(a
: v128
, amt
: u32) -> v128
{
1520 transmute(simd_shr(a
.as_u8x16(), u8x16
::splat(amt
as u8)))
1523 /// Adds two 128-bit vectors as if they were two packed sixteen 8-bit integers.
1525 #[cfg_attr(test, assert_instr(i8x16.add))]
1526 #[target_feature(enable = "simd128")]
1527 pub unsafe fn i8x16_add(a
: v128
, b
: v128
) -> v128
{
1528 transmute(simd_add(a
.as_i8x16(), b
.as_i8x16()))
1531 /// Adds two 128-bit vectors as if they were two packed sixteen 8-bit signed
1532 /// integers, saturating on overflow to `i8::MAX`.
1534 #[cfg_attr(test, assert_instr(i8x16.add_saturate_s))]
1535 #[target_feature(enable = "simd128")]
1536 pub unsafe fn i8x16_add_saturate_s(a
: v128
, b
: v128
) -> v128
{
1537 transmute(llvm_i8x16_add_saturate_s(a
.as_i8x16(), b
.as_i8x16()))
1540 /// Adds two 128-bit vectors as if they were two packed sixteen 8-bit unsigned
1541 /// integers, saturating on overflow to `u8::MAX`.
1543 #[cfg_attr(test, assert_instr(i8x16.add_saturate_u))]
1544 #[target_feature(enable = "simd128")]
1545 pub unsafe fn i8x16_add_saturate_u(a
: v128
, b
: v128
) -> v128
{
1546 transmute(llvm_i8x16_add_saturate_u(a
.as_i8x16(), b
.as_i8x16()))
1549 /// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit integers.
1551 #[cfg_attr(test, assert_instr(i8x16.sub))]
1552 #[target_feature(enable = "simd128")]
1553 pub unsafe fn i8x16_sub(a
: v128
, b
: v128
) -> v128
{
1554 transmute(simd_sub(a
.as_i8x16(), b
.as_i8x16()))
1557 /// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit
1558 /// signed integers, saturating on overflow to `i8::MIN`.
1560 #[cfg_attr(test, assert_instr(i8x16.sub_saturate_s))]
1561 #[target_feature(enable = "simd128")]
1562 pub unsafe fn i8x16_sub_saturate_s(a
: v128
, b
: v128
) -> v128
{
1563 transmute(llvm_i8x16_sub_saturate_s(a
.as_i8x16(), b
.as_i8x16()))
1566 /// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit
1567 /// unsigned integers, saturating on overflow to 0.
1569 #[cfg_attr(test, assert_instr(i8x16.sub_saturate_u))]
1570 #[target_feature(enable = "simd128")]
1571 pub unsafe fn i8x16_sub_saturate_u(a
: v128
, b
: v128
) -> v128
{
1572 transmute(llvm_i8x16_sub_saturate_u(a
.as_i8x16(), b
.as_i8x16()))
1575 /// Compares lane-wise signed integers, and returns the minimum of
1578 #[cfg_attr(test, assert_instr(i8x16.min_s))]
1579 #[target_feature(enable = "simd128")]
1580 pub unsafe fn i8x16_min_s(a
: v128
, b
: v128
) -> v128
{
1581 let a
= a
.as_i8x16();
1582 let b
= b
.as_i8x16();
1583 transmute(simd_select
::<i8x16
, _
>(simd_lt(a
, b
), a
, b
))
1586 /// Compares lane-wise unsigned integers, and returns the minimum of
1589 #[cfg_attr(test, assert_instr(i8x16.min_u))]
1590 #[target_feature(enable = "simd128")]
1591 pub unsafe fn i8x16_min_u(a
: v128
, b
: v128
) -> v128
{
1592 let a
= transmute
::<_
, u8x16
>(a
);
1593 let b
= transmute
::<_
, u8x16
>(b
);
1594 transmute(simd_select
::<i8x16
, _
>(simd_lt(a
, b
), a
, b
))
1597 /// Compares lane-wise signed integers, and returns the maximum of
1600 #[cfg_attr(test, assert_instr(i8x16.max_s))]
1601 #[target_feature(enable = "simd128")]
1602 pub unsafe fn i8x16_max_s(a
: v128
, b
: v128
) -> v128
{
1603 let a
= transmute
::<_
, i8x16
>(a
);
1604 let b
= transmute
::<_
, i8x16
>(b
);
1605 transmute(simd_select
::<i8x16
, _
>(simd_gt(a
, b
), a
, b
))
1608 /// Compares lane-wise unsigned integers, and returns the maximum of
1611 #[cfg_attr(test, assert_instr(i8x16.max_u))]
1612 #[target_feature(enable = "simd128")]
1613 pub unsafe fn i8x16_max_u(a
: v128
, b
: v128
) -> v128
{
1614 let a
= transmute
::<_
, u8x16
>(a
);
1615 let b
= transmute
::<_
, u8x16
>(b
);
1616 transmute(simd_select
::<i8x16
, _
>(simd_gt(a
, b
), a
, b
))
1619 /// Lane-wise rounding average.
1621 #[cfg_attr(test, assert_instr(i8x16.avgr_u))]
1622 #[target_feature(enable = "simd128")]
1623 pub unsafe fn i8x16_avgr_u(a
: v128
, b
: v128
) -> v128
{
1624 transmute(llvm_avgr_u_i8x16(transmute(a
), transmute(b
)))
1627 /// Lane-wise wrapping absolute value.
1629 // #[cfg_attr(test, assert_instr(i16x8.abs))] // FIXME support not in our LLVM yet
1630 #[target_feature(enable = "simd128")]
1631 pub unsafe fn i16x8_abs(a
: v128
) -> v128
{
1632 let a
= transmute
::<_
, i16x8
>(a
);
1633 let zero
= i16x8
::splat(0);
1634 transmute(simd_select
::<m16x8
, i16x8
>(
1641 /// Negates a 128-bit vectors intepreted as eight 16-bit signed integers
1643 #[cfg_attr(test, assert_instr(i16x8.neg))]
1644 #[target_feature(enable = "simd128")]
1645 pub unsafe fn i16x8_neg(a
: v128
) -> v128
{
1646 transmute(simd_mul(a
.as_i16x8(), i16x8
::splat(-1)))
1649 /// Returns 1 if any lane is nonzero or 0 if all lanes are zero.
1651 #[cfg_attr(test, assert_instr(i16x8.any_true))]
1652 #[target_feature(enable = "simd128")]
1653 pub unsafe fn i16x8_any_true(a
: v128
) -> i32 {
1654 llvm_i16x8_any_true(a
.as_i16x8())
1657 /// Returns 1 if all lanes are nonzero or 0 if any lane is nonzero.
1659 #[cfg_attr(test, assert_instr(i16x8.all_true))]
1660 #[target_feature(enable = "simd128")]
1661 pub unsafe fn i16x8_all_true(a
: v128
) -> i32 {
1662 llvm_i16x8_all_true(a
.as_i16x8())
1665 // FIXME: not available in our LLVM yet
1666 // /// Extracts the high bit for each lane in `a` and produce a scalar mask with
1667 // /// all bits concatenated.
1669 // #[cfg_attr(test, assert_instr(i16x8.all_true))]
1670 // pub unsafe fn i16x8_bitmask(a: v128) -> i32 {
1671 // llvm_bitmask_i16x8(transmute(a))
1674 /// Converts two input vectors into a smaller lane vector by narrowing each
1677 /// Signed saturation to 0x7fff or 0x8000 is used and the input lanes are always
1678 /// interpreted as signed integers.
1680 #[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_s))]
1681 #[target_feature(enable = "simd128")]
1682 pub unsafe fn i16x8_narrow_i32x4_s(a
: v128
, b
: v128
) -> v128
{
1683 transmute(llvm_narrow_i16x8_s(transmute(a
), transmute(b
)))
1686 /// Converts two input vectors into a smaller lane vector by narrowing each
1689 /// Signed saturation to 0x0000 or 0xffff is used and the input lanes are always
1690 /// interpreted as signed integers.
1692 #[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_u))]
1693 #[target_feature(enable = "simd128")]
1694 pub unsafe fn i16x8_narrow_i32x4_u(a
: v128
, b
: v128
) -> v128
{
1695 transmute(llvm_narrow_i16x8_u(transmute(a
), transmute(b
)))
1698 /// Converts low half of the smaller lane vector to a larger lane
1699 /// vector, sign extended.
1701 #[cfg_attr(test, assert_instr(i16x8.widen_low_i8x16_s))]
1702 pub unsafe fn i16x8_widen_low_i8x16_s(a
: v128
) -> v128
{
1703 transmute(llvm_widen_low_i16x8_s(transmute(a
)))
1706 /// Converts high half of the smaller lane vector to a larger lane
1707 /// vector, sign extended.
1709 #[cfg_attr(test, assert_instr(i16x8.widen_high_i8x16_s))]
1710 pub unsafe fn i16x8_widen_high_i8x16_s(a
: v128
) -> v128
{
1711 transmute(llvm_widen_high_i16x8_s(transmute(a
)))
1714 /// Converts low half of the smaller lane vector to a larger lane
1715 /// vector, zero extended.
1717 #[cfg_attr(test, assert_instr(i16x8.widen_low_i8x16_u))]
1718 pub unsafe fn i16x8_widen_low_i8x16_u(a
: v128
) -> v128
{
1719 transmute(llvm_widen_low_i16x8_u(transmute(a
)))
1722 /// Converts high half of the smaller lane vector to a larger lane
1723 /// vector, zero extended.
1725 #[cfg_attr(test, assert_instr(i16x8.widen_high_i8x16_u))]
1726 pub unsafe fn i16x8_widen_high_i8x16_u(a
: v128
) -> v128
{
1727 transmute(llvm_widen_high_i16x8_u(transmute(a
)))
1730 /// Shifts each lane to the left by the specified number of bits.
1732 /// Only the low bits of the shift amount are used if the shift amount is
1733 /// greater than the lane width.
1735 #[cfg_attr(test, assert_instr(i16x8.shl))]
1736 #[target_feature(enable = "simd128")]
1737 pub unsafe fn i16x8_shl(a
: v128
, amt
: u32) -> v128
{
1738 transmute(simd_shl(a
.as_i16x8(), i16x8
::splat(amt
as i16)))
1741 /// Shifts each lane to the right by the specified number of bits, sign
1744 /// Only the low bits of the shift amount are used if the shift amount is
1745 /// greater than the lane width.
1747 #[cfg_attr(test, assert_instr(i16x8.shr_s))]
1748 #[target_feature(enable = "simd128")]
1749 pub unsafe fn i16x8_shr_s(a
: v128
, amt
: u32) -> v128
{
1750 transmute(simd_shr(a
.as_i16x8(), i16x8
::splat(amt
as i16)))
1753 /// Shifts each lane to the right by the specified number of bits, shifting in
1756 /// Only the low bits of the shift amount are used if the shift amount is
1757 /// greater than the lane width.
1759 #[cfg_attr(test, assert_instr(i16x8.shr_u))]
1760 #[target_feature(enable = "simd128")]
1761 pub unsafe fn i16x8_shr_u(a
: v128
, amt
: u32) -> v128
{
1762 transmute(simd_shr(a
.as_u16x8(), u16x8
::splat(amt
as u16)))
1765 /// Adds two 128-bit vectors as if they were two packed eight 16-bit integers.
1767 #[cfg_attr(test, assert_instr(i16x8.add))]
1768 #[target_feature(enable = "simd128")]
1769 pub unsafe fn i16x8_add(a
: v128
, b
: v128
) -> v128
{
1770 transmute(simd_add(a
.as_i16x8(), b
.as_i16x8()))
1773 /// Adds two 128-bit vectors as if they were two packed eight 16-bit signed
1774 /// integers, saturating on overflow to `i16::MAX`.
1776 #[cfg_attr(test, assert_instr(i16x8.add_saturate_s))]
1777 #[target_feature(enable = "simd128")]
1778 pub unsafe fn i16x8_add_saturate_s(a
: v128
, b
: v128
) -> v128
{
1779 transmute(llvm_i16x8_add_saturate_s(a
.as_i16x8(), b
.as_i16x8()))
1782 /// Adds two 128-bit vectors as if they were two packed eight 16-bit unsigned
1783 /// integers, saturating on overflow to `u16::MAX`.
1785 #[cfg_attr(test, assert_instr(i16x8.add_saturate_u))]
1786 #[target_feature(enable = "simd128")]
1787 pub unsafe fn i16x8_add_saturate_u(a
: v128
, b
: v128
) -> v128
{
1788 transmute(llvm_i16x8_add_saturate_u(a
.as_i16x8(), b
.as_i16x8()))
1791 /// Subtracts two 128-bit vectors as if they were two packed eight 16-bit integers.
1793 #[cfg_attr(test, assert_instr(i16x8.sub))]
1794 #[target_feature(enable = "simd128")]
1795 pub unsafe fn i16x8_sub(a
: v128
, b
: v128
) -> v128
{
1796 transmute(simd_sub(a
.as_i16x8(), b
.as_i16x8()))
1799 /// Subtracts two 128-bit vectors as if they were two packed eight 16-bit
1800 /// signed integers, saturating on overflow to `i16::MIN`.
1802 #[cfg_attr(test, assert_instr(i16x8.sub_saturate_s))]
1803 #[target_feature(enable = "simd128")]
1804 pub unsafe fn i16x8_sub_saturate_s(a
: v128
, b
: v128
) -> v128
{
1805 transmute(llvm_i16x8_sub_saturate_s(a
.as_i16x8(), b
.as_i16x8()))
1808 /// Subtracts two 128-bit vectors as if they were two packed eight 16-bit
1809 /// unsigned integers, saturating on overflow to 0.
1811 #[cfg_attr(test, assert_instr(i16x8.sub_saturate_u))]
1812 #[target_feature(enable = "simd128")]
1813 pub unsafe fn i16x8_sub_saturate_u(a
: v128
, b
: v128
) -> v128
{
1814 transmute(llvm_i16x8_sub_saturate_u(a
.as_i16x8(), b
.as_i16x8()))
1817 /// Multiplies two 128-bit vectors as if they were two packed eight 16-bit
1818 /// signed integers.
1820 #[cfg_attr(test, assert_instr(i16x8.mul))]
1821 #[target_feature(enable = "simd128")]
1822 pub unsafe fn i16x8_mul(a
: v128
, b
: v128
) -> v128
{
1823 transmute(simd_mul(a
.as_i16x8(), b
.as_i16x8()))
1826 /// Compares lane-wise signed integers, and returns the minimum of
1829 #[cfg_attr(test, assert_instr(i16x8.min_s))]
1830 #[target_feature(enable = "simd128")]
1831 pub unsafe fn i16x8_min_s(a
: v128
, b
: v128
) -> v128
{
1832 let a
= transmute
::<_
, i16x8
>(a
);
1833 let b
= transmute
::<_
, i16x8
>(b
);
1834 transmute(simd_select
::<i16x8
, _
>(simd_lt(a
, b
), a
, b
))
1837 /// Compares lane-wise unsigned integers, and returns the minimum of
1840 #[cfg_attr(test, assert_instr(i16x8.min_u))]
1841 #[target_feature(enable = "simd128")]
1842 pub unsafe fn i16x8_min_u(a
: v128
, b
: v128
) -> v128
{
1843 let a
= transmute
::<_
, u16x8
>(a
);
1844 let b
= transmute
::<_
, u16x8
>(b
);
1845 transmute(simd_select
::<i16x8
, _
>(simd_lt(a
, b
), a
, b
))
1848 /// Compares lane-wise signed integers, and returns the maximum of
1851 #[cfg_attr(test, assert_instr(i16x8.max_s))]
1852 #[target_feature(enable = "simd128")]
1853 pub unsafe fn i16x8_max_s(a
: v128
, b
: v128
) -> v128
{
1854 let a
= transmute
::<_
, i16x8
>(a
);
1855 let b
= transmute
::<_
, i16x8
>(b
);
1856 transmute(simd_select
::<i16x8
, _
>(simd_gt(a
, b
), a
, b
))
1859 /// Compares lane-wise unsigned integers, and returns the maximum of
1862 #[cfg_attr(test, assert_instr(i16x8.max_u))]
1863 #[target_feature(enable = "simd128")]
1864 pub unsafe fn i16x8_max_u(a
: v128
, b
: v128
) -> v128
{
1865 let a
= transmute
::<_
, u16x8
>(a
);
1866 let b
= transmute
::<_
, u16x8
>(b
);
1867 transmute(simd_select
::<i16x8
, _
>(simd_gt(a
, b
), a
, b
))
1870 /// Lane-wise rounding average.
1872 #[cfg_attr(test, assert_instr(i16x8.avgr_u))]
1873 #[target_feature(enable = "simd128")]
1874 pub unsafe fn i16x8_avgr_u(a
: v128
, b
: v128
) -> v128
{
1875 transmute(llvm_avgr_u_i16x8(transmute(a
), transmute(b
)))
1878 /// Lane-wise wrapping absolute value.
1880 // #[cfg_attr(test, assert_instr(i32x4.abs))] // FIXME support not in our LLVM yet
1881 #[target_feature(enable = "simd128")]
1882 pub unsafe fn i32x4_abs(a
: v128
) -> v128
{
1883 let a
= transmute
::<_
, i32x4
>(a
);
1884 let zero
= i32x4
::splat(0);
1885 transmute(simd_select
::<m32x4
, i32x4
>(
1892 /// Negates a 128-bit vectors intepreted as four 32-bit signed integers
1894 #[cfg_attr(test, assert_instr(i32x4.neg))]
1895 #[target_feature(enable = "simd128")]
1896 pub unsafe fn i32x4_neg(a
: v128
) -> v128
{
1897 transmute(simd_mul(a
.as_i32x4(), i32x4
::splat(-1)))
1900 /// Returns 1 if any lane is nonzero or 0 if all lanes are zero.
1902 #[cfg_attr(test, assert_instr(i32x4.any_true))]
1903 #[target_feature(enable = "simd128")]
1904 pub unsafe fn i32x4_any_true(a
: v128
) -> i32 {
1905 llvm_i32x4_any_true(a
.as_i32x4())
1908 /// Returns 1 if all lanes are nonzero or 0 if any lane is nonzero.
1910 #[cfg_attr(test, assert_instr(i32x4.all_true))]
1911 #[target_feature(enable = "simd128")]
1912 pub unsafe fn i32x4_all_true(a
: v128
) -> i32 {
1913 llvm_i32x4_all_true(a
.as_i32x4())
1916 // FIXME: not available in our LLVM yet
1917 // /// Extracts the high bit for each lane in `a` and produce a scalar mask with
1918 // /// all bits concatenated.
// #[cfg_attr(test, assert_instr(i32x4.bitmask))]
1921 // pub unsafe fn i32x4_bitmask(a: v128) -> i32 {
1922 // llvm_bitmask_i32x4(transmute(a))
1925 /// Converts low half of the smaller lane vector to a larger lane
1926 /// vector, sign extended.
1928 #[cfg_attr(test, assert_instr(i32x4.widen_low_i16x8_s))]
1929 pub unsafe fn i32x4_widen_low_i16x8_s(a
: v128
) -> v128
{
1930 transmute(llvm_widen_low_i32x4_s(transmute(a
)))
1933 /// Converts high half of the smaller lane vector to a larger lane
1934 /// vector, sign extended.
1936 #[cfg_attr(test, assert_instr(i32x4.widen_high_i16x8_s))]
1937 pub unsafe fn i32x4_widen_high_i16x8_s(a
: v128
) -> v128
{
1938 transmute(llvm_widen_high_i32x4_s(transmute(a
)))
1941 /// Converts low half of the smaller lane vector to a larger lane
1942 /// vector, zero extended.
1944 #[cfg_attr(test, assert_instr(i32x4.widen_low_i16x8_u))]
1945 pub unsafe fn i32x4_widen_low_i16x8_u(a
: v128
) -> v128
{
1946 transmute(llvm_widen_low_i32x4_u(transmute(a
)))
1949 /// Converts high half of the smaller lane vector to a larger lane
1950 /// vector, zero extended.
1952 #[cfg_attr(test, assert_instr(i32x4.widen_high_i16x8_u))]
1953 pub unsafe fn i32x4_widen_high_i16x8_u(a
: v128
) -> v128
{
1954 transmute(llvm_widen_high_i32x4_u(transmute(a
)))
1957 /// Shifts each lane to the left by the specified number of bits.
1959 /// Only the low bits of the shift amount are used if the shift amount is
1960 /// greater than the lane width.
1962 #[cfg_attr(test, assert_instr(i32x4.shl))]
1963 #[target_feature(enable = "simd128")]
1964 pub unsafe fn i32x4_shl(a
: v128
, amt
: u32) -> v128
{
1965 transmute(simd_shl(a
.as_i32x4(), i32x4
::splat(amt
as i32)))
1968 /// Shifts each lane to the right by the specified number of bits, sign
1971 /// Only the low bits of the shift amount are used if the shift amount is
1972 /// greater than the lane width.
1974 #[cfg_attr(test, assert_instr(i32x4.shr_s))]
1975 #[target_feature(enable = "simd128")]
1976 pub unsafe fn i32x4_shr_s(a
: v128
, amt
: u32) -> v128
{
1977 transmute(simd_shr(a
.as_i32x4(), i32x4
::splat(amt
as i32)))
1980 /// Shifts each lane to the right by the specified number of bits, shifting in
1983 /// Only the low bits of the shift amount are used if the shift amount is
1984 /// greater than the lane width.
1986 #[cfg_attr(test, assert_instr(i32x4.shr_u))]
1987 #[target_feature(enable = "simd128")]
1988 pub unsafe fn i32x4_shr_u(a
: v128
, amt
: u32) -> v128
{
1989 transmute(simd_shr(a
.as_u32x4(), u32x4
::splat(amt
as u32)))
1992 /// Adds two 128-bit vectors as if they were two packed four 32-bit integers.
1994 #[cfg_attr(test, assert_instr(i32x4.add))]
1995 #[target_feature(enable = "simd128")]
1996 pub unsafe fn i32x4_add(a
: v128
, b
: v128
) -> v128
{
1997 transmute(simd_add(a
.as_i32x4(), b
.as_i32x4()))
2000 /// Subtracts two 128-bit vectors as if they were two packed four 32-bit integers.
2002 #[cfg_attr(test, assert_instr(i32x4.sub))]
2003 #[target_feature(enable = "simd128")]
2004 pub unsafe fn i32x4_sub(a
: v128
, b
: v128
) -> v128
{
2005 transmute(simd_sub(a
.as_i32x4(), b
.as_i32x4()))
2008 /// Multiplies two 128-bit vectors as if they were two packed four 32-bit
2009 /// signed integers.
2011 #[cfg_attr(test, assert_instr(i32x4.mul))]
2012 #[target_feature(enable = "simd128")]
2013 pub unsafe fn i32x4_mul(a
: v128
, b
: v128
) -> v128
{
2014 transmute(simd_mul(a
.as_i32x4(), b
.as_i32x4()))
2017 /// Compares lane-wise signed integers, and returns the minimum of
2020 #[cfg_attr(test, assert_instr(i32x4.min_s))]
2021 #[target_feature(enable = "simd128")]
2022 pub unsafe fn i32x4_min_s(a
: v128
, b
: v128
) -> v128
{
2023 let a
= transmute
::<_
, i32x4
>(a
);
2024 let b
= transmute
::<_
, i32x4
>(b
);
2025 transmute(simd_select
::<i32x4
, _
>(simd_lt(a
, b
), a
, b
))
2028 /// Compares lane-wise unsigned integers, and returns the minimum of
2031 #[cfg_attr(test, assert_instr(i32x4.min_u))]
2032 #[target_feature(enable = "simd128")]
2033 pub unsafe fn i32x4_min_u(a
: v128
, b
: v128
) -> v128
{
2034 let a
= transmute
::<_
, u32x4
>(a
);
2035 let b
= transmute
::<_
, u32x4
>(b
);
2036 transmute(simd_select
::<i32x4
, _
>(simd_lt(a
, b
), a
, b
))
2039 /// Compares lane-wise signed integers, and returns the maximum of
2042 #[cfg_attr(test, assert_instr(i32x4.max_s))]
2043 #[target_feature(enable = "simd128")]
2044 pub unsafe fn i32x4_max_s(a
: v128
, b
: v128
) -> v128
{
2045 let a
= transmute
::<_
, i32x4
>(a
);
2046 let b
= transmute
::<_
, i32x4
>(b
);
2047 transmute(simd_select
::<i32x4
, _
>(simd_gt(a
, b
), a
, b
))
2050 /// Compares lane-wise unsigned integers, and returns the maximum of
2053 #[cfg_attr(test, assert_instr(i32x4.max_u))]
2054 #[target_feature(enable = "simd128")]
2055 pub unsafe fn i32x4_max_u(a
: v128
, b
: v128
) -> v128
{
2056 let a
= transmute
::<_
, u32x4
>(a
);
2057 let b
= transmute
::<_
, u32x4
>(b
);
2058 transmute(simd_select
::<i32x4
, _
>(simd_gt(a
, b
), a
, b
))
2061 /// Negates a 128-bit vectors intepreted as two 64-bit signed integers
2063 #[cfg_attr(test, assert_instr(i64x2.neg))]
2064 #[target_feature(enable = "simd128")]
2065 pub unsafe fn i64x2_neg(a
: v128
) -> v128
{
2066 transmute(simd_mul(a
.as_i64x2(), i64x2
::splat(-1)))
2069 /// Shifts each lane to the left by the specified number of bits.
2071 /// Only the low bits of the shift amount are used if the shift amount is
2072 /// greater than the lane width.
2074 #[cfg_attr(test, assert_instr(i64x2.shl))]
2075 #[target_feature(enable = "simd128")]
2076 pub unsafe fn i64x2_shl(a
: v128
, amt
: u32) -> v128
{
2077 transmute(simd_shl(a
.as_i64x2(), i64x2
::splat(amt
as i64)))
2080 /// Shifts each lane to the right by the specified number of bits, sign
2083 /// Only the low bits of the shift amount are used if the shift amount is
2084 /// greater than the lane width.
2086 #[cfg_attr(test, assert_instr(i64x2.shr_s))]
2087 #[target_feature(enable = "simd128")]
2088 pub unsafe fn i64x2_shr_s(a
: v128
, amt
: u32) -> v128
{
2089 transmute(simd_shr(a
.as_i64x2(), i64x2
::splat(amt
as i64)))
2092 /// Shifts each lane to the right by the specified number of bits, shifting in
2095 /// Only the low bits of the shift amount are used if the shift amount is
2096 /// greater than the lane width.
2098 #[cfg_attr(test, assert_instr(i64x2.shr_u))]
2099 #[target_feature(enable = "simd128")]
2100 pub unsafe fn i64x2_shr_u(a
: v128
, amt
: u32) -> v128
{
2101 transmute(simd_shr(a
.as_u64x2(), u64x2
::splat(amt
as u64)))
2104 /// Adds two 128-bit vectors as if they were two packed two 64-bit integers.
2106 #[cfg_attr(test, assert_instr(i64x2.add))]
2107 #[target_feature(enable = "simd128")]
2108 pub unsafe fn i64x2_add(a
: v128
, b
: v128
) -> v128
{
2109 transmute(simd_add(a
.as_i64x2(), b
.as_i64x2()))
2112 /// Subtracts two 128-bit vectors as if they were two packed two 64-bit integers.
2114 #[cfg_attr(test, assert_instr(i64x2.sub))]
2115 #[target_feature(enable = "simd128")]
2116 pub unsafe fn i64x2_sub(a
: v128
, b
: v128
) -> v128
{
2117 transmute(simd_sub(a
.as_i64x2(), b
.as_i64x2()))
2120 /// Multiplies two 128-bit vectors as if they were two packed two 64-bit integers.
2122 // #[cfg_attr(test, assert_instr(i64x2.mul))] // FIXME: not present in our LLVM
2123 #[target_feature(enable = "simd128")]
2124 pub unsafe fn i64x2_mul(a
: v128
, b
: v128
) -> v128
{
2125 transmute(simd_mul(a
.as_i64x2(), b
.as_i64x2()))
2128 /// Calculates the absolute value of each lane of a 128-bit vector interpreted
2129 /// as four 32-bit floating point numbers.
2131 #[cfg_attr(test, assert_instr(f32x4.abs))]
2132 #[target_feature(enable = "simd128")]
2133 pub unsafe fn f32x4_abs(a
: v128
) -> v128
{
2134 transmute(llvm_f32x4_abs(a
.as_f32x4()))
2137 /// Negates each lane of a 128-bit vector interpreted as four 32-bit floating
2140 #[cfg_attr(test, assert_instr(f32x4.neg))]
2141 #[target_feature(enable = "simd128")]
2142 pub unsafe fn f32x4_neg(a
: v128
) -> v128
{
2143 f32x4_mul(a
, transmute(f32x4(-1.0, -1.0, -1.0, -1.0)))
2146 /// Calculates the square root of each lane of a 128-bit vector interpreted as
2147 /// four 32-bit floating point numbers.
2149 #[cfg_attr(test, assert_instr(f32x4.sqrt))]
2150 #[target_feature(enable = "simd128")]
2151 pub unsafe fn f32x4_sqrt(a
: v128
) -> v128
{
2152 transmute(llvm_f32x4_sqrt(a
.as_f32x4()))
2155 /// Adds pairwise lanes of two 128-bit vectors interpreted as four 32-bit
2156 /// floating point numbers.
2158 #[cfg_attr(test, assert_instr(f32x4.add))]
2159 #[target_feature(enable = "simd128")]
2160 pub unsafe fn f32x4_add(a
: v128
, b
: v128
) -> v128
{
2161 transmute(simd_add(a
.as_f32x4(), b
.as_f32x4()))
2164 /// Subtracts pairwise lanes of two 128-bit vectors interpreted as four 32-bit
2165 /// floating point numbers.
2167 #[cfg_attr(test, assert_instr(f32x4.sub))]
2168 #[target_feature(enable = "simd128")]
2169 pub unsafe fn f32x4_sub(a
: v128
, b
: v128
) -> v128
{
2170 transmute(simd_sub(a
.as_f32x4(), b
.as_f32x4()))
2173 /// Multiplies pairwise lanes of two 128-bit vectors interpreted as four 32-bit
2174 /// floating point numbers.
2176 #[cfg_attr(test, assert_instr(f32x4.mul))]
2177 #[target_feature(enable = "simd128")]
2178 pub unsafe fn f32x4_mul(a
: v128
, b
: v128
) -> v128
{
2179 transmute(simd_mul(a
.as_f32x4(), b
.as_f32x4()))
2182 /// Divides pairwise lanes of two 128-bit vectors interpreted as four 32-bit
2183 /// floating point numbers.
2185 #[cfg_attr(test, assert_instr(f32x4.div))]
2186 #[target_feature(enable = "simd128")]
2187 pub unsafe fn f32x4_div(a
: v128
, b
: v128
) -> v128
{
2188 transmute(simd_div(a
.as_f32x4(), b
.as_f32x4()))
2191 /// Calculates the minimum of pairwise lanes of two 128-bit vectors interpreted
2192 /// as four 32-bit floating point numbers.
2194 #[cfg_attr(test, assert_instr(f32x4.min))]
2195 #[target_feature(enable = "simd128")]
2196 pub unsafe fn f32x4_min(a
: v128
, b
: v128
) -> v128
{
2197 transmute(llvm_f32x4_min(a
.as_f32x4(), b
.as_f32x4()))
2200 /// Calculates the maximum of pairwise lanes of two 128-bit vectors interpreted
2201 /// as four 32-bit floating point numbers.
2203 #[cfg_attr(test, assert_instr(f32x4.max))]
2204 #[target_feature(enable = "simd128")]
2205 pub unsafe fn f32x4_max(a
: v128
, b
: v128
) -> v128
{
2206 transmute(llvm_f32x4_max(a
.as_f32x4(), b
.as_f32x4()))
2209 /// Calculates the absolute value of each lane of a 128-bit vector interpreted
2210 /// as two 64-bit floating point numbers.
2212 #[cfg_attr(test, assert_instr(f64x2.abs))]
2213 #[target_feature(enable = "simd128")]
2214 pub unsafe fn f64x2_abs(a
: v128
) -> v128
{
2215 transmute(llvm_f64x2_abs(a
.as_f64x2()))
2218 /// Negates each lane of a 128-bit vector interpreted as two 64-bit floating
2221 #[cfg_attr(test, assert_instr(f64x2.neg))]
2222 #[target_feature(enable = "simd128")]
2223 pub unsafe fn f64x2_neg(a
: v128
) -> v128
{
2224 f64x2_mul(a
, transmute(f64x2(-1.0, -1.0)))
2227 /// Calculates the square root of each lane of a 128-bit vector interpreted as
2228 /// two 64-bit floating point numbers.
2230 #[cfg_attr(test, assert_instr(f64x2.sqrt))]
2231 #[target_feature(enable = "simd128")]
2232 pub unsafe fn f64x2_sqrt(a
: v128
) -> v128
{
2233 transmute(llvm_f64x2_sqrt(a
.as_f64x2()))
2236 /// Adds pairwise lanes of two 128-bit vectors interpreted as two 64-bit
2237 /// floating point numbers.
2239 #[cfg_attr(test, assert_instr(f64x2.add))]
2240 #[target_feature(enable = "simd128")]
2241 pub unsafe fn f64x2_add(a
: v128
, b
: v128
) -> v128
{
2242 transmute(simd_add(a
.as_f64x2(), b
.as_f64x2()))
2245 /// Subtracts pairwise lanes of two 128-bit vectors interpreted as two 64-bit
2246 /// floating point numbers.
2248 #[cfg_attr(test, assert_instr(f64x2.sub))]
2249 #[target_feature(enable = "simd128")]
2250 pub unsafe fn f64x2_sub(a
: v128
, b
: v128
) -> v128
{
2251 transmute(simd_sub(a
.as_f64x2(), b
.as_f64x2()))
2254 /// Multiplies pairwise lanes of two 128-bit vectors interpreted as two 64-bit
2255 /// floating point numbers.
2257 #[cfg_attr(test, assert_instr(f64x2.mul))]
2258 #[target_feature(enable = "simd128")]
2259 pub unsafe fn f64x2_mul(a
: v128
, b
: v128
) -> v128
{
2260 transmute(simd_mul(a
.as_f64x2(), b
.as_f64x2()))
2263 /// Divides pairwise lanes of two 128-bit vectors interpreted as two 64-bit
2264 /// floating point numbers.
2266 #[cfg_attr(test, assert_instr(f64x2.div))]
2267 #[target_feature(enable = "simd128")]
2268 pub unsafe fn f64x2_div(a
: v128
, b
: v128
) -> v128
{
2269 transmute(simd_div(a
.as_f64x2(), b
.as_f64x2()))
2272 /// Calculates the minimum of pairwise lanes of two 128-bit vectors interpreted
2273 /// as two 64-bit floating point numbers.
2275 #[cfg_attr(test, assert_instr(f64x2.min))]
2276 #[target_feature(enable = "simd128")]
2277 pub unsafe fn f64x2_min(a
: v128
, b
: v128
) -> v128
{
2278 transmute(llvm_f64x2_min(a
.as_f64x2(), b
.as_f64x2()))
2281 /// Calculates the maximum of pairwise lanes of two 128-bit vectors interpreted
2282 /// as two 64-bit floating point numbers.
2284 #[cfg_attr(test, assert_instr(f64x2.max))]
2285 #[target_feature(enable = "simd128")]
2286 pub unsafe fn f64x2_max(a
: v128
, b
: v128
) -> v128
{
2287 transmute(llvm_f64x2_max(a
.as_f64x2(), b
.as_f64x2()))
2290 /// Converts a 128-bit vector interpreted as four 32-bit floating point numbers
2291 /// into a 128-bit vector of four 32-bit signed integers.
2293 /// NaN is converted to 0 and if it's out of bounds it becomes the nearest
2294 /// representable intger.
2296 #[cfg_attr(test, assert_instr("i32x4.trunc_sat_f32x4_s"))]
2297 #[target_feature(enable = "simd128")]
2298 pub unsafe fn i32x4_trunc_sat_f32x4_s(a
: v128
) -> v128
{
2299 transmute(simd_cast
::<_
, i32x4
>(a
.as_f32x4()))
2302 /// Converts a 128-bit vector interpreted as four 32-bit floating point numbers
2303 /// into a 128-bit vector of four 32-bit unsigned integers.
2305 /// NaN is converted to 0 and if it's out of bounds it becomes the nearest
2306 /// representable intger.
2308 #[cfg_attr(test, assert_instr("i32x4.trunc_sat_f32x4_u"))]
2309 #[target_feature(enable = "simd128")]
2310 pub unsafe fn i32x4_trunc_sat_f32x4_u(a
: v128
) -> v128
{
2311 transmute(simd_cast
::<_
, u32x4
>(a
.as_f32x4()))
2314 /// Converts a 128-bit vector interpreted as four 32-bit signed integers into a
2315 /// 128-bit vector of four 32-bit floating point numbers.
2317 #[cfg_attr(test, assert_instr("f32x4.convert_i32x4_s"))]
2318 #[target_feature(enable = "simd128")]
2319 pub unsafe fn f32x4_convert_i32x4_s(a
: v128
) -> v128
{
2320 transmute(simd_cast
::<_
, f32x4
>(a
.as_i32x4()))
2323 /// Converts a 128-bit vector interpreted as four 32-bit unsigned integers into a
2324 /// 128-bit vector of four 32-bit floating point numbers.
2326 #[cfg_attr(test, assert_instr("f32x4.convert_i32x4_u"))]
2327 #[target_feature(enable = "simd128")]
2328 pub unsafe fn f32x4_convert_i32x4_u(a
: v128
) -> v128
{
2329 transmute(simd_cast
::<_
, f32x4
>(a
.as_u32x4()))
2337 use std
::num
::Wrapping
;
2338 use std
::prelude
::v1
::*;
2340 fn compare_bytes(a
: v128
, b
: v128
) {
2341 let a
: [u8; 16] = unsafe { transmute(a) }
;
2342 let b
: [u8; 16] = unsafe { transmute(b) }
;
2347 fn test_v128_const() {
2349 unsafe { super::i8x16_const(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) }
;
2350 compare_bytes(A
, A
);
2353 macro_rules
! test_splat
{
2354 ($test_id
:ident
: $val
:expr
=> $
($vals
:expr
),*) => {
2358 let a
= super::$
test_id($val
);
2359 let b
: v128
= transmute([$
($vals
as u8),*]);
2360 compare_bytes(a
, b
);
2366 test_splat
!(i8x16_splat
: 42 => 42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42);
2367 test_splat
!(i16x8_splat
: 42 => 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0);
2368 test_splat
!(i32x4_splat
: 42 => 42, 0, 0, 0, 42, 0, 0, 0, 42, 0, 0, 0, 42, 0, 0, 0);
2369 test_splat
!(i64x2_splat
: 42 => 42, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0);
2370 test_splat
!(f32x4_splat
: 42. => 0, 0, 40, 66, 0, 0, 40, 66, 0, 0, 40, 66, 0, 0, 40, 66);
2371 test_splat
!(f64x2_splat
: 42. => 0, 0, 0, 0, 0, 0, 69, 64, 0, 0, 0, 0, 0, 0, 69, 64);
2373 // tests extract and replace lanes
2374 macro_rules
! test_extract
{
2376 name
: $test_id
:ident
,
2377 extract
: $extract
:ident
,
2378 replace
: $replace
:ident
,
2381 indices
: [$
($idx
:expr
),*],
2386 let arr
: [$elem
; $count
] = [123 as $elem
; $count
];
2387 let vec
: v128
= transmute(arr
);
2389 assert_eq
!($extract
::<$idx
>(vec
), 123 as $elem
);
2392 // create a vector from array and check that the indices contain
2393 // the same values as in the array:
2394 let arr
: [$elem
; $count
] = [$
($idx
as $elem
),*];
2395 let vec
: v128
= transmute(arr
);
2397 assert_eq
!($extract
::<$idx
>(vec
), $idx
as $elem
);
2399 let tmp
= $replace
::<$idx
>(vec
, 124 as $elem
);
2400 assert_eq
!($extract
::<$idx
>(tmp
), 124 as $elem
);
2408 name
: test_i8x16_extract_replace
,
2409 extract
: i8x16_extract_lane
,
2410 replace
: i8x16_replace_lane
,
2413 indices
: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
2416 name
: test_i16x8_extract_replace
,
2417 extract
: i16x8_extract_lane
,
2418 replace
: i16x8_replace_lane
,
2421 indices
: [0, 1, 2, 3, 4, 5, 6, 7],
2424 name
: test_i32x4_extract_replace
,
2425 extract
: i32x4_extract_lane
,
2426 replace
: i32x4_replace_lane
,
2429 indices
: [0, 1, 2, 3],
2432 name
: test_i64x2_extract_replace
,
2433 extract
: i64x2_extract_lane
,
2434 replace
: i64x2_replace_lane
,
2440 name
: test_f32x4_extract_replace
,
2441 extract
: f32x4_extract_lane
,
2442 replace
: f32x4_replace_lane
,
2445 indices
: [0, 1, 2, 3],
2448 name
: test_f64x2_extract_replace
,
2449 extract
: f64x2_extract_lane
,
2450 replace
: f64x2_replace_lane
,
2456 macro_rules
! test_binop
{
2459 $
([$
($vec1
:tt
)*] ($op
:tt
| $f
:ident
) [$
($vec2
:tt
)*],)*
2466 let v1
= [$
($vec1
)*];
2467 let v2
= [$
($vec2
)*];
2468 let v1_v128
: v128
= mem
::transmute(v1
);
2469 let v2_v128
: v128
= mem
::transmute(v2
);
2470 let v3_v128
= super::$
f(v1_v128
, v2_v128
);
2471 let mut v3
= [$
($vec1
)*];
2473 v3
= mem
::transmute(v3_v128
);
2475 for (i
, actual
) in v3
.iter().enumerate() {
2476 let expected
= (Wrapping(v1
[i
]) $op
Wrapping(v2
[i
])).0;
2477 assert_eq
!(*actual
, expected
);
2485 macro_rules
! test_unop
{
2488 $
(($op
:tt
| $f
:ident
) [$
($vec1
:tt
)*],)*
2495 let v1
= [$
($vec1
)*];
2496 let v1_v128
: v128
= mem
::transmute(v1
);
2497 let v2_v128
= super::$
f(v1_v128
);
2498 let mut v2
= [$
($vec1
)*];
2500 v2
= mem
::transmute(v2_v128
);
2502 for (i
, actual
) in v2
.iter().enumerate() {
2503 let expected
= ($op
Wrapping(v1
[i
])).0;
2504 assert_eq
!(*actual
, expected
);
2514 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
2516 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
2518 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
2520 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
2522 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
2524 [127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 9, -24],
2527 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
2529 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
2531 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
2533 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
2535 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
2537 [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
2541 [0i16, 0, 0, 0, 0, 0, 0, 0]
2543 [1i16, 1, 1, 1, 1, 1, 1, 1],
2545 [1i16, 2, 3, 4, 5, 6, 7, 8]
2547 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
2551 [0i16, 0, 0, 0, 0, 0, 0, 0]
2553 [1i16, 1, 1, 1, 1, 1, 1, 1],
2555 [1i16, 2, 3, 4, 5, 6, 7, 8]
2557 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
2561 [0i16, 0, 0, 0, 0, 0, 0, 0]
2563 [1i16, 1, 1, 1, 1, 1, 1, 1],
2565 [1i16, 2, 3, 4, 5, 6, 7, 8]
2567 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
2571 [0i32, 0, 0, 0] (+ | i32x4_add
) [1, 2, 3, 4],
2572 [1i32, 1283, i32::MAX
, i32::MIN
]
2578 [0i32, 0, 0, 0] (- | i32x4_sub
) [1, 2, 3, 4],
2579 [1i32, 1283, i32::MAX
, i32::MIN
]
2585 [0i32, 0, 0, 0] (* | i32x4_mul
) [1, 2, 3, 4],
2586 [1i32, 1283, i32::MAX
, i32::MIN
]
2591 // TODO: test_i64x2_add
2592 // TODO: test_i64x2_sub
2598 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
2601 [-2i8, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
2604 [-127i8, -44, 43, 126, 4, -128, 127, -59, -43, 39, -69, 79, -3, 35, 83, 13],
2608 (- | i16x8_neg
) [1i16, 1, 1, 1, 1, 1, 1, 1],
2609 (- | i16x8_neg
) [2i16, 0x7fff, !0, 4, 42, -5, 33, -4847],
2613 (- | i32x4_neg
) [1i32, 2, 3, 4],
2614 (- | i32x4_neg
) [i32::MIN
, i32::MAX
, 0, 4],
2617 // TODO: test_i64x2_neg
2621 fn test_v8x16_shuffle() {
2623 let a
= [0_u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
2625 16_u8, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
2628 let vec_a
: v128
= transmute(a
);
2629 let vec_b
: v128
= transmute(b
);
2631 let vec_r
= v8x16_shuffle
::<0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30>(
2635 let e
= [0_u8, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30];
2636 let vec_e
: v128
= transmute(e
);
2637 compare_bytes(vec_r
, vec_e
);
2641 macro_rules
! floating_point
{
2653 trait IsNan
: Sized
{
2654 fn is_nan(self) -> bool
{
2658 impl IsNan
for i8 {}
2659 impl IsNan
for i16 {}
2660 impl IsNan
for i32 {}
2661 impl IsNan
for i64 {}
2663 macro_rules
! test_bop
{
2664 ($id
:ident
[$ety
:ident
; $ecount
:expr
] |
2665 $binary_op
:ident
[$op_test_id
:ident
] :
2666 ([$
($in_a
:expr
),*], [$
($in_b
:expr
),*]) => [$
($out
:expr
),*]) => {
2668 $id
[$ety
; $ecount
] => $ety
| $binary_op
[ $op_test_id
]:
2669 ([$
($in_a
),*], [$
($in_b
),*]) => [$
($out
),*]
2673 ($id
:ident
[$ety
:ident
; $ecount
:expr
] => $oty
:ident
|
2674 $binary_op
:ident
[$op_test_id
:ident
] :
2675 ([$
($in_a
:expr
),*], [$
($in_b
:expr
),*]) => [$
($out
:expr
),*]) => {
2679 let a_input
: [$ety
; $ecount
] = [$
($in_a
),*];
2680 let b_input
: [$ety
; $ecount
] = [$
($in_b
),*];
2681 let output
: [$oty
; $ecount
] = [$
($out
),*];
2683 let a_vec_in
: v128
= transmute(a_input
);
2684 let b_vec_in
: v128
= transmute(b_input
);
2685 let vec_res
: v128
= $
binary_op(a_vec_in
, b_vec_in
);
2687 let res
: [$oty
; $ecount
] = transmute(vec_res
);
2689 if !floating_point
!($ety
) {
2690 assert_eq
!(res
, output
);
2692 for i
in 0..$ecount
{
2695 assert_eq
!(r
.is_nan(), o
.is_nan());
2706 macro_rules
! test_bops
{
2707 ($id
:ident
[$ety
:ident
; $ecount
:expr
] |
2708 $binary_op
:ident
[$op_test_id
:ident
]:
2709 ([$
($in_a
:expr
),*], $in_b
:expr
) => [$
($out
:expr
),*]) => {
2713 let a_input
: [$ety
; $ecount
] = [$
($in_a
),*];
2714 let output
: [$ety
; $ecount
] = [$
($out
),*];
2716 let a_vec_in
: v128
= transmute(a_input
);
2717 let vec_res
: v128
= $
binary_op(a_vec_in
, $in_b
);
2719 let res
: [$ety
; $ecount
] = transmute(vec_res
);
2720 assert_eq
!(res
, output
);
2726 macro_rules
! test_uop
{
2727 ($id
:ident
[$ety
:ident
; $ecount
:expr
] |
2728 $unary_op
:ident
[$op_test_id
:ident
]: [$
($in_a
:expr
),*] => [$
($out
:expr
),*]) => {
2732 let a_input
: [$ety
; $ecount
] = [$
($in_a
),*];
2733 let output
: [$ety
; $ecount
] = [$
($out
),*];
2735 let a_vec_in
: v128
= transmute(a_input
);
2736 let vec_res
: v128
= $
unary_op(a_vec_in
);
2738 let res
: [$ety
; $ecount
] = transmute(vec_res
);
2739 assert_eq
!(res
, output
);
2745 test_bops
!(i8x16
[i8; 16] | i8x16_shl
[i8x16_shl_test
]:
2746 ([0, -1, 2, 3, 4, 5, 6, i8::MAX
, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
2747 [0, -2, 4, 6, 8, 10, 12, -2, 2, 2, 2, 2, 2, 2, 2, 2]);
2748 test_bops
!(i16x8
[i16; 8] | i16x8_shl
[i16x8_shl_test
]:
2749 ([0, -1, 2, 3, 4, 5, 6, i16::MAX
], 1) =>
2750 [0, -2, 4, 6, 8, 10, 12, -2]);
2751 test_bops
!(i32x4
[i32; 4] | i32x4_shl
[i32x4_shl_test
]:
2752 ([0, -1, 2, 3], 1) => [0, -2, 4, 6]);
2753 test_bops
!(i64x2
[i64; 2] | i64x2_shl
[i64x2_shl_test
]:
2754 ([0, -1], 1) => [0, -2]);
2756 test_bops
!(i8x16
[i8; 16] | i8x16_shr_s
[i8x16_shr_s_test
]:
2757 ([0, -1, 2, 3, 4, 5, 6, i8::MAX
, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
2758 [0, -1, 1, 1, 2, 2, 3, 63, 0, 0, 0, 0, 0, 0, 0, 0]);
2759 test_bops
!(i16x8
[i16; 8] | i16x8_shr_s
[i16x8_shr_s_test
]:
2760 ([0, -1, 2, 3, 4, 5, 6, i16::MAX
], 1) =>
2761 [0, -1, 1, 1, 2, 2, 3, i16::MAX
/ 2]);
2762 test_bops
!(i32x4
[i32; 4] | i32x4_shr_s
[i32x4_shr_s_test
]:
2763 ([0, -1, 2, 3], 1) => [0, -1, 1, 1]);
2764 test_bops
!(i64x2
[i64; 2] | i64x2_shr_s
[i64x2_shr_s_test
]:
2765 ([0, -1], 1) => [0, -1]);
2767 test_bops
!(i8x16
[i8; 16] | i8x16_shr_u
[i8x16_uhr_u_test
]:
2768 ([0, -1, 2, 3, 4, 5, 6, i8::MAX
, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
2769 [0, i8::MAX
, 1, 1, 2, 2, 3, 63, 0, 0, 0, 0, 0, 0, 0, 0]);
2770 test_bops
!(i16x8
[i16; 8] | i16x8_shr_u
[i16x8_uhr_u_test
]:
2771 ([0, -1, 2, 3, 4, 5, 6, i16::MAX
], 1) =>
2772 [0, i16::MAX
, 1, 1, 2, 2, 3, i16::MAX
/ 2]);
2773 test_bops
!(i32x4
[i32; 4] | i32x4_shr_u
[i32x4_uhr_u_test
]:
2774 ([0, -1, 2, 3], 1) => [0, i32::MAX
, 1, 1]);
2775 test_bops
!(i64x2
[i64; 2] | i64x2_shr_u
[i64x2_uhr_u_test
]:
2776 ([0, -1], 1) => [0, i64::MAX
]);
2779 fn v128_bitwise_logical_ops() {
2781 let a
: [u32; 4] = [u32::MAX
, 0, u32::MAX
, 0];
2782 let b
: [u32; 4] = [u32::MAX
; 4];
2783 let c
: [u32; 4] = [0; 4];
2785 let vec_a
: v128
= transmute(a
);
2786 let vec_b
: v128
= transmute(b
);
2787 let vec_c
: v128
= transmute(c
);
2789 let r
: v128
= v128_and(vec_a
, vec_a
);
2790 compare_bytes(r
, vec_a
);
2791 let r
: v128
= v128_and(vec_a
, vec_b
);
2792 compare_bytes(r
, vec_a
);
2793 let r
: v128
= v128_or(vec_a
, vec_b
);
2794 compare_bytes(r
, vec_b
);
2795 let r
: v128
= v128_not(vec_b
);
2796 compare_bytes(r
, vec_c
);
2797 let r
: v128
= v128_xor(vec_a
, vec_c
);
2798 compare_bytes(r
, vec_a
);
2800 let r
: v128
= v128_bitselect(vec_b
, vec_c
, vec_b
);
2801 compare_bytes(r
, vec_b
);
2802 let r
: v128
= v128_bitselect(vec_b
, vec_c
, vec_c
);
2803 compare_bytes(r
, vec_c
);
2804 let r
: v128
= v128_bitselect(vec_b
, vec_c
, vec_a
);
2805 compare_bytes(r
, vec_a
);
2809 macro_rules
! test_bool_red
{
2810 ([$test_id
:ident
, $any
:ident
, $all
:ident
] | [$
($
true:expr
),*] | [$
($
false:expr
),*] | [$
($alt
:expr
),*]) => {
2814 let vec_a
: v128
= transmute([$
($
true),*]); // true
2815 let vec_b
: v128
= transmute([$
($
false),*]); // false
2816 let vec_c
: v128
= transmute([$
($alt
),*]); // alternating
2818 assert_eq
!($
any(vec_a
), 1);
2819 assert_eq
!($
any(vec_b
), 0);
2820 assert_eq
!($
any(vec_c
), 1);
2822 assert_eq
!($
all(vec_a
), 1);
2823 assert_eq
!($
all(vec_b
), 0);
2824 assert_eq
!($
all(vec_c
), 0);
2831 [i8x16_boolean_reductions
, i8x16_any_true
, i8x16_all_true
]
2832 | [1_i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
2833 | [0_i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
2834 | [1_i8, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]
2837 [i16x8_boolean_reductions
, i16x8_any_true
, i16x8_all_true
]
2838 | [1_i16, 1, 1, 1, 1, 1, 1, 1]
2839 | [0_i16, 0, 0, 0, 0, 0, 0, 0]
2840 | [1_i16, 0, 1, 0, 1, 0, 1, 0]
2843 [i32x4_boolean_reductions
, i32x4_any_true
, i32x4_all_true
]
2849 test_bop
!(i8x16
[i8; 16] | i8x16_eq
[i8x16_eq_test
]:
2850 ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
2851 [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
2852 [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
2853 test_bop
!(i16x8
[i16; 8] | i16x8_eq
[i16x8_eq_test
]:
2854 ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
2855 [-1, 0, -1, 0 ,-1, 0, -1, -1]);
2856 test_bop
!(i32x4
[i32; 4] | i32x4_eq
[i32x4_eq_test
]:
2857 ([0, 1, 2, 3], [0, 2, 2, 4]) => [-1, 0, -1, 0]);
2858 test_bop
!(f32x4
[f32; 4] => i32 | f32x4_eq
[f32x4_eq_test
]:
2859 ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [-1, 0, -1, 0]);
2860 test_bop
!(f64x2
[f64; 2] => i64 | f64x2_eq
[f64x2_eq_test
]: ([0., 1.], [0., 2.]) => [-1, 0]);
2862 test_bop
!(i8x16
[i8; 16] | i8x16_ne
[i8x16_ne_test
]:
2863 ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
2864 [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
2865 [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
2866 test_bop
!(i16x8
[i16; 8] | i16x8_ne
[i16x8_ne_test
]:
2867 ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
2868 [0, -1, 0, -1 ,0, -1, 0, 0]);
2869 test_bop
!(i32x4
[i32; 4] | i32x4_ne
[i32x4_ne_test
]:
2870 ([0, 1, 2, 3], [0, 2, 2, 4]) => [0, -1, 0, -1]);
2871 test_bop
!(f32x4
[f32; 4] => i32 | f32x4_ne
[f32x4_ne_test
]:
2872 ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [0, -1, 0, -1]);
2873 test_bop
!(f64x2
[f64; 2] => i64 | f64x2_ne
[f64x2_ne_test
]: ([0., 1.], [0., 2.]) => [0, -1]);
2875 test_bop
!(i8x16
[i8; 16] | i8x16_lt_s
[i8x16_lt_test
]:
2876 ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
2877 [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
2878 [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
2879 test_bop
!(i16x8
[i16; 8] | i16x8_lt_s
[i16x8_lt_test
]:
2880 ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
2881 [0, -1, 0, -1 ,0, -1, 0, 0]);
2882 test_bop
!(i32x4
[i32; 4] | i32x4_lt_s
[i32x4_lt_test
]:
2883 ([0, 1, 2, 3], [0, 2, 2, 4]) => [0, -1, 0, -1]);
2884 test_bop
!(f32x4
[f32; 4] => i32 | f32x4_lt
[f32x4_lt_test
]:
2885 ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [0, -1, 0, -1]);
2886 test_bop
!(f64x2
[f64; 2] => i64 | f64x2_lt
[f64x2_lt_test
]: ([0., 1.], [0., 2.]) => [0, -1]);
2888 test_bop
!(i8x16
[i8; 16] | i8x16_gt_s
[i8x16_gt_test
]:
2889 ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15],
2890 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) =>
2891 [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
2892 test_bop
!(i16x8
[i16; 8] | i16x8_gt_s
[i16x8_gt_test
]:
2893 ([0, 2, 2, 4, 4, 6, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
2894 [0, -1, 0, -1 ,0, -1, 0, 0]);
2895 test_bop
!(i32x4
[i32; 4] | i32x4_gt_s
[i32x4_gt_test
]:
2896 ([0, 2, 2, 4], [0, 1, 2, 3]) => [0, -1, 0, -1]);
2897 test_bop
!(f32x4
[f32; 4] => i32 | f32x4_gt
[f32x4_gt_test
]:
2898 ([0., 2., 2., 4.], [0., 1., 2., 3.]) => [0, -1, 0, -1]);
2899 test_bop
!(f64x2
[f64; 2] => i64 | f64x2_gt
[f64x2_gt_test
]: ([0., 2.], [0., 1.]) => [0, -1]);
2901 test_bop
!(i8x16
[i8; 16] | i8x16_ge_s
[i8x16_ge_test
]:
2902 ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
2903 [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
2904 [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
2905 test_bop
!(i16x8
[i16; 8] | i16x8_ge_s
[i16x8_ge_test
]:
2906 ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
2907 [-1, 0, -1, 0 ,-1, 0, -1, -1]);
2908 test_bop
!(i32x4
[i32; 4] | i32x4_ge_s
[i32x4_ge_test
]:
2909 ([0, 1, 2, 3], [0, 2, 2, 4]) => [-1, 0, -1, 0]);
2910 test_bop
!(f32x4
[f32; 4] => i32 | f32x4_ge
[f32x4_ge_test
]:
2911 ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [-1, 0, -1, 0]);
2912 test_bop
!(f64x2
[f64; 2] => i64 | f64x2_ge
[f64x2_ge_test
]: ([0., 1.], [0., 2.]) => [-1, 0]);
2914 test_bop
!(i8x16
[i8; 16] | i8x16_le_s
[i8x16_le_test
]:
2915 ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15],
2916 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
2918 [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
2919 test_bop
!(i16x8
[i16; 8] | i16x8_le_s
[i16x8_le_test
]:
2920 ([0, 2, 2, 4, 4, 6, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
2921 [-1, 0, -1, 0 ,-1, 0, -1, -1]);
2922 test_bop
!(i32x4
[i32; 4] | i32x4_le_s
[i32x4_le_test
]:
2923 ([0, 2, 2, 4], [0, 1, 2, 3]) => [-1, 0, -1, 0]);
2924 test_bop
!(f32x4
[f32; 4] => i32 | f32x4_le
[f32x4_le_test
]:
2925 ([0., 2., 2., 4.], [0., 1., 2., 3.]) => [-1, 0, -1, -0]);
2926 test_bop
!(f64x2
[f64; 2] => i64 | f64x2_le
[f64x2_le_test
]: ([0., 2.], [0., 1.]) => [-1, 0]);
2929 fn v128_bitwise_load_store() {
2931 let mut arr
: [i32; 4] = [0, 1, 2, 3];
2933 let vec
= v128_load(arr
.as_ptr() as *const v128
);
2934 let vec
= i32x4_add(vec
, vec
);
2935 v128_store(arr
.as_mut_ptr() as *mut v128
, vec
);
2937 assert_eq
!(arr
, [0, 2, 4, 6]);
2941 test_uop
!(f32x4
[f32; 4] | f32x4_neg
[f32x4_neg_test
]: [0., 1., 2., 3.] => [ 0., -1., -2., -3.]);
2942 test_uop
!(f32x4
[f32; 4] | f32x4_abs
[f32x4_abs_test
]: [0., -1., 2., -3.] => [ 0., 1., 2., 3.]);
2943 test_bop
!(f32x4
[f32; 4] | f32x4_min
[f32x4_min_test
]:
2944 ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [0., -3., -4., 8.]);
2945 test_bop
!(f32x4
[f32; 4] | f32x4_min
[f32x4_min_test_nan
]:
2946 ([0., -1., 7., 8.], [1., -3., -4., std
::f32::NAN
])
2947 => [0., -3., -4., std
::f32::NAN
]);
2948 test_bop
!(f32x4
[f32; 4] | f32x4_max
[f32x4_max_test
]:
2949 ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [1., -1., 7., 10.]);
2950 test_bop
!(f32x4
[f32; 4] | f32x4_max
[f32x4_max_test_nan
]:
2951 ([0., -1., 7., 8.], [1., -3., -4., std
::f32::NAN
])
2952 => [1., -1., 7., std
::f32::NAN
]);
2953 test_bop
!(f32x4
[f32; 4] | f32x4_add
[f32x4_add_test
]:
2954 ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [1., -4., 3., 18.]);
2955 test_bop
!(f32x4
[f32; 4] | f32x4_sub
[f32x4_sub_test
]:
2956 ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [-1., 2., 11., -2.]);
2957 test_bop
!(f32x4
[f32; 4] | f32x4_mul
[f32x4_mul_test
]:
2958 ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [0., 3., -28., 80.]);
2959 test_bop
!(f32x4
[f32; 4] | f32x4_div
[f32x4_div_test
]:
2960 ([0., -8., 70., 8.], [1., 4., 10., 2.]) => [0., -2., 7., 4.]);
2962 test_uop
!(f64x2
[f64; 2] | f64x2_neg
[f64x2_neg_test
]: [0., 1.] => [ 0., -1.]);
2963 test_uop
!(f64x2
[f64; 2] | f64x2_abs
[f64x2_abs_test
]: [0., -1.] => [ 0., 1.]);
2964 test_bop
!(f64x2
[f64; 2] | f64x2_min
[f64x2_min_test
]:
2965 ([0., -1.], [1., -3.]) => [0., -3.]);
2966 test_bop
!(f64x2
[f64; 2] | f64x2_min
[f64x2_min_test_nan
]:
2967 ([7., 8.], [-4., std
::f64::NAN
])
2968 => [ -4., std
::f64::NAN
]);
2969 test_bop
!(f64x2
[f64; 2] | f64x2_max
[f64x2_max_test
]:
2970 ([0., -1.], [1., -3.]) => [1., -1.]);
2971 test_bop
!(f64x2
[f64; 2] | f64x2_max
[f64x2_max_test_nan
]:
2972 ([7., 8.], [ -4., std
::f64::NAN
])
2973 => [7., std
::f64::NAN
]);
2974 test_bop
!(f64x2
[f64; 2] | f64x2_add
[f64x2_add_test
]:
2975 ([0., -1.], [1., -3.]) => [1., -4.]);
2976 test_bop
!(f64x2
[f64; 2] | f64x2_sub
[f64x2_sub_test
]:
2977 ([0., -1.], [1., -3.]) => [-1., 2.]);
2978 test_bop
!(f64x2
[f64; 2] | f64x2_mul
[f64x2_mul_test
]:
2979 ([0., -1.], [1., -3.]) => [0., 3.]);
2980 test_bop
!(f64x2
[f64; 2] | f64x2_div
[f64x2_div_test
]:
2981 ([0., -8.], [1., 4.]) => [0., -2.]);
// Generates a test for a lane-wise conversion intrinsic: it reinterprets
// `$from` and `$to` as raw v128 bit patterns, applies `$conv_id` to the
// input, and byte-compares the result against the expected value.
// (`$to_ty` names the destination lane shape; it is not used in the body.)
// NOTE(review): the generated-fn wrapper and closing braces were lost in
// extraction; they are reconstructed here to match the shape of the other
// test macros in this file — confirm against the upstream source.
macro_rules! test_conv {
    ($test_id:ident | $conv_id:ident | $to_ty:ident | $from:expr, $to:expr) => {
        #[wasm_bindgen_test]
        fn $test_id() {
            unsafe {
                // Reinterpret input and expected output as raw 128-bit values.
                let from: v128 = transmute($from);
                let to: v128 = transmute($to);

                let r: v128 = $conv_id(from);

                compare_bytes(r, to);
            }
        }
    };
}
3000 f32x4_convert_s_i32x4
| f32x4_convert_i32x4_s
| f32x4
| [1_i32, 2, 3, 4],
3004 f32x4_convert_u_i32x4
| f32x4_convert_i32x4_u
| f32x4
| [u32::MAX
, 2, 3, 4],
3005 [u32::MAX
as f32, 2., 3., 4.]
// FIXME: this fails, and produces 0 instead of saturating at i32::MAX
// test_conv!(
//     i32x4_trunc_s_f32x4_sat
//     | i32x4_trunc_sat_f32x4_s
//     | i32x4
//     | [f32::NAN, 2., (i32::MAX as f32 + 1.), 4.],
//     [0, 2, i32::MAX, 4]
// );
// FIXME: add other saturating tests