1 //! This module implements the [WebAssembly `SIMD128` ISA].
3 //! [WebAssembly `SIMD128` ISA]:
4 //! https://github.com/WebAssembly/simd/blob/master/proposals/simd/SIMD.md
6 #![allow(non_camel_case_types)]
9 core_arch
::{simd::*, simd_llvm::*}
,
16 use stdarch_test
::assert_instr
;
18 use wasm_bindgen_test
::wasm_bindgen_test
;
/// WASM-specific 128-bit wide SIMD vector type.
// N.B., internals here are arbitrary.
// The four i32 fields are only a storage choice; all public APIs reinterpret
// the bits through `transmute`, so the field layout carries no lane semantics.
pub struct v128(i32, i32, i32, i32);
26 #[allow(non_camel_case_types)]
27 #[unstable(feature = "stdimd_internal", issue = "none")]
28 pub(crate) trait v128Ext
: Sized
{
29 fn as_v128(self) -> v128
;
32 fn as_u8x16(self) -> u8x16
{
33 unsafe { transmute(self.as_v128()) }
37 fn as_u16x8(self) -> u16x8
{
38 unsafe { transmute(self.as_v128()) }
42 fn as_u32x4(self) -> u32x4
{
43 unsafe { transmute(self.as_v128()) }
47 fn as_u64x2(self) -> u64x2
{
48 unsafe { transmute(self.as_v128()) }
52 fn as_i8x16(self) -> i8x16
{
53 unsafe { transmute(self.as_v128()) }
57 fn as_i16x8(self) -> i16x8
{
58 unsafe { transmute(self.as_v128()) }
62 fn as_i32x4(self) -> i32x4
{
63 unsafe { transmute(self.as_v128()) }
67 fn as_i64x2(self) -> i64x2
{
68 unsafe { transmute(self.as_v128()) }
72 fn as_f32x4(self) -> f32x4
{
73 unsafe { transmute(self.as_v128()) }
77 fn as_f64x2(self) -> f64x2
{
78 unsafe { transmute(self.as_v128()) }
82 impl v128Ext
for v128
{
84 fn as_v128(self) -> Self {
89 #[allow(improper_ctypes)]
91 #[link_name = "llvm.wasm.anytrue.v16i8"]
92 fn llvm_i8x16_any_true(x
: i8x16
) -> i32;
93 #[link_name = "llvm.wasm.alltrue.v16i8"]
94 fn llvm_i8x16_all_true(x
: i8x16
) -> i32;
95 #[link_name = "llvm.sadd.sat.v16i8"]
96 fn llvm_i8x16_add_saturate_s(a
: i8x16
, b
: i8x16
) -> i8x16
;
97 #[link_name = "llvm.uadd.sat.v16i8"]
98 fn llvm_i8x16_add_saturate_u(a
: i8x16
, b
: i8x16
) -> i8x16
;
99 #[link_name = "llvm.wasm.sub.saturate.signed.v16i8"]
100 fn llvm_i8x16_sub_saturate_s(a
: i8x16
, b
: i8x16
) -> i8x16
;
101 #[link_name = "llvm.wasm.sub.saturate.unsigned.v16i8"]
102 fn llvm_i8x16_sub_saturate_u(a
: i8x16
, b
: i8x16
) -> i8x16
;
104 #[link_name = "llvm.wasm.anytrue.v8i16"]
105 fn llvm_i16x8_any_true(x
: i16x8
) -> i32;
106 #[link_name = "llvm.wasm.alltrue.v8i16"]
107 fn llvm_i16x8_all_true(x
: i16x8
) -> i32;
108 #[link_name = "llvm.sadd.sat.v8i16"]
109 fn llvm_i16x8_add_saturate_s(a
: i16x8
, b
: i16x8
) -> i16x8
;
110 #[link_name = "llvm.uadd.sat.v8i16"]
111 fn llvm_i16x8_add_saturate_u(a
: i16x8
, b
: i16x8
) -> i16x8
;
112 #[link_name = "llvm.wasm.sub.saturate.signed.v8i16"]
113 fn llvm_i16x8_sub_saturate_s(a
: i16x8
, b
: i16x8
) -> i16x8
;
114 #[link_name = "llvm.wasm.sub.saturate.unsigned.v8i16"]
115 fn llvm_i16x8_sub_saturate_u(a
: i16x8
, b
: i16x8
) -> i16x8
;
117 #[link_name = "llvm.wasm.anytrue.v4i32"]
118 fn llvm_i32x4_any_true(x
: i32x4
) -> i32;
119 #[link_name = "llvm.wasm.alltrue.v4i32"]
120 fn llvm_i32x4_all_true(x
: i32x4
) -> i32;
122 #[link_name = "llvm.wasm.anytrue.v2i64"]
123 fn llvm_i64x2_any_true(x
: i64x2
) -> i32;
124 #[link_name = "llvm.wasm.alltrue.v2i64"]
125 fn llvm_i64x2_all_true(x
: i64x2
) -> i32;
127 #[link_name = "llvm.fabs.v4f32"]
128 fn llvm_f32x4_abs(x
: f32x4
) -> f32x4
;
129 #[link_name = "llvm.sqrt.v4f32"]
130 fn llvm_f32x4_sqrt(x
: f32x4
) -> f32x4
;
131 #[link_name = "llvm.minimum.v4f32"]
132 fn llvm_f32x4_min(x
: f32x4
, y
: f32x4
) -> f32x4
;
133 #[link_name = "llvm.maximum.v4f32"]
134 fn llvm_f32x4_max(x
: f32x4
, y
: f32x4
) -> f32x4
;
135 #[link_name = "llvm.fabs.v2f64"]
136 fn llvm_f64x2_abs(x
: f64x2
) -> f64x2
;
137 #[link_name = "llvm.sqrt.v2f64"]
138 fn llvm_f64x2_sqrt(x
: f64x2
) -> f64x2
;
139 #[link_name = "llvm.minimum.v2f64"]
140 fn llvm_f64x2_min(x
: f64x2
, y
: f64x2
) -> f64x2
;
141 #[link_name = "llvm.maximum.v2f64"]
142 fn llvm_f64x2_max(x
: f64x2
, y
: f64x2
) -> f64x2
;
144 #[link_name = "llvm.wasm.bitselect.v16i8"]
145 fn llvm_bitselect(a
: i8x16
, b
: i8x16
, c
: i8x16
) -> i8x16
;
/// Loads a `v128` vector from the given heap address.
// Safety: the caller must ensure `m` is valid for a 16-byte aligned-enough read.
#[cfg_attr(test, assert_instr(v128.load))]
// NOTE(review): the function body is elided from this view (presumably a plain
// dereference of `m`) — confirm against the full source before editing.
pub unsafe fn v128_load(m
: *const v128
) -> v128
{
/// Stores a `v128` vector to the given heap address.
// Safety: the caller must ensure `m` is valid for a 16-byte write.
#[cfg_attr(test, assert_instr(v128.store))]
// NOTE(review): the function body is elided from this view (presumably a plain
// store through `m`) — confirm against the full source before editing.
pub unsafe fn v128_store(m
: *mut v128
, a
: v128
) {
/// Materializes a constant SIMD value from the immediate operands.
/// The `v128.const` instruction is encoded with 16 immediate bytes
/// `imm` which provide the bits of the vector directly.
#[cfg(not(only_node_compatible_functions))]
#[rustc_args_required_const(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)]
#[cfg_attr(test, assert_instr(
// NOTE(review): the expected-instruction argument list above, the parameter
// types below (presumably sixteen `u8` immediates, one per byte of the
// vector) and the function body are elided from this view — confirm against
// the full source before editing.
pub const fn v128_const(
a0
, a1
, a2
, a3
, a4
, a5
, a6
, a7
, a8
, a9
, a10
, a11
, a12
, a13
, a14
, a15
,
220 /// Creates a vector with identical lanes.
222 /// Constructs a vector with `x` replicated to all 16 lanes.
224 #[cfg_attr(test, assert_instr(i8x16.splat))]
225 pub fn i8x16_splat(a
: i8) -> v128
{
226 unsafe { transmute(i8x16::splat(a)) }
/// Extracts a lane from a 128-bit vector interpreted as 16 packed i8 numbers.
/// Extracts the scalar value of lane specified in the immediate mode operand
// # Unsafety
/// This function has undefined behavior if `imm` is greater than or equal to
// the lane count (16) — some surrounding doc lines are elided in this view.
#[rustc_args_required_const(1)]
pub unsafe fn i8x16_extract_lane(a
: v128
, imm
: usize) -> i8 {
// NOTE(review): the inner functions below appear to be test-only shims for
// `assert_instr`; their `#[cfg(test)]` attributes look elided from this view —
// confirm against the full source.
#[assert_instr(i8x16.extract_lane_s)]
fn extract_lane_s(a
: v128
) -> i32 {
unsafe { i8x16_extract_lane(a, 0) as i32 }
#[cfg(not(only_node_compatible_functions))]
#[assert_instr(i8x16.extract_lane_u)]
fn extract_lane_u(a
: v128
) -> u32 {
unsafe { i8x16_extract_lane(a, 0) as u32 }
// Unchecked extraction from the signed 8-bit lane view; `imm` must be < 16.
simd_extract(a
.as_i8x16(), imm
as u32)
255 /// Replaces a lane from a 128-bit vector interpreted as 16 packed i8 numbers.
257 /// Replaces the scalar value of lane specified in the immediate mode operand
262 /// This function has undefined behavior if `imm` is greater than or equal to
265 #[cfg_attr(test, assert_instr(i8x16.replace_lane, imm = 0))]
266 #[rustc_args_required_const(1)]
267 pub unsafe fn i8x16_replace_lane(a
: v128
, imm
: usize, val
: i8) -> v128
{
268 transmute(simd_insert(a
.as_i8x16(), imm
as u32, val
))
271 /// Creates a vector with identical lanes.
273 /// Construct a vector with `x` replicated to all 8 lanes.
275 #[cfg_attr(test, assert_instr(i16x8.splat))]
276 pub fn i16x8_splat(a
: i16) -> v128
{
277 unsafe { transmute(i16x8::splat(a)) }
/// Extracts a lane from a 128-bit vector interpreted as 8 packed i16 numbers.
/// Extracts a the scalar value of lane specified in the immediate mode operand
// # Unsafety
/// This function has undefined behavior if `imm` is greater than or equal to
// the lane count (8) — some surrounding doc lines are elided in this view.
#[rustc_args_required_const(1)]
pub unsafe fn i16x8_extract_lane(a
: v128
, imm
: usize) -> i16 {
// NOTE(review): the inner functions below appear to be test-only shims for
// `assert_instr`; their `#[cfg(test)]` attributes look elided from this view —
// confirm against the full source.
#[assert_instr(i16x8.extract_lane_s)]
fn extract_lane_s(a
: v128
) -> i32 {
unsafe { i16x8_extract_lane(a, 0) as i32 }
#[cfg(not(only_node_compatible_functions))]
#[assert_instr(i16x8.extract_lane_u)]
fn extract_lane_u(a
: v128
) -> u32 {
unsafe { i16x8_extract_lane(a, 0) as u32 }
// Unchecked extraction from the signed 16-bit lane view; `imm` must be < 8.
simd_extract(a
.as_i16x8(), imm
as u32)
306 /// Replaces a lane from a 128-bit vector interpreted as 8 packed i16 numbers.
308 /// Replaces the scalar value of lane specified in the immediate mode operand
313 /// This function has undefined behavior if `imm` is greater than or equal to
316 #[cfg_attr(test, assert_instr(i16x8.replace_lane, imm = 0))]
317 #[rustc_args_required_const(1)]
318 pub unsafe fn i16x8_replace_lane(a
: v128
, imm
: usize, val
: i16) -> v128
{
319 transmute(simd_insert(a
.as_i16x8(), imm
as u32, val
))
322 /// Creates a vector with identical lanes.
324 /// Constructs a vector with `x` replicated to all 4 lanes.
326 #[cfg_attr(test, assert_instr(i32x4.splat))]
327 pub fn i32x4_splat(a
: i32) -> v128
{
328 unsafe { transmute(i32x4::splat(a)) }
331 /// Extracts a lane from a 128-bit vector interpreted as 4 packed i32 numbers.
333 /// Extracts the scalar value of lane specified in the immediate mode operand
338 /// This function has undefined behavior if `imm` is greater than or equal to
341 #[cfg_attr(test, assert_instr(i32x4.extract_lane, imm = 0))]
342 #[rustc_args_required_const(1)]
343 pub unsafe fn i32x4_extract_lane(a
: v128
, imm
: usize) -> i32 {
344 simd_extract(a
.as_i32x4(), imm
as u32)
347 /// Replaces a lane from a 128-bit vector interpreted as 4 packed i32 numbers.
349 /// Replaces the scalar value of lane specified in the immediate mode operand
354 /// This function has undefined behavior if `imm` is greater than or equal to
357 #[cfg_attr(test, assert_instr(i32x4.replace_lane, imm = 0))]
358 #[rustc_args_required_const(1)]
359 pub unsafe fn i32x4_replace_lane(a
: v128
, imm
: usize, val
: i32) -> v128
{
360 transmute(simd_insert(a
.as_i32x4(), imm
as u32, val
))
363 /// Creates a vector with identical lanes.
365 /// Construct a vector with `x` replicated to all 2 lanes.
367 #[cfg(not(only_node_compatible_functions))]
368 #[cfg_attr(test, assert_instr(i8x16.splat))]
369 pub fn i64x2_splat(a
: i64) -> v128
{
370 unsafe { transmute(i64x2::splat(a)) }
373 /// Extracts a lane from a 128-bit vector interpreted as 2 packed i64 numbers.
375 /// Extracts the scalar value of lane specified in the immediate mode operand
380 /// This function has undefined behavior if `imm` is greater than or equal to
383 #[cfg(not(only_node_compatible_functions))]
384 #[cfg_attr(test, assert_instr(i64x2.extract_lane_s, imm = 0))]
385 #[rustc_args_required_const(1)]
386 pub unsafe fn i64x2_extract_lane(a
: v128
, imm
: usize) -> i64 {
387 simd_extract(a
.as_i64x2(), imm
as u32)
390 /// Replaces a lane from a 128-bit vector interpreted as 2 packed i64 numbers.
392 /// Replaces the scalar value of lane specified in the immediate mode operand
397 /// This function has undefined behavior if `imm` is greater than or equal to
400 #[cfg(not(only_node_compatible_functions))]
401 #[cfg_attr(test, assert_instr(i64x2.replace_lane, imm = 0))]
402 #[rustc_args_required_const(1)]
403 pub unsafe fn i64x2_replace_lane(a
: v128
, imm
: usize, val
: i64) -> v128
{
404 transmute(simd_insert(a
.as_i64x2(), imm
as u32, val
))
407 /// Creates a vector with identical lanes.
409 /// Constructs a vector with `x` replicated to all 4 lanes.
411 #[cfg_attr(test, assert_instr(f32x4.splat))]
412 pub fn f32x4_splat(a
: f32) -> v128
{
413 unsafe { transmute(f32x4::splat(a)) }
416 /// Extracts a lane from a 128-bit vector interpreted as 4 packed f32 numbers.
418 /// Extracts the scalar value of lane specified in the immediate mode operand
423 /// This function has undefined behavior if `imm` is greater than or equal to
426 #[cfg_attr(test, assert_instr(f32x4.extract_lane, imm = 0))]
427 #[rustc_args_required_const(1)]
428 pub unsafe fn f32x4_extract_lane(a
: v128
, imm
: usize) -> f32 {
429 simd_extract(a
.as_f32x4(), imm
as u32)
432 /// Replaces a lane from a 128-bit vector interpreted as 4 packed f32 numbers.
434 /// Replaces the scalar value of lane specified in the immediate mode operand
439 /// This function has undefined behavior if `imm` is greater than or equal to
442 #[cfg_attr(test, assert_instr(f32x4.replace_lane, imm = 0))]
443 #[rustc_args_required_const(1)]
444 pub unsafe fn f32x4_replace_lane(a
: v128
, imm
: usize, val
: f32) -> v128
{
445 transmute(simd_insert(a
.as_f32x4(), imm
as u32, val
))
448 /// Creates a vector with identical lanes.
450 /// Constructs a vector with `x` replicated to all 2 lanes.
452 #[cfg(not(only_node_compatible_functions))]
453 #[cfg_attr(test, assert_instr(f64x2.splat))]
454 pub fn f64x2_splat(a
: f64) -> v128
{
455 unsafe { transmute(f64x2::splat(a)) }
458 /// Extracts lane from a 128-bit vector interpreted as 2 packed f64 numbers.
460 /// Extracts the scalar value of lane specified in the immediate mode operand
465 /// This function has undefined behavior if `imm` is greater than or equal to
468 #[cfg(not(only_node_compatible_functions))]
469 #[cfg_attr(test, assert_instr(f64x2.extract_lane_s, imm = 0))]
470 #[rustc_args_required_const(1)]
471 pub unsafe fn f64x2_extract_lane(a
: v128
, imm
: usize) -> f64 {
472 simd_extract(a
.as_f64x2(), imm
as u32)
475 /// Replaces a lane from a 128-bit vector interpreted as 2 packed f64 numbers.
477 /// Replaces the scalar value of lane specified in the immediate mode operand
482 /// This function has undefined behavior if `imm` is greater than or equal to
485 #[cfg(not(only_node_compatible_functions))]
486 #[cfg_attr(test, assert_instr(f64x2.replace_lane, imm = 0))]
487 #[rustc_args_required_const(1)]
488 pub unsafe fn f64x2_replace_lane(a
: v128
, imm
: usize, val
: f64) -> v128
{
489 transmute(simd_insert(a
.as_f64x2(), imm
as u32, val
))
492 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
495 /// Returns a new vector where each lane is all ones if the pairwise elements
496 /// were equal, or all zeros if the elements were not equal.
498 #[cfg_attr(test, assert_instr(i8x16.eq))]
499 pub fn i8x16_eq(a
: v128
, b
: v128
) -> v128
{
500 unsafe { transmute(simd_eq::<_, i8x16>(a.as_i8x16(), b.as_i8x16())) }
503 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
506 /// Returns a new vector where each lane is all ones if the pairwise elements
507 /// were not equal, or all zeros if the elements were equal.
509 #[cfg_attr(test, assert_instr(i8x16.ne))]
510 pub fn i8x16_ne(a
: v128
, b
: v128
) -> v128
{
511 unsafe { transmute(simd_ne::<_, i8x16>(a.as_i8x16(), b.as_i8x16())) }
514 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
517 /// Returns a new vector where each lane is all ones if the pairwise left
518 /// element is less than the pairwise right element, or all zeros otherwise.
520 #[cfg_attr(test, assert_instr(i8x16.lt_s))]
521 pub fn i8x16_lt_s(a
: v128
, b
: v128
) -> v128
{
522 unsafe { transmute(simd_lt::<_, i8x16>(a.as_i8x16(), b.as_i8x16())) }
525 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
526 /// unsigned integers.
528 /// Returns a new vector where each lane is all ones if the pairwise left
529 /// element is less than the pairwise right element, or all zeros otherwise.
531 #[cfg_attr(test, assert_instr(i8x16.lt_u))]
532 pub fn i8x16_lt_u(a
: v128
, b
: v128
) -> v128
{
533 unsafe { transmute(simd_lt::<_, i8x16>(a.as_u8x16(), b.as_u8x16())) }
536 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
539 /// Returns a new vector where each lane is all ones if the pairwise left
540 /// element is greater than the pairwise right element, or all zeros otherwise.
542 #[cfg_attr(test, assert_instr(i8x16.gt_s))]
543 pub fn i8x16_gt_s(a
: v128
, b
: v128
) -> v128
{
544 unsafe { transmute(simd_gt::<_, i8x16>(a.as_i8x16(), b.as_i8x16())) }
547 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
548 /// unsigned integers.
550 /// Returns a new vector where each lane is all ones if the pairwise left
551 /// element is greater than the pairwise right element, or all zeros otherwise.
553 #[cfg_attr(test, assert_instr(i8x16.gt_u))]
554 pub fn i8x16_gt_u(a
: v128
, b
: v128
) -> v128
{
555 unsafe { transmute(simd_gt::<_, i8x16>(a.as_u8x16(), b.as_u8x16())) }
558 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
561 /// Returns a new vector where each lane is all ones if the pairwise left
562 /// element is less than the pairwise right element, or all zeros otherwise.
564 #[cfg_attr(test, assert_instr(i8x16.le_s))]
565 pub fn i8x16_le_s(a
: v128
, b
: v128
) -> v128
{
566 unsafe { transmute(simd_le::<_, i8x16>(a.as_i8x16(), b.as_i8x16())) }
569 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
570 /// unsigned integers.
572 /// Returns a new vector where each lane is all ones if the pairwise left
573 /// element is less than the pairwise right element, or all zeros otherwise.
575 #[cfg_attr(test, assert_instr(i8x16.le_u))]
576 pub fn i8x16_le_u(a
: v128
, b
: v128
) -> v128
{
577 unsafe { transmute(simd_le::<_, i8x16>(a.as_u8x16(), b.as_u8x16())) }
580 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
583 /// Returns a new vector where each lane is all ones if the pairwise left
584 /// element is greater than the pairwise right element, or all zeros otherwise.
586 #[cfg_attr(test, assert_instr(i8x16.ge_s))]
587 pub fn i8x16_ge_s(a
: v128
, b
: v128
) -> v128
{
588 unsafe { transmute(simd_ge::<_, i8x16>(a.as_i8x16(), b.as_i8x16())) }
591 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
592 /// unsigned integers.
594 /// Returns a new vector where each lane is all ones if the pairwise left
595 /// element is greater than the pairwise right element, or all zeros otherwise.
597 #[cfg_attr(test, assert_instr(i8x16.ge_u))]
598 pub fn i8x16_ge_u(a
: v128
, b
: v128
) -> v128
{
599 unsafe { transmute(simd_ge::<_, i8x16>(a.as_u8x16(), b.as_u8x16())) }
602 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
605 /// Returns a new vector where each lane is all ones if the pairwise elements
606 /// were equal, or all zeros if the elements were not equal.
608 #[cfg_attr(test, assert_instr(i16x8.eq))]
609 pub fn i16x8_eq(a
: v128
, b
: v128
) -> v128
{
610 unsafe { transmute(simd_eq::<_, i16x8>(a.as_i16x8(), b.as_i16x8())) }
613 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
616 /// Returns a new vector where each lane is all ones if the pairwise elements
617 /// were not equal, or all zeros if the elements were equal.
619 #[cfg_attr(test, assert_instr(i16x8.ne))]
620 pub fn i16x8_ne(a
: v128
, b
: v128
) -> v128
{
621 unsafe { transmute(simd_ne::<_, i16x8>(a.as_i16x8(), b.as_i16x8())) }
624 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
627 /// Returns a new vector where each lane is all ones if the pairwise left
628 /// element is less than the pairwise right element, or all zeros otherwise.
630 #[cfg_attr(test, assert_instr(i16x8.lt_s))]
631 pub fn i16x8_lt_s(a
: v128
, b
: v128
) -> v128
{
632 unsafe { transmute(simd_lt::<_, i16x8>(a.as_i16x8(), b.as_i16x8())) }
635 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
636 /// unsigned integers.
638 /// Returns a new vector where each lane is all ones if the pairwise left
639 /// element is less than the pairwise right element, or all zeros otherwise.
641 #[cfg_attr(test, assert_instr(i16x8.lt_u))]
642 pub fn i16x8_lt_u(a
: v128
, b
: v128
) -> v128
{
643 unsafe { transmute(simd_lt::<_, i16x8>(a.as_u16x8(), b.as_u16x8())) }
646 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
649 /// Returns a new vector where each lane is all ones if the pairwise left
650 /// element is greater than the pairwise right element, or all zeros otherwise.
652 #[cfg_attr(test, assert_instr(i16x8.gt_s))]
653 pub fn i16x8_gt_s(a
: v128
, b
: v128
) -> v128
{
654 unsafe { transmute(simd_gt::<_, i16x8>(a.as_i16x8(), b.as_i16x8())) }
657 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
658 /// unsigned integers.
660 /// Returns a new vector where each lane is all ones if the pairwise left
661 /// element is greater than the pairwise right element, or all zeros otherwise.
663 #[cfg_attr(test, assert_instr(i16x8.gt_u))]
664 pub fn i16x8_gt_u(a
: v128
, b
: v128
) -> v128
{
665 unsafe { transmute(simd_gt::<_, i16x8>(a.as_u16x8(), b.as_u16x8())) }
668 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
671 /// Returns a new vector where each lane is all ones if the pairwise left
672 /// element is less than the pairwise right element, or all zeros otherwise.
674 #[cfg_attr(test, assert_instr(i16x8.le_s))]
675 pub fn i16x8_le_s(a
: v128
, b
: v128
) -> v128
{
676 unsafe { transmute(simd_le::<_, i16x8>(a.as_i16x8(), b.as_i16x8())) }
679 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
680 /// unsigned integers.
682 /// Returns a new vector where each lane is all ones if the pairwise left
683 /// element is less than the pairwise right element, or all zeros otherwise.
685 #[cfg_attr(test, assert_instr(i16x8.le_u))]
686 pub fn i16x8_le_u(a
: v128
, b
: v128
) -> v128
{
687 unsafe { transmute(simd_le::<_, i16x8>(a.as_u16x8(), b.as_u16x8())) }
690 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
693 /// Returns a new vector where each lane is all ones if the pairwise left
694 /// element is greater than the pairwise right element, or all zeros otherwise.
696 #[cfg_attr(test, assert_instr(i16x8.ge_s))]
697 pub fn i16x8_ge_s(a
: v128
, b
: v128
) -> v128
{
698 unsafe { transmute(simd_ge::<_, i16x8>(a.as_i16x8(), b.as_i16x8())) }
701 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
702 /// unsigned integers.
704 /// Returns a new vector where each lane is all ones if the pairwise left
705 /// element is greater than the pairwise right element, or all zeros otherwise.
707 #[cfg_attr(test, assert_instr(i16x8.ge_u))]
708 pub fn i16x8_ge_u(a
: v128
, b
: v128
) -> v128
{
709 unsafe { transmute(simd_ge::<_, i16x8>(a.as_u16x8(), b.as_u16x8())) }
712 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
715 /// Returns a new vector where each lane is all ones if the pairwise elements
716 /// were equal, or all zeros if the elements were not equal.
718 #[cfg_attr(test, assert_instr(i32x4.eq))]
719 pub fn i32x4_eq(a
: v128
, b
: v128
) -> v128
{
720 unsafe { transmute(simd_eq::<_, i32x4>(a.as_i32x4(), b.as_i32x4())) }
723 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
726 /// Returns a new vector where each lane is all ones if the pairwise elements
727 /// were not equal, or all zeros if the elements were equal.
729 #[cfg_attr(test, assert_instr(i32x4.ne))]
730 pub fn i32x4_ne(a
: v128
, b
: v128
) -> v128
{
731 unsafe { transmute(simd_ne::<_, i32x4>(a.as_i32x4(), b.as_i32x4())) }
734 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
737 /// Returns a new vector where each lane is all ones if the pairwise left
738 /// element is less than the pairwise right element, or all zeros otherwise.
740 #[cfg_attr(test, assert_instr(i32x4.lt_s))]
741 pub fn i32x4_lt_s(a
: v128
, b
: v128
) -> v128
{
742 unsafe { transmute(simd_lt::<_, i32x4>(a.as_i32x4(), b.as_i32x4())) }
745 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
746 /// unsigned integers.
748 /// Returns a new vector where each lane is all ones if the pairwise left
749 /// element is less than the pairwise right element, or all zeros otherwise.
751 #[cfg_attr(test, assert_instr(i32x4.lt_u))]
752 pub fn i32x4_lt_u(a
: v128
, b
: v128
) -> v128
{
753 unsafe { transmute(simd_lt::<_, i32x4>(a.as_u32x4(), b.as_u32x4())) }
756 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
759 /// Returns a new vector where each lane is all ones if the pairwise left
760 /// element is greater than the pairwise right element, or all zeros otherwise.
762 #[cfg_attr(test, assert_instr(i32x4.gt_s))]
763 pub fn i32x4_gt_s(a
: v128
, b
: v128
) -> v128
{
764 unsafe { transmute(simd_gt::<_, i32x4>(a.as_i32x4(), b.as_i32x4())) }
767 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
768 /// unsigned integers.
770 /// Returns a new vector where each lane is all ones if the pairwise left
771 /// element is greater than the pairwise right element, or all zeros otherwise.
773 #[cfg_attr(test, assert_instr(i32x4.gt_u))]
774 pub fn i32x4_gt_u(a
: v128
, b
: v128
) -> v128
{
775 unsafe { transmute(simd_gt::<_, i32x4>(a.as_u32x4(), b.as_u32x4())) }
778 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
781 /// Returns a new vector where each lane is all ones if the pairwise left
782 /// element is less than the pairwise right element, or all zeros otherwise.
784 #[cfg_attr(test, assert_instr(i32x4.le_s))]
785 pub fn i32x4_le_s(a
: v128
, b
: v128
) -> v128
{
786 unsafe { transmute(simd_le::<_, i32x4>(a.as_i32x4(), b.as_i32x4())) }
789 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
790 /// unsigned integers.
792 /// Returns a new vector where each lane is all ones if the pairwise left
793 /// element is less than the pairwise right element, or all zeros otherwise.
795 #[cfg_attr(test, assert_instr(i32x4.le_u))]
796 pub fn i32x4_le_u(a
: v128
, b
: v128
) -> v128
{
797 unsafe { transmute(simd_le::<_, i32x4>(a.as_u32x4(), b.as_u32x4())) }
800 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
803 /// Returns a new vector where each lane is all ones if the pairwise left
804 /// element is greater than the pairwise right element, or all zeros otherwise.
806 #[cfg_attr(test, assert_instr(i32x4.ge_s))]
807 pub fn i32x4_ge_s(a
: v128
, b
: v128
) -> v128
{
808 unsafe { transmute(simd_ge::<_, i32x4>(a.as_i32x4(), b.as_i32x4())) }
811 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
812 /// unsigned integers.
814 /// Returns a new vector where each lane is all ones if the pairwise left
815 /// element is greater than the pairwise right element, or all zeros otherwise.
817 #[cfg_attr(test, assert_instr(i32x4.ge_u))]
818 pub fn i32x4_ge_u(a
: v128
, b
: v128
) -> v128
{
819 unsafe { transmute(simd_ge::<_, i32x4>(a.as_u32x4(), b.as_u32x4())) }
822 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
823 /// floating point numbers.
825 /// Returns a new vector where each lane is all ones if the pairwise elements
826 /// were equal, or all zeros if the elements were not equal.
828 #[cfg_attr(test, assert_instr(f32x4.eq))]
829 pub fn f32x4_eq(a
: v128
, b
: v128
) -> v128
{
830 unsafe { transmute(simd_eq::<_, i32x4>(a.as_f32x4(), b.as_f32x4())) }
833 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
834 /// floating point numbers.
836 /// Returns a new vector where each lane is all ones if the pairwise elements
837 /// were not equal, or all zeros if the elements were equal.
839 #[cfg_attr(test, assert_instr(f32x4.ne))]
840 pub fn f32x4_ne(a
: v128
, b
: v128
) -> v128
{
841 unsafe { transmute(simd_ne::<_, i32x4>(a.as_f32x4(), b.as_f32x4())) }
844 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
845 /// floating point numbers.
847 /// Returns a new vector where each lane is all ones if the pairwise left
848 /// element is less than the pairwise right element, or all zeros otherwise.
850 #[cfg_attr(test, assert_instr(f32x4.lt))]
851 pub fn f32x4_lt(a
: v128
, b
: v128
) -> v128
{
852 unsafe { transmute(simd_lt::<_, i32x4>(a.as_f32x4(), b.as_f32x4())) }
855 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
856 /// floating point numbers.
858 /// Returns a new vector where each lane is all ones if the pairwise left
859 /// element is greater than the pairwise right element, or all zeros otherwise.
861 #[cfg_attr(test, assert_instr(f32x4.gt))]
862 pub fn f32x4_gt(a
: v128
, b
: v128
) -> v128
{
863 unsafe { transmute(simd_gt::<_, i32x4>(a.as_f32x4(), b.as_f32x4())) }
866 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
867 /// floating point numbers.
869 /// Returns a new vector where each lane is all ones if the pairwise left
870 /// element is less than the pairwise right element, or all zeros otherwise.
872 #[cfg_attr(test, assert_instr(f32x4.le))]
873 pub fn f32x4_le(a
: v128
, b
: v128
) -> v128
{
874 unsafe { transmute(simd_le::<_, i32x4>(a.as_f32x4(), b.as_f32x4())) }
877 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
878 /// floating point numbers.
880 /// Returns a new vector where each lane is all ones if the pairwise left
881 /// element is greater than the pairwise right element, or all zeros otherwise.
883 #[cfg_attr(test, assert_instr(f32x4.ge))]
884 pub fn f32x4_ge(a
: v128
, b
: v128
) -> v128
{
885 unsafe { transmute(simd_ge::<_, i32x4>(a.as_f32x4(), b.as_f32x4())) }
888 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
889 /// floating point numbers.
891 /// Returns a new vector where each lane is all ones if the pairwise elements
892 /// were equal, or all zeros if the elements were not equal.
894 #[cfg(not(only_node_compatible_functions))]
895 #[cfg_attr(test, assert_instr(f64x2.eq))]
896 pub fn f64x2_eq(a
: v128
, b
: v128
) -> v128
{
897 unsafe { transmute(simd_eq::<_, i64x2>(a.as_f64x2(), b.as_f64x2())) }
900 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
901 /// floating point numbers.
903 /// Returns a new vector where each lane is all ones if the pairwise elements
904 /// were not equal, or all zeros if the elements were equal.
906 #[cfg(not(only_node_compatible_functions))]
907 #[cfg_attr(test, assert_instr(f64x2.ne))]
908 pub fn f64x2_ne(a
: v128
, b
: v128
) -> v128
{
909 unsafe { transmute(simd_ne::<_, i64x2>(a.as_f64x2(), b.as_f64x2())) }
912 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
913 /// floating point numbers.
915 /// Returns a new vector where each lane is all ones if the pairwise left
916 /// element is less than the pairwise right element, or all zeros otherwise.
918 #[cfg(not(only_node_compatible_functions))]
919 #[cfg_attr(test, assert_instr(f64x2.lt))]
920 pub fn f64x2_lt(a
: v128
, b
: v128
) -> v128
{
921 unsafe { transmute(simd_lt::<_, i64x2>(a.as_f64x2(), b.as_f64x2())) }
924 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
925 /// floating point numbers.
927 /// Returns a new vector where each lane is all ones if the pairwise left
928 /// element is greater than the pairwise right element, or all zeros otherwise.
930 #[cfg(not(only_node_compatible_functions))]
931 #[cfg_attr(test, assert_instr(f64x2.gt))]
932 pub fn f64x2_gt(a
: v128
, b
: v128
) -> v128
{
933 unsafe { transmute(simd_gt::<_, i64x2>(a.as_f64x2(), b.as_f64x2())) }
936 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
937 /// floating point numbers.
939 /// Returns a new vector where each lane is all ones if the pairwise left
940 /// element is less than the pairwise right element, or all zeros otherwise.
942 #[cfg(not(only_node_compatible_functions))]
943 #[cfg_attr(test, assert_instr(f64x2.le))]
944 pub fn f64x2_le(a
: v128
, b
: v128
) -> v128
{
945 unsafe { transmute(simd_le::<_, i64x2>(a.as_f64x2(), b.as_f64x2())) }
948 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
949 /// floating point numbers.
951 /// Returns a new vector where each lane is all ones if the pairwise left
952 /// element is greater than the pairwise right element, or all zeros otherwise.
954 #[cfg(not(only_node_compatible_functions))]
955 #[cfg_attr(test, assert_instr(f64x2.ge))]
956 pub fn f64x2_ge(a
: v128
, b
: v128
) -> v128
{
957 unsafe { transmute(simd_ge::<_, i64x2>(a.as_f64x2(), b.as_f64x2())) }
960 /// Flips each bit of the 128-bit input vector.
962 #[cfg_attr(test, assert_instr(v128.not))]
963 pub fn v128_not(a
: v128
) -> v128
{
964 unsafe { transmute(simd_xor(a.as_i64x2(), i64x2(!0, !0))) }
967 /// Performs a bitwise and of the two input 128-bit vectors, returning the
968 /// resulting vector.
970 #[cfg_attr(test, assert_instr(v128.and))]
971 pub fn v128_and(a
: v128
, b
: v128
) -> v128
{
972 unsafe { transmute(simd_and(a.as_i64x2(), b.as_i64x2())) }
975 /// Performs a bitwise or of the two input 128-bit vectors, returning the
976 /// resulting vector.
978 #[cfg_attr(test, assert_instr(v128.or))]
979 pub fn v128_or(a
: v128
, b
: v128
) -> v128
{
980 unsafe { transmute(simd_or(a.as_i64x2(), b.as_i64x2())) }
983 /// Performs a bitwise xor of the two input 128-bit vectors, returning the
984 /// resulting vector.
986 #[cfg_attr(test, assert_instr(v128.xor))]
987 pub fn v128_xor(a
: v128
, b
: v128
) -> v128
{
988 unsafe { transmute(simd_xor(a.as_i64x2(), b.as_i64x2())) }
991 /// Use the bitmask in `c` to select bits from `v1` when 1 and `v2` when 0.
993 #[cfg_attr(test, assert_instr(v128.bitselect))]
994 pub fn v128_bitselect(v1
: v128
, v2
: v128
, c
: v128
) -> v128
{
995 unsafe { transmute(llvm_bitselect(c.as_i8x16(), v1.as_i8x16(), v2.as_i8x16())) }
998 /// Negates a 128-bit vectors intepreted as sixteen 8-bit signed integers
1000 #[cfg_attr(test, assert_instr(i8x16.neg))]
1001 pub fn i8x16_neg(a
: v128
) -> v128
{
1002 unsafe { transmute(simd_mul(a.as_i8x16(), i8x16::splat(-1))) }
1005 /// Returns 1 if any lane is nonzero or 0 if all lanes are zero.
1007 #[cfg_attr(test, assert_instr(i8x16.any_true))]
1008 pub fn i8x16_any_true(a
: v128
) -> i32 {
1009 unsafe { llvm_i8x16_any_true(a.as_i8x16()) }
1012 /// Returns 1 if all lanes are nonzero or 0 if any lane is nonzero.
1014 #[cfg_attr(test, assert_instr(i8x16.all_true))]
1015 pub fn i8x16_all_true(a
: v128
) -> i32 {
1016 unsafe { llvm_i8x16_all_true(a.as_i8x16()) }
1019 /// Shifts each lane to the left by the specified number of bits.
1021 /// Only the low bits of the shift amount are used if the shift amount is
1022 /// greater than the lane width.
1024 #[cfg(not(only_node_compatible_functions))]
1025 #[cfg_attr(test, assert_instr(i8x16.shl))]
1026 pub fn i8x16_shl(a
: v128
, amt
: u32) -> v128
{
1027 unsafe { transmute(simd_shl(a.as_i8x16(), i8x16::splat(amt as i8))) }
1030 /// Shifts each lane to the right by the specified number of bits, sign
1033 /// Only the low bits of the shift amount are used if the shift amount is
1034 /// greater than the lane width.
1036 #[cfg(not(only_node_compatible_functions))]
1037 #[cfg_attr(test, assert_instr(i8x16.shl))]
1038 pub fn i8x16_shr_s(a
: v128
, amt
: u32) -> v128
{
1039 unsafe { transmute(simd_shr(a.as_i8x16(), i8x16::splat(amt as i8))) }
1042 /// Shifts each lane to the right by the specified number of bits, shifting in
1045 /// Only the low bits of the shift amount are used if the shift amount is
1046 /// greater than the lane width.
1048 #[cfg(not(only_node_compatible_functions))]
1049 #[cfg_attr(test, assert_instr(i8x16.shl))]
1050 pub fn i8x16_shr_u(a
: v128
, amt
: u32) -> v128
{
1051 unsafe { transmute(simd_shr(a.as_u8x16(), u8x16::splat(amt as u8))) }
1054 /// Adds two 128-bit vectors as if they were two packed sixteen 8-bit integers.
1056 #[cfg_attr(test, assert_instr(i8x16.add))]
1057 pub fn i8x16_add(a
: v128
, b
: v128
) -> v128
{
1058 unsafe { transmute(simd_add(a.as_i8x16(), b.as_i8x16())) }
1061 /// Adds two 128-bit vectors as if they were two packed sixteen 8-bit signed
1062 /// integers, saturating on overflow to `i8::MAX`.
1064 #[cfg_attr(test, assert_instr(i8x16.add_saturate_s))]
1065 pub fn i8x16_add_saturate_s(a
: v128
, b
: v128
) -> v128
{
1066 unsafe { transmute(llvm_i8x16_add_saturate_s(a.as_i8x16(), b.as_i8x16())) }
1069 /// Adds two 128-bit vectors as if they were two packed sixteen 8-bit unsigned
1070 /// integers, saturating on overflow to `u8::MAX`.
1072 #[cfg_attr(test, assert_instr(i8x16.add_saturate_u))]
1073 pub fn i8x16_add_saturate_u(a
: v128
, b
: v128
) -> v128
{
1074 unsafe { transmute(llvm_i8x16_add_saturate_u(a.as_i8x16(), b.as_i8x16())) }
1077 /// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit integers.
1079 #[cfg_attr(test, assert_instr(i8x16.sub))]
1080 pub fn i8x16_sub(a
: v128
, b
: v128
) -> v128
{
1081 unsafe { transmute(simd_sub(a.as_i8x16(), b.as_i8x16())) }
1084 /// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit
1085 /// signed integers, saturating on overflow to `i8::MIN`.
1087 #[cfg_attr(test, assert_instr(i8x16.sub_saturate_s))]
1088 pub fn i8x16_sub_saturate_s(a
: v128
, b
: v128
) -> v128
{
1089 unsafe { transmute(llvm_i8x16_sub_saturate_s(a.as_i8x16(), b.as_i8x16())) }
1092 /// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit
1093 /// unsigned integers, saturating on overflow to 0.
1095 #[cfg_attr(test, assert_instr(i8x16.sub_saturate_u))]
1096 pub fn i8x16_sub_saturate_u(a
: v128
, b
: v128
) -> v128
{
1097 unsafe { transmute(llvm_i8x16_sub_saturate_u(a.as_i8x16(), b.as_i8x16())) }
1100 /// Multiplies two 128-bit vectors as if they were two packed sixteen 8-bit
1101 /// signed integers.
1103 #[cfg_attr(test, assert_instr(i8x16.mul))]
1104 pub fn i8x16_mul(a
: v128
, b
: v128
) -> v128
{
1105 unsafe { transmute(simd_mul(a.as_i8x16(), b.as_i8x16())) }
1108 /// Negates a 128-bit vectors intepreted as eight 16-bit signed integers
1110 #[cfg_attr(test, assert_instr(i16x8.neg))]
1111 pub fn i16x8_neg(a
: v128
) -> v128
{
1112 unsafe { transmute(simd_mul(a.as_i16x8(), i16x8::splat(-1))) }
1115 /// Returns 1 if any lane is nonzero or 0 if all lanes are zero.
1117 #[cfg_attr(test, assert_instr(i16x8.any_true))]
1118 pub fn i16x8_any_true(a
: v128
) -> i32 {
1119 unsafe { llvm_i16x8_any_true(a.as_i16x8()) }
1122 /// Returns 1 if all lanes are nonzero or 0 if any lane is nonzero.
1124 #[cfg_attr(test, assert_instr(i16x8.all_true))]
1125 pub fn i16x8_all_true(a
: v128
) -> i32 {
1126 unsafe { llvm_i16x8_all_true(a.as_i16x8()) }
1129 /// Shifts each lane to the left by the specified number of bits.
1131 /// Only the low bits of the shift amount are used if the shift amount is
1132 /// greater than the lane width.
1134 #[cfg(not(only_node_compatible_functions))]
1135 #[cfg_attr(test, assert_instr(i16x8.shl))]
1136 pub fn i16x8_shl(a
: v128
, amt
: u32) -> v128
{
1137 unsafe { transmute(simd_shl(a.as_i16x8(), i16x8::splat(amt as i16))) }
1140 /// Shifts each lane to the right by the specified number of bits, sign
1143 /// Only the low bits of the shift amount are used if the shift amount is
1144 /// greater than the lane width.
1146 #[cfg(not(only_node_compatible_functions))]
1147 #[cfg_attr(test, assert_instr(i16x8.shl))]
1148 pub fn i16x8_shr_s(a
: v128
, amt
: u32) -> v128
{
1149 unsafe { transmute(simd_shr(a.as_i16x8(), i16x8::splat(amt as i16))) }
1152 /// Shifts each lane to the right by the specified number of bits, shifting in
1155 /// Only the low bits of the shift amount are used if the shift amount is
1156 /// greater than the lane width.
1158 #[cfg(not(only_node_compatible_functions))]
1159 #[cfg_attr(test, assert_instr(i16x8.shl))]
1160 pub fn i16x8_shr_u(a
: v128
, amt
: u32) -> v128
{
1161 unsafe { transmute(simd_shr(a.as_u16x8(), u16x8::splat(amt as u16))) }
1164 /// Adds two 128-bit vectors as if they were two packed eight 16-bit integers.
1166 #[cfg_attr(test, assert_instr(i16x8.add))]
1167 pub fn i16x8_add(a
: v128
, b
: v128
) -> v128
{
1168 unsafe { transmute(simd_add(a.as_i16x8(), b.as_i16x8())) }
1171 /// Adds two 128-bit vectors as if they were two packed eight 16-bit signed
1172 /// integers, saturating on overflow to `i16::MAX`.
1174 #[cfg_attr(test, assert_instr(i16x8.add_saturate_s))]
1175 pub fn i16x8_add_saturate_s(a
: v128
, b
: v128
) -> v128
{
1176 unsafe { transmute(llvm_i16x8_add_saturate_s(a.as_i16x8(), b.as_i16x8())) }
1179 /// Adds two 128-bit vectors as if they were two packed eight 16-bit unsigned
1180 /// integers, saturating on overflow to `u16::MAX`.
1182 #[cfg_attr(test, assert_instr(i16x8.add_saturate_u))]
1183 pub fn i16x8_add_saturate_u(a
: v128
, b
: v128
) -> v128
{
1184 unsafe { transmute(llvm_i16x8_add_saturate_u(a.as_i16x8(), b.as_i16x8())) }
1187 /// Subtracts two 128-bit vectors as if they were two packed eight 16-bit integers.
1189 #[cfg_attr(test, assert_instr(i16x8.sub))]
1190 pub fn i16x8_sub(a
: v128
, b
: v128
) -> v128
{
1191 unsafe { transmute(simd_sub(a.as_i16x8(), b.as_i16x8())) }
1194 /// Subtracts two 128-bit vectors as if they were two packed eight 16-bit
1195 /// signed integers, saturating on overflow to `i16::MIN`.
1197 #[cfg_attr(test, assert_instr(i16x8.sub_saturate_s))]
1198 pub fn i16x8_sub_saturate_s(a
: v128
, b
: v128
) -> v128
{
1199 unsafe { transmute(llvm_i16x8_sub_saturate_s(a.as_i16x8(), b.as_i16x8())) }
1202 /// Subtracts two 128-bit vectors as if they were two packed eight 16-bit
1203 /// unsigned integers, saturating on overflow to 0.
1205 #[cfg_attr(test, assert_instr(i16x8.sub_saturate_u))]
1206 pub fn i16x8_sub_saturate_u(a
: v128
, b
: v128
) -> v128
{
1207 unsafe { transmute(llvm_i16x8_sub_saturate_u(a.as_i16x8(), b.as_i16x8())) }
1210 /// Multiplies two 128-bit vectors as if they were two packed eight 16-bit
1211 /// signed integers.
1213 #[cfg_attr(test, assert_instr(i16x8.mul))]
1214 pub fn i16x8_mul(a
: v128
, b
: v128
) -> v128
{
1215 unsafe { transmute(simd_mul(a.as_i16x8(), b.as_i16x8())) }
1218 /// Negates a 128-bit vectors intepreted as four 32-bit signed integers
1220 #[cfg_attr(test, assert_instr(i32x4.neg))]
1221 pub fn i32x4_neg(a
: v128
) -> v128
{
1222 unsafe { transmute(simd_mul(a.as_i32x4(), i32x4::splat(-1))) }
1225 /// Returns 1 if any lane is nonzero or 0 if all lanes are zero.
1227 #[cfg_attr(test, assert_instr(i32x4.any_true))]
1228 pub fn i32x4_any_true(a
: v128
) -> i32 {
1229 unsafe { llvm_i32x4_any_true(a.as_i32x4()) }
1232 /// Returns 1 if all lanes are nonzero or 0 if any lane is nonzero.
1234 #[cfg_attr(test, assert_instr(i32x4.all_true))]
1235 pub fn i32x4_all_true(a
: v128
) -> i32 {
1236 unsafe { llvm_i32x4_all_true(a.as_i32x4()) }
1239 /// Shifts each lane to the left by the specified number of bits.
1241 /// Only the low bits of the shift amount are used if the shift amount is
1242 /// greater than the lane width.
1244 #[cfg(not(only_node_compatible_functions))]
1245 #[cfg_attr(test, assert_instr(i32x4.shl))]
1246 pub fn i32x4_shl(a
: v128
, amt
: u32) -> v128
{
1247 unsafe { transmute(simd_shl(a.as_i32x4(), i32x4::splat(amt as i32))) }
1250 /// Shifts each lane to the right by the specified number of bits, sign
1253 /// Only the low bits of the shift amount are used if the shift amount is
1254 /// greater than the lane width.
1256 #[cfg(not(only_node_compatible_functions))]
1257 #[cfg_attr(test, assert_instr(i32x4.shl))]
1258 pub fn i32x4_shr_s(a
: v128
, amt
: u32) -> v128
{
1259 unsafe { transmute(simd_shr(a.as_i32x4(), i32x4::splat(amt as i32))) }
1262 /// Shifts each lane to the right by the specified number of bits, shifting in
1265 /// Only the low bits of the shift amount are used if the shift amount is
1266 /// greater than the lane width.
1268 #[cfg(not(only_node_compatible_functions))]
1269 #[cfg_attr(test, assert_instr(i32x4.shl))]
1270 pub fn i32x4_shr_u(a
: v128
, amt
: u32) -> v128
{
1271 unsafe { transmute(simd_shr(a.as_u32x4(), u32x4::splat(amt as u32))) }
1274 /// Adds two 128-bit vectors as if they were two packed four 32-bit integers.
1276 #[cfg_attr(test, assert_instr(i32x4.add))]
1277 pub fn i32x4_add(a
: v128
, b
: v128
) -> v128
{
1278 unsafe { transmute(simd_add(a.as_i32x4(), b.as_i32x4())) }
1281 /// Subtracts two 128-bit vectors as if they were two packed four 32-bit integers.
1283 #[cfg_attr(test, assert_instr(i32x4.sub))]
1284 pub fn i32x4_sub(a
: v128
, b
: v128
) -> v128
{
1285 unsafe { transmute(simd_sub(a.as_i32x4(), b.as_i32x4())) }
1288 /// Multiplies two 128-bit vectors as if they were two packed four 32-bit
1289 /// signed integers.
1291 #[cfg_attr(test, assert_instr(i32x4.mul))]
1292 pub fn i32x4_mul(a
: v128
, b
: v128
) -> v128
{
1293 unsafe { transmute(simd_mul(a.as_i32x4(), b.as_i32x4())) }
1296 /// Negates a 128-bit vectors intepreted as two 64-bit signed integers
1298 #[cfg(not(only_node_compatible_functions))]
1299 #[cfg_attr(test, assert_instr(i32x4.neg))]
1300 pub fn i64x2_neg(a
: v128
) -> v128
{
1301 unsafe { transmute(simd_mul(a.as_i64x2(), i64x2::splat(-1))) }
1304 /// Returns 1 if any lane is nonzero or 0 if all lanes are zero.
1306 #[cfg(not(only_node_compatible_functions))]
1307 #[cfg_attr(test, assert_instr(i64x2.any_true))]
1308 pub fn i64x2_any_true(a
: v128
) -> i32 {
1309 unsafe { llvm_i64x2_any_true(a.as_i64x2()) }
1312 /// Returns 1 if all lanes are nonzero or 0 if any lane is nonzero.
1314 #[cfg(not(only_node_compatible_functions))]
1315 #[cfg_attr(test, assert_instr(i64x2.all_true))]
1316 pub fn i64x2_all_true(a
: v128
) -> i32 {
1317 unsafe { llvm_i64x2_all_true(a.as_i64x2()) }
1320 /// Shifts each lane to the left by the specified number of bits.
1322 /// Only the low bits of the shift amount are used if the shift amount is
1323 /// greater than the lane width.
1325 #[cfg(not(only_node_compatible_functions))]
1326 #[cfg_attr(test, assert_instr(i64x2.shl))]
1327 pub fn i64x2_shl(a
: v128
, amt
: u32) -> v128
{
1328 unsafe { transmute(simd_shl(a.as_i64x2(), i64x2::splat(amt as i64))) }
1331 /// Shifts each lane to the right by the specified number of bits, sign
1334 /// Only the low bits of the shift amount are used if the shift amount is
1335 /// greater than the lane width.
1337 #[cfg(not(only_node_compatible_functions))]
1338 #[cfg_attr(test, assert_instr(i64x2.shl))]
1339 pub fn i64x2_shr_s(a
: v128
, amt
: u32) -> v128
{
1340 unsafe { transmute(simd_shr(a.as_i64x2(), i64x2::splat(amt as i64))) }
1343 /// Shifts each lane to the right by the specified number of bits, shifting in
1346 /// Only the low bits of the shift amount are used if the shift amount is
1347 /// greater than the lane width.
1349 #[cfg(not(only_node_compatible_functions))]
1350 #[cfg_attr(test, assert_instr(i64x2.shl))]
1351 pub fn i64x2_shr_u(a
: v128
, amt
: u32) -> v128
{
1352 unsafe { transmute(simd_shr(a.as_u64x2(), u64x2::splat(amt as u64))) }
1355 /// Adds two 128-bit vectors as if they were two packed two 64-bit integers.
1357 #[cfg(not(only_node_compatible_functions))]
1358 #[cfg_attr(test, assert_instr(i64x2.add))]
1359 pub fn i64x2_add(a
: v128
, b
: v128
) -> v128
{
1360 unsafe { transmute(simd_add(a.as_i64x2(), b.as_i64x2())) }
1363 /// Subtracts two 128-bit vectors as if they were two packed two 64-bit integers.
1365 #[cfg(not(only_node_compatible_functions))]
1366 #[cfg_attr(test, assert_instr(i64x2.sub))]
1367 pub fn i64x2_sub(a
: v128
, b
: v128
) -> v128
{
1368 unsafe { transmute(simd_sub(a.as_i64x2(), b.as_i64x2())) }
1371 /// Calculates the absolute value of each lane of a 128-bit vector interpreted
1372 /// as four 32-bit floating point numbers.
1374 #[cfg_attr(test, assert_instr(f32x4.abs))]
1375 pub fn f32x4_abs(a
: v128
) -> v128
{
1376 unsafe { transmute(llvm_f32x4_abs(a.as_f32x4())) }
1379 /// Negates each lane of a 128-bit vector interpreted as four 32-bit floating
1382 #[cfg_attr(test, assert_instr(f32x4.neg))]
1383 pub fn f32x4_neg(a
: v128
) -> v128
{
1384 unsafe { f32x4_mul(a, transmute(f32x4(-1.0, -1.0, -1.0, -1.0))) }
1387 /// Calculates the square root of each lane of a 128-bit vector interpreted as
1388 /// four 32-bit floating point numbers.
1390 #[cfg(not(only_node_compatible_functions))]
1391 #[cfg_attr(test, assert_instr(f32x4.sqrt))]
1392 pub fn f32x4_sqrt(a
: v128
) -> v128
{
1393 unsafe { transmute(llvm_f32x4_sqrt(a.as_f32x4())) }
1396 /// Adds pairwise lanes of two 128-bit vectors interpreted as four 32-bit
1397 /// floating point numbers.
1399 #[cfg_attr(test, assert_instr(f32x4.add))]
1400 pub fn f32x4_add(a
: v128
, b
: v128
) -> v128
{
1401 unsafe { transmute(simd_add(a.as_f32x4(), b.as_f32x4())) }
1404 /// Subtracts pairwise lanes of two 128-bit vectors interpreted as four 32-bit
1405 /// floating point numbers.
1407 #[cfg_attr(test, assert_instr(f32x4.sub))]
1408 pub fn f32x4_sub(a
: v128
, b
: v128
) -> v128
{
1409 unsafe { transmute(simd_sub(a.as_f32x4(), b.as_f32x4())) }
1412 /// Multiplies pairwise lanes of two 128-bit vectors interpreted as four 32-bit
1413 /// floating point numbers.
1415 #[cfg_attr(test, assert_instr(f32x4.mul))]
1416 pub fn f32x4_mul(a
: v128
, b
: v128
) -> v128
{
1417 unsafe { transmute(simd_mul(a.as_f32x4(), b.as_f32x4())) }
1420 /// Divides pairwise lanes of two 128-bit vectors interpreted as four 32-bit
1421 /// floating point numbers.
1423 #[cfg(not(only_node_compatible_functions))]
1424 #[cfg_attr(test, assert_instr(f32x4.div))]
1425 pub fn f32x4_div(a
: v128
, b
: v128
) -> v128
{
1426 unsafe { transmute(simd_div(a.as_f32x4(), b.as_f32x4())) }
1429 /// Calculates the minimum of pairwise lanes of two 128-bit vectors interpreted
1430 /// as four 32-bit floating point numbers.
1432 #[cfg_attr(test, assert_instr(f32x4.min))]
1433 pub fn f32x4_min(a
: v128
, b
: v128
) -> v128
{
1434 unsafe { transmute(llvm_f32x4_min(a.as_f32x4(), b.as_f32x4())) }
1437 /// Calculates the maximum of pairwise lanes of two 128-bit vectors interpreted
1438 /// as four 32-bit floating point numbers.
1440 #[cfg_attr(test, assert_instr(f32x4.max))]
1441 pub fn f32x4_max(a
: v128
, b
: v128
) -> v128
{
1442 unsafe { transmute(llvm_f32x4_max(a.as_f32x4(), b.as_f32x4())) }
1445 /// Calculates the absolute value of each lane of a 128-bit vector interpreted
1446 /// as two 64-bit floating point numbers.
1448 #[cfg(not(only_node_compatible_functions))]
1449 #[cfg_attr(test, assert_instr(f64x2.abs))]
1450 pub fn f64x2_abs(a
: v128
) -> v128
{
1451 unsafe { transmute(llvm_f64x2_abs(a.as_f64x2())) }
1454 /// Negates each lane of a 128-bit vector interpreted as two 64-bit floating
1457 #[cfg(not(only_node_compatible_functions))]
1458 #[cfg_attr(test, assert_instr(f64x2.abs))]
1459 pub fn f64x2_neg(a
: v128
) -> v128
{
1460 unsafe { f64x2_mul(a, transmute(f64x2(-1.0, -1.0))) }
1463 /// Calculates the square root of each lane of a 128-bit vector interpreted as
1464 /// two 64-bit floating point numbers.
1466 #[cfg(not(only_node_compatible_functions))]
1467 #[cfg_attr(test, assert_instr(f64x2.sqrt))]
1468 pub fn f64x2_sqrt(a
: v128
) -> v128
{
1469 unsafe { transmute(llvm_f64x2_sqrt(a.as_f64x2())) }
1472 /// Adds pairwise lanes of two 128-bit vectors interpreted as two 64-bit
1473 /// floating point numbers.
1475 #[cfg(not(only_node_compatible_functions))]
1476 #[cfg_attr(test, assert_instr(f64x2.add))]
1477 pub fn f64x2_add(a
: v128
, b
: v128
) -> v128
{
1478 unsafe { transmute(simd_add(a.as_f64x2(), b.as_f64x2())) }
1481 /// Subtracts pairwise lanes of two 128-bit vectors interpreted as two 64-bit
1482 /// floating point numbers.
1484 #[cfg(not(only_node_compatible_functions))]
1485 #[cfg_attr(test, assert_instr(f64x2.sub))]
1486 pub fn f64x2_sub(a
: v128
, b
: v128
) -> v128
{
1487 unsafe { transmute(simd_sub(a.as_f64x2(), b.as_f64x2())) }
1490 /// Multiplies pairwise lanes of two 128-bit vectors interpreted as two 64-bit
1491 /// floating point numbers.
1493 #[cfg(not(only_node_compatible_functions))]
1494 #[cfg_attr(test, assert_instr(f64x2.mul))]
1495 pub fn f64x2_mul(a
: v128
, b
: v128
) -> v128
{
1496 unsafe { transmute(simd_mul(a.as_f64x2(), b.as_f64x2())) }
1499 /// Divides pairwise lanes of two 128-bit vectors interpreted as two 64-bit
1500 /// floating point numbers.
1502 #[cfg(not(only_node_compatible_functions))]
1503 #[cfg_attr(test, assert_instr(f64x2.div))]
1504 pub fn f64x2_div(a
: v128
, b
: v128
) -> v128
{
1505 unsafe { transmute(simd_div(a.as_f64x2(), b.as_f64x2())) }
1508 /// Calculates the minimum of pairwise lanes of two 128-bit vectors interpreted
1509 /// as two 64-bit floating point numbers.
1511 #[cfg(not(only_node_compatible_functions))]
1512 #[cfg_attr(test, assert_instr(f64x2.min))]
1513 pub fn f64x2_min(a
: v128
, b
: v128
) -> v128
{
1514 unsafe { transmute(llvm_f64x2_min(a.as_f64x2(), b.as_f64x2())) }
1517 /// Calculates the maximum of pairwise lanes of two 128-bit vectors interpreted
1518 /// as two 64-bit floating point numbers.
1520 #[cfg(not(only_node_compatible_functions))]
1521 #[cfg_attr(test, assert_instr(f64x2.max))]
1522 pub fn f64x2_max(a
: v128
, b
: v128
) -> v128
{
1523 unsafe { transmute(llvm_f64x2_max(a.as_f64x2(), b.as_f64x2())) }
1526 /// Converts a 128-bit vector interpreted as four 32-bit floating point numbers
1527 /// into a 128-bit vector of four 32-bit signed integers.
1529 /// NaN is converted to 0 and if it's out of bounds it becomes the nearest
1530 /// representable intger.
1532 #[cfg_attr(test, assert_instr("i32x4.trunc_sat_f32x4_s"))]
1533 pub fn i32x4_trunc_s_f32x4_sat(a
: v128
) -> v128
{
1534 unsafe { transmute(simd_cast::<_, i32x4>(a.as_f32x4())) }
1537 /// Converts a 128-bit vector interpreted as four 32-bit floating point numbers
1538 /// into a 128-bit vector of four 32-bit unsigned integers.
1540 /// NaN is converted to 0 and if it's out of bounds it becomes the nearest
1541 /// representable intger.
1543 #[cfg_attr(test, assert_instr("i32x4.trunc_sat_f32x4_u"))]
1544 pub fn i32x4_trunc_u_f32x4_sat(a
: v128
) -> v128
{
1545 unsafe { transmute(simd_cast::<_, u32x4>(a.as_f32x4())) }
1548 /// Converts a 128-bit vector interpreted as two 64-bit floating point numbers
1549 /// into a 128-bit vector of two 64-bit signed integers.
1551 /// NaN is converted to 0 and if it's out of bounds it becomes the nearest
1552 /// representable intger.
1554 #[cfg(not(only_node_compatible_functions))]
1555 #[cfg_attr(test, assert_instr("i64x2.trunc_s/f64x2:sat"))]
1556 pub fn i64x2_trunc_s_f64x2_sat(a
: v128
) -> v128
{
1557 unsafe { transmute(simd_cast::<_, i64x2>(a.as_f64x2())) }
1560 /// Converts a 128-bit vector interpreted as two 64-bit floating point numbers
1561 /// into a 128-bit vector of two 64-bit unsigned integers.
1563 /// NaN is converted to 0 and if it's out of bounds it becomes the nearest
1564 /// representable intger.
1566 #[cfg(not(only_node_compatible_functions))]
1567 #[cfg_attr(test, assert_instr("i64x2.trunc_u/f64x2:sat"))]
1568 pub fn i64x2_trunc_u_f64x2_sat(a
: v128
) -> v128
{
1569 unsafe { transmute(simd_cast::<_, u64x2>(a.as_f64x2())) }
1572 /// Converts a 128-bit vector interpreted as four 32-bit signed integers into a
1573 /// 128-bit vector of four 32-bit floating point numbers.
1575 #[cfg_attr(test, assert_instr("f32x4.convert_i32x4_s"))]
1576 pub fn f32x4_convert_i32x4_s(a
: v128
) -> v128
{
1577 unsafe { transmute(simd_cast::<_, f32x4>(a.as_i32x4())) }
1580 /// Converts a 128-bit vector interpreted as four 32-bit unsigned integers into a
1581 /// 128-bit vector of four 32-bit floating point numbers.
1583 #[cfg_attr(test, assert_instr("f32x4.convert_i32x4_u"))]
1584 pub fn f32x4_convert_i32x4_u(a
: v128
) -> v128
{
1585 unsafe { transmute(simd_cast::<_, f32x4>(a.as_u32x4())) }
1588 /// Converts a 128-bit vector interpreted as two 64-bit signed integers into a
1589 /// 128-bit vector of two 64-bit floating point numbers.
1591 #[cfg(not(only_node_compatible_functions))]
1592 #[cfg_attr(test, assert_instr("f64x2.convert_s/i64x2"))]
1593 pub fn f64x2_convert_s_i64x2(a
: v128
) -> v128
{
1594 unsafe { transmute(simd_cast::<_, f64x2>(a.as_i64x2())) }
1597 /// Converts a 128-bit vector interpreted as two 64-bit unsigned integers into a
1598 /// 128-bit vector of two 64-bit floating point numbers.
1600 #[cfg(not(only_node_compatible_functions))]
1601 #[cfg_attr(test, assert_instr("f64x2.convert_u/i64x2"))]
1602 pub fn f64x2_convert_u_i64x2(a
: v128
) -> v128
{
1603 unsafe { transmute(simd_cast::<_, f64x2>(a.as_u64x2())) }
1611 use std
::num
::Wrapping
;
1612 use std
::prelude
::v1
::*;
1613 use wasm_bindgen_test
::*;
1615 fn compare_bytes(a
: v128
, b
: v128
) {
1616 let a
: [u8; 16] = unsafe { transmute(a) }
;
1617 let b
: [u8; 16] = unsafe { transmute(b) }
;
1621 #[wasm_bindgen_test]
1622 #[cfg(not(only_node_compatible_functions))]
1623 fn test_v128_const() {
1625 unsafe { super::v128_const(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) }
;
1626 compare_bytes(A
, A
);
1629 macro_rules
! test_splat
{
1630 ($test_id
:ident
: $val
:expr
=> $
($vals
:expr
),*) => {
1631 #[wasm_bindgen_test]
1633 let a
= super::$
test_id($val
);
1634 let b
: v128
= unsafe {
1635 transmute([$
($vals
as u8),*])
1637 compare_bytes(a
, b
);
1642 test_splat
!(i8x16_splat
: 42 => 42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42);
1643 test_splat
!(i16x8_splat
: 42 => 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0);
1644 test_splat
!(i32x4_splat
: 42 => 42, 0, 0, 0, 42, 0, 0, 0, 42, 0, 0, 0, 42, 0, 0, 0);
1645 #[cfg(not(only_node_compatible_functions))]
1646 test_splat
!(i64x2_splat
: 42 => 42, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0);
1647 test_splat
!(f32x4_splat
: 42. => 0, 0, 40, 66, 0, 0, 40, 66, 0, 0, 40, 66, 0, 0, 40, 66);
1648 #[cfg(not(only_node_compatible_functions))]
1649 test_splat
!(f64x2_splat
: 42. => 0, 0, 0, 0, 0, 0, 69, 64, 0, 0, 0, 0, 0, 0, 69, 64);
1651 // tests extract and replace lanes
1652 macro_rules
! test_extract
{
1654 name
: $test_id
:ident
,
1655 extract
: $extract
:ident
,
1656 replace
: $replace
:ident
,
1659 indices
: [$
($idx
:expr
),*],
1661 #[wasm_bindgen_test]
1664 let arr
: [$elem
; $count
] = [123 as $elem
; $count
];
1665 let vec
: v128
= transmute(arr
);
1667 assert_eq
!($
extract(vec
, $idx
), 123 as $elem
);
1670 // create a vector from array and check that the indices contain
1671 // the same values as in the array:
1672 let arr
: [$elem
; $count
] = [$
($idx
as $elem
),*];
1673 let vec
: v128
= transmute(arr
);
1675 assert_eq
!($
extract(vec
, $idx
), $idx
as $elem
);
1677 let tmp
= $
replace(vec
, $idx
, 124 as $elem
);
1678 assert_eq
!($
extract(tmp
, $idx
), 124 as $elem
);
1686 name
: test_i8x16_extract_replace
,
1687 extract
: i8x16_extract_lane
,
1688 replace
: i8x16_replace_lane
,
1691 indices
: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
1694 name
: test_i16x8_extract_replace
,
1695 extract
: i16x8_extract_lane
,
1696 replace
: i16x8_replace_lane
,
1699 indices
: [0, 1, 2, 3, 4, 5, 6, 7],
1702 name
: test_i32x4_extract_replace
,
1703 extract
: i32x4_extract_lane
,
1704 replace
: i32x4_replace_lane
,
1707 indices
: [0, 1, 2, 3],
1709 #[cfg(not(only_node_compatible_functions))]
1711 name
: test_i64x2_extract_replace
,
1712 extract
: i64x2_extract_lane
,
1713 replace
: i64x2_replace_lane
,
1719 name
: test_f32x4_extract_replace
,
1720 extract
: f32x4_extract_lane
,
1721 replace
: f32x4_replace_lane
,
1724 indices
: [0, 1, 2, 3],
1726 #[cfg(not(only_node_compatible_functions))]
1728 name
: test_f64x2_extract_replace
,
1729 extract
: f64x2_extract_lane
,
1730 replace
: f64x2_replace_lane
,
1736 macro_rules
! test_binop
{
1739 $
([$
($vec1
:tt
)*] ($op
:tt
| $f
:ident
) [$
($vec2
:tt
)*],)*
1742 #[wasm_bindgen_test]
1746 let v1
= [$
($vec1
)*];
1747 let v2
= [$
($vec2
)*];
1748 let v1_v128
: v128
= mem
::transmute(v1
);
1749 let v2_v128
: v128
= mem
::transmute(v2
);
1750 let v3_v128
= super::$
f(v1_v128
, v2_v128
);
1751 let mut v3
= [$
($vec1
)*];
1753 v3
= mem
::transmute(v3_v128
);
1755 for (i
, actual
) in v3
.iter().enumerate() {
1756 let expected
= (Wrapping(v1
[i
]) $op
Wrapping(v2
[i
])).0;
1757 assert_eq
!(*actual
, expected
);
1765 macro_rules
! test_unop
{
1768 $
(($op
:tt
| $f
:ident
) [$
($vec1
:tt
)*],)*
1771 #[wasm_bindgen_test]
1775 let v1
= [$
($vec1
)*];
1776 let v1_v128
: v128
= mem
::transmute(v1
);
1777 let v2_v128
= super::$
f(v1_v128
);
1778 let mut v2
= [$
($vec1
)*];
1780 v2
= mem
::transmute(v2_v128
);
1782 for (i
, actual
) in v2
.iter().enumerate() {
1783 let expected
= ($op
Wrapping(v1
[i
])).0;
1784 assert_eq
!(*actual
, expected
);
1794 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
1796 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
1798 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
1800 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
1802 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
1804 [127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 9, -24],
1807 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
1809 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
1811 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
1813 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
1815 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
1817 [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
1820 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
1822 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
1824 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
1826 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
1828 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
1830 [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 30, 3],
1834 [0i16, 0, 0, 0, 0, 0, 0, 0]
1836 [1i16, 1, 1, 1, 1, 1, 1, 1],
1838 [1i16, 2, 3, 4, 5, 6, 7, 8]
1840 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
1844 [0i16, 0, 0, 0, 0, 0, 0, 0]
1846 [1i16, 1, 1, 1, 1, 1, 1, 1],
1848 [1i16, 2, 3, 4, 5, 6, 7, 8]
1850 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
1854 [0i16, 0, 0, 0, 0, 0, 0, 0]
1856 [1i16, 1, 1, 1, 1, 1, 1, 1],
1858 [1i16, 2, 3, 4, 5, 6, 7, 8]
1860 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
1864 [0i32, 0, 0, 0] (+ | i32x4_add
) [1, 2, 3, 4],
1865 [1i32, 1283, i32::MAX
, i32::MIN
]
1871 [0i32, 0, 0, 0] (- | i32x4_sub
) [1, 2, 3, 4],
1872 [1i32, 1283, i32::MAX
, i32::MIN
]
1878 [0i32, 0, 0, 0] (* | i32x4_mul
) [1, 2, 3, 4],
1879 [1i32, 1283, i32::MAX
, i32::MIN
]
1884 // TODO: test_i64x2_add
1885 // TODO: test_i64x2_sub
1891 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
1894 [-2i8, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
1897 [-127i8, -44, 43, 126, 4, -128, 127, -59, -43, 39, -69, 79, -3, 35, 83, 13],
1901 (- | i16x8_neg
) [1i16, 1, 1, 1, 1, 1, 1, 1],
1902 (- | i16x8_neg
) [2i16, 0x7fff, !0, 4, 42, -5, 33, -4847],
1906 (- | i32x4_neg
) [1i32, 2, 3, 4],
1907 (- | i32x4_neg
) [i32::MIN
, i32::MAX
, 0, 4],
1910 // TODO: test_i64x2_neg
1913 // #[wasm_bindgen_test]
1914 // fn v8x16_shuffle() {
1916 // let a = [0_u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
1918 // 16_u8, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
1922 // let vec_a: v128 = transmute(a);
1923 // let vec_b: v128 = transmute(b);
1925 // let vec_r = v8x16_shuffle!(
1928 // [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
1932 // [0_u8, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30];
1933 // let vec_e: v128 = transmute(e);
1934 // compare_bytes(vec_r, vec_e);
1938 // macro_rules! floating_point {
1950 // trait IsNan: Sized {
1951 // fn is_nan(self) -> bool {
1955 // impl IsNan for i8 {}
1956 // impl IsNan for i16 {}
1957 // impl IsNan for i32 {}
1958 // impl IsNan for i64 {}
1960 // macro_rules! test_bop {
1961 // ($id:ident[$ety:ident; $ecount:expr] |
1962 // $binary_op:ident [$op_test_id:ident] :
1963 // ([$($in_a:expr),*], [$($in_b:expr),*]) => [$($out:expr),*]) => {
1965 // $id[$ety; $ecount] => $ety | $binary_op [ $op_test_id ]:
1966 // ([$($in_a),*], [$($in_b),*]) => [$($out),*]
1970 // ($id:ident[$ety:ident; $ecount:expr] => $oty:ident |
1971 // $binary_op:ident [$op_test_id:ident] :
1972 // ([$($in_a:expr),*], [$($in_b:expr),*]) => [$($out:expr),*]) => {
1973 // #[wasm_bindgen_test]
1974 // fn $op_test_id() {
1976 // let a_input: [$ety; $ecount] = [$($in_a),*];
1977 // let b_input: [$ety; $ecount] = [$($in_b),*];
1978 // let output: [$oty; $ecount] = [$($out),*];
1980 // let a_vec_in: v128 = transmute(a_input);
1981 // let b_vec_in: v128 = transmute(b_input);
1982 // let vec_res: v128 = $id::$binary_op(a_vec_in, b_vec_in);
1984 // let res: [$oty; $ecount] = transmute(vec_res);
1986 // if !floating_point!($ety) {
1987 // assert_eq!(res, output);
1989 // for i in 0..$ecount {
1991 // let o = output[i];
1992 // assert_eq!(r.is_nan(), o.is_nan());
1994 // assert_eq!(r, o);
2003 // macro_rules! test_bops {
2004 // ($id:ident[$ety:ident; $ecount:expr] |
2005 // $binary_op:ident [$op_test_id:ident]:
2006 // ([$($in_a:expr),*], $in_b:expr) => [$($out:expr),*]) => {
2007 // #[wasm_bindgen_test]
2008 // fn $op_test_id() {
2010 // let a_input: [$ety; $ecount] = [$($in_a),*];
2011 // let output: [$ety; $ecount] = [$($out),*];
2013 // let a_vec_in: v128 = transmute(a_input);
2014 // let vec_res: v128 = $id::$binary_op(a_vec_in, $in_b);
2016 // let res: [$ety; $ecount] = transmute(vec_res);
2017 // assert_eq!(res, output);
2023 // macro_rules! test_uop {
2024 // ($id:ident[$ety:ident; $ecount:expr] |
2025 // $unary_op:ident [$op_test_id:ident]: [$($in_a:expr),*] => [$($out:expr),*]) => {
2026 // #[wasm_bindgen_test]
2027 // fn $op_test_id() {
2029 // let a_input: [$ety; $ecount] = [$($in_a),*];
2030 // let output: [$ety; $ecount] = [$($out),*];
2032 // let a_vec_in: v128 = transmute(a_input);
2033 // let vec_res: v128 = $id::$unary_op(a_vec_in);
2035 // let res: [$ety; $ecount] = transmute(vec_res);
2036 // assert_eq!(res, output);
2044 // test_bops!(i8x16[i8; 16] | shl[i8x16_shl_test]:
2045 // ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
2046 // [0, -2, 4, 6, 8, 10, 12, -2, 2, 2, 2, 2, 2, 2, 2, 2]);
2047 // test_bops!(i16x8[i16; 8] | shl[i16x8_shl_test]:
2048 // ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
2049 // [0, -2, 4, 6, 8, 10, 12, -2]);
2050 // test_bops!(i32x4[i32; 4] | shl[i32x4_shl_test]:
2051 // ([0, -1, 2, 3], 1) => [0, -2, 4, 6]);
2052 // test_bops!(i64x2[i64; 2] | shl[i64x2_shl_test]:
2053 // ([0, -1], 1) => [0, -2]);
2055 // test_bops!(i8x16[i8; 16] | shr_s[i8x16_shr_s_test]:
2056 // ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
2057 // [0, -1, 1, 1, 2, 2, 3, 63, 0, 0, 0, 0, 0, 0, 0, 0]);
2058 // test_bops!(i16x8[i16; 8] | shr_s[i16x8_shr_s_test]:
2059 // ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
2060 // [0, -1, 1, 1, 2, 2, 3, i16::MAX / 2]);
2061 // test_bops!(i32x4[i32; 4] | shr_s[i32x4_shr_s_test]:
2062 // ([0, -1, 2, 3], 1) => [0, -1, 1, 1]);
2063 // test_bops!(i64x2[i64; 2] | shr_s[i64x2_shr_s_test]:
2064 // ([0, -1], 1) => [0, -1]);
2066 // test_bops!(i8x16[i8; 16] | shr_u[i8x16_shr_u_test]:
2067 // ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
2068 // [0, i8::MAX, 1, 1, 2, 2, 3, 63, 0, 0, 0, 0, 0, 0, 0, 0]);
2069 // test_bops!(i16x8[i16; 8] | shr_u[i16x8_shr_u_test]:
2070 // ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
2071 // [0, i16::MAX, 1, 1, 2, 2, 3, i16::MAX / 2]);
2072 // test_bops!(i32x4[i32; 4] | shr_u[i32x4_shr_u_test]:
2073 // ([0, -1, 2, 3], 1) => [0, i32::MAX, 1, 1]);
2074 // test_bops!(i64x2[i64; 2] | shr_u[i64x2_shr_u_test]:
2075 // ([0, -1], 1) => [0, i64::MAX]);
2077 // #[wasm_bindgen_test]
2078 // fn v128_bitwise_logical_ops() {
2080 // let a: [u32; 4] = [u32::MAX, 0, u32::MAX, 0];
2081 // let b: [u32; 4] = [u32::MAX; 4];
2082 // let c: [u32; 4] = [0; 4];
2084 // let vec_a: v128 = transmute(a);
2085 // let vec_b: v128 = transmute(b);
2086 // let vec_c: v128 = transmute(c);
2088 // let r: v128 = v128::and(vec_a, vec_a);
2089 // compare_bytes(r, vec_a);
2090 // let r: v128 = v128::and(vec_a, vec_b);
2091 // compare_bytes(r, vec_a);
2092 // let r: v128 = v128::or(vec_a, vec_b);
2093 // compare_bytes(r, vec_b);
2094 // let r: v128 = v128::not(vec_b);
2095 // compare_bytes(r, vec_c);
2096 // let r: v128 = v128::xor(vec_a, vec_c);
2097 // compare_bytes(r, vec_a);
2099 // let r: v128 = v128::bitselect(vec_b, vec_c, vec_b);
2100 // compare_bytes(r, vec_b);
2101 // let r: v128 = v128::bitselect(vec_b, vec_c, vec_c);
2102 // compare_bytes(r, vec_c);
2103 // let r: v128 = v128::bitselect(vec_b, vec_c, vec_a);
2104 // compare_bytes(r, vec_a);
2108 // macro_rules! test_bool_red {
2109 // ($id:ident[$test_id:ident] | [$($true:expr),*] | [$($false:expr),*] | [$($alt:expr),*]) => {
2110 // #[wasm_bindgen_test]
2113 // let vec_a: v128 = transmute([$($true),*]); // true
2114 // let vec_b: v128 = transmute([$($false),*]); // false
2115 // let vec_c: v128 = transmute([$($alt),*]); // alternating
2117 // assert_eq!($id::any_true(vec_a), 1);
2118 // assert_eq!($id::any_true(vec_b), 0);
2119 // assert_eq!($id::any_true(vec_c), 1);
2121 // assert_eq!($id::all_true(vec_a), 1);
2122 // assert_eq!($id::all_true(vec_b), 0);
2123 // assert_eq!($id::all_true(vec_c), 0);
2130 // i8x16[i8x16_boolean_reductions]
2131 // | [1_i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
2132 // | [0_i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
2133 // | [1_i8, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]
2136 // i16x8[i16x8_boolean_reductions]
2137 // | [1_i16, 1, 1, 1, 1, 1, 1, 1]
2138 // | [0_i16, 0, 0, 0, 0, 0, 0, 0]
2139 // | [1_i16, 0, 1, 0, 1, 0, 1, 0]
2142 // i32x4[i32x4_boolean_reductions]
2143 // | [1_i32, 1, 1, 1]
2144 // | [0_i32, 0, 0, 0]
2145 // | [1_i32, 0, 1, 0]
2148 // i64x2[i64x2_boolean_reductions] | [1_i64, 1] | [0_i64, 0] | [1_i64, 0]
2151 // test_bop!(i8x16[i8; 16] | eq[i8x16_eq_test]:
2152 // ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
2153 // [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
2154 // [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
2155 // test_bop!(i16x8[i16; 8] | eq[i16x8_eq_test]:
2156 // ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
2157 // [-1, 0, -1, 0 ,-1, 0, -1, -1]);
2158 // test_bop!(i32x4[i32; 4] | eq[i32x4_eq_test]:
2159 // ([0, 1, 2, 3], [0, 2, 2, 4]) => [-1, 0, -1, 0]);
2160 // test_bop!(i64x2[i64; 2] | eq[i64x2_eq_test]: ([0, 1], [0, 2]) => [-1, 0]);
2161 // test_bop!(f32x4[f32; 4] => i32 | eq[f32x4_eq_test]:
2162 // ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [-1, 0, -1, 0]);
2163 // test_bop!(f64x2[f64; 2] => i64 | eq[f64x2_eq_test]: ([0., 1.], [0., 2.]) => [-1, 0]);
2165 // test_bop!(i8x16[i8; 16] | ne[i8x16_ne_test]:
2166 // ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
2167 // [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
2168 // [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
2169 // test_bop!(i16x8[i16; 8] | ne[i16x8_ne_test]:
2170 // ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
2171 // [0, -1, 0, -1 ,0, -1, 0, 0]);
2172 // test_bop!(i32x4[i32; 4] | ne[i32x4_ne_test]:
2173 // ([0, 1, 2, 3], [0, 2, 2, 4]) => [0, -1, 0, -1]);
2174 // test_bop!(i64x2[i64; 2] | ne[i64x2_ne_test]: ([0, 1], [0, 2]) => [0, -1]);
2175 // test_bop!(f32x4[f32; 4] => i32 | ne[f32x4_ne_test]:
2176 // ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [0, -1, 0, -1]);
2177 // test_bop!(f64x2[f64; 2] => i64 | ne[f64x2_ne_test]: ([0., 1.], [0., 2.]) => [0, -1]);
2179 // test_bop!(i8x16[i8; 16] | lt[i8x16_lt_test]:
2180 // ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
2181 // [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
2182 // [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
2183 // test_bop!(i16x8[i16; 8] | lt[i16x8_lt_test]:
2184 // ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
2185 // [0, -1, 0, -1 ,0, -1, 0, 0]);
2186 // test_bop!(i32x4[i32; 4] | lt[i32x4_lt_test]:
2187 // ([0, 1, 2, 3], [0, 2, 2, 4]) => [0, -1, 0, -1]);
2188 // test_bop!(i64x2[i64; 2] | lt[i64x2_lt_test]: ([0, 1], [0, 2]) => [0, -1]);
2189 // test_bop!(f32x4[f32; 4] => i32 | lt[f32x4_lt_test]:
2190 // ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [0, -1, 0, -1]);
2191 // test_bop!(f64x2[f64; 2] => i64 | lt[f64x2_lt_test]: ([0., 1.], [0., 2.]) => [0, -1]);
2193 // test_bop!(i8x16[i8; 16] | gt[i8x16_gt_test]:
2194 // ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15],
2195 // [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) =>
2196 // [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
2197 // test_bop!(i16x8[i16; 8] | gt[i16x8_gt_test]:
2198 // ([0, 2, 2, 4, 4, 6, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
2199 // [0, -1, 0, -1 ,0, -1, 0, 0]);
2200 // test_bop!(i32x4[i32; 4] | gt[i32x4_gt_test]:
2201 // ([0, 2, 2, 4], [0, 1, 2, 3]) => [0, -1, 0, -1]);
2202 // test_bop!(i64x2[i64; 2] | gt[i64x2_gt_test]: ([0, 2], [0, 1]) => [0, -1]);
2203 // test_bop!(f32x4[f32; 4] => i32 | gt[f32x4_gt_test]:
2204 // ([0., 2., 2., 4.], [0., 1., 2., 3.]) => [0, -1, 0, -1]);
2205 // test_bop!(f64x2[f64; 2] => i64 | gt[f64x2_gt_test]: ([0., 2.], [0., 1.]) => [0, -1]);
2207 // test_bop!(i8x16[i8; 16] | ge[i8x16_ge_test]:
2208 // ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
2209 // [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
2210 // [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
2211 // test_bop!(i16x8[i16; 8] | ge[i16x8_ge_test]:
2212 // ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
2213 // [-1, 0, -1, 0 ,-1, 0, -1, -1]);
2214 // test_bop!(i32x4[i32; 4] | ge[i32x4_ge_test]:
2215 // ([0, 1, 2, 3], [0, 2, 2, 4]) => [-1, 0, -1, 0]);
2216 // test_bop!(i64x2[i64; 2] | ge[i64x2_ge_test]: ([0, 1], [0, 2]) => [-1, 0]);
2217 // test_bop!(f32x4[f32; 4] => i32 | ge[f32x4_ge_test]:
2218 // ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [-1, 0, -1, 0]);
2219 // test_bop!(f64x2[f64; 2] => i64 | ge[f64x2_ge_test]: ([0., 1.], [0., 2.]) => [-1, 0]);
2221 // test_bop!(i8x16[i8; 16] | le[i8x16_le_test]:
2222 // ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15],
2223 // [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
2225 // [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
2226 // test_bop!(i16x8[i16; 8] | le[i16x8_le_test]:
2227 // ([0, 2, 2, 4, 4, 6, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
2228 // [-1, 0, -1, 0 ,-1, 0, -1, -1]);
2229 // test_bop!(i32x4[i32; 4] | le[i32x4_le_test]:
2230 // ([0, 2, 2, 4], [0, 1, 2, 3]) => [-1, 0, -1, 0]);
2231 // test_bop!(i64x2[i64; 2] | le[i64x2_le_test]: ([0, 2], [0, 1]) => [-1, 0]);
2232 // test_bop!(f32x4[f32; 4] => i32 | le[f32x4_le_test]:
2233 //            ([0., 2., 2., 4.], [0., 1., 2., 3.]) => [-1, 0, -1, 0]);
2234 // test_bop!(f64x2[f64; 2] => i64 | le[f64x2_le_test]: ([0., 2.], [0., 1.]) => [-1, 0]);
2236 // #[wasm_bindgen_test]
2237 // fn v128_bitwise_load_store() {
2239 // let mut arr: [i32; 4] = [0, 1, 2, 3];
2241 // let vec = v128::load(arr.as_ptr() as *const v128);
2242 // let vec = i32x4::add(vec, vec);
2243 // v128::store(arr.as_mut_ptr() as *mut v128, vec);
2245 // assert_eq!(arr, [0, 2, 4, 6]);
2249 // test_uop!(f32x4[f32; 4] | neg[f32x4_neg_test]: [0., 1., 2., 3.] => [ 0., -1., -2., -3.]);
2250 // test_uop!(f32x4[f32; 4] | abs[f32x4_abs_test]: [0., -1., 2., -3.] => [ 0., 1., 2., 3.]);
2251 // test_bop!(f32x4[f32; 4] | min[f32x4_min_test]:
2252 // ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [0., -3., -4., 8.]);
2253 // test_bop!(f32x4[f32; 4] | min[f32x4_min_test_nan]:
2254 // ([0., -1., 7., 8.], [1., -3., -4., std::f32::NAN])
2255 // => [0., -3., -4., std::f32::NAN]);
2256 // test_bop!(f32x4[f32; 4] | max[f32x4_max_test]:
2257 // ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [1., -1., 7., 10.]);
2258 // test_bop!(f32x4[f32; 4] | max[f32x4_max_test_nan]:
2259 // ([0., -1., 7., 8.], [1., -3., -4., std::f32::NAN])
2260 // => [1., -1., 7., std::f32::NAN]);
2261 // test_bop!(f32x4[f32; 4] | add[f32x4_add_test]:
2262 // ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [1., -4., 3., 18.]);
2263 // test_bop!(f32x4[f32; 4] | sub[f32x4_sub_test]:
2264 // ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [-1., 2., 11., -2.]);
2265 // test_bop!(f32x4[f32; 4] | mul[f32x4_mul_test]:
2266 // ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [0., 3., -28., 80.]);
2267 // test_bop!(f32x4[f32; 4] | div[f32x4_div_test]:
2268 // ([0., -8., 70., 8.], [1., 4., 10., 2.]) => [0., -2., 7., 4.]);
2270 // test_uop!(f64x2[f64; 2] | neg[f64x2_neg_test]: [0., 1.] => [ 0., -1.]);
2271 // test_uop!(f64x2[f64; 2] | abs[f64x2_abs_test]: [0., -1.] => [ 0., 1.]);
2272 // test_bop!(f64x2[f64; 2] | min[f64x2_min_test]:
2273 // ([0., -1.], [1., -3.]) => [0., -3.]);
2274 // test_bop!(f64x2[f64; 2] | min[f64x2_min_test_nan]:
2275 // ([7., 8.], [-4., std::f64::NAN])
2276 // => [ -4., std::f64::NAN]);
2277 // test_bop!(f64x2[f64; 2] | max[f64x2_max_test]:
2278 // ([0., -1.], [1., -3.]) => [1., -1.]);
2279 // test_bop!(f64x2[f64; 2] | max[f64x2_max_test_nan]:
2280 // ([7., 8.], [ -4., std::f64::NAN])
2281 // => [7., std::f64::NAN]);
2282 // test_bop!(f64x2[f64; 2] | add[f64x2_add_test]:
2283 // ([0., -1.], [1., -3.]) => [1., -4.]);
2284 // test_bop!(f64x2[f64; 2] | sub[f64x2_sub_test]:
2285 // ([0., -1.], [1., -3.]) => [-1., 2.]);
2286 // test_bop!(f64x2[f64; 2] | mul[f64x2_mul_test]:
2287 // ([0., -1.], [1., -3.]) => [0., 3.]);
2288 // test_bop!(f64x2[f64; 2] | div[f64x2_div_test]:
2289 // ([0., -8.], [1., 4.]) => [0., -2.]);
2291 // macro_rules! test_conv {
2292 // ($test_id:ident | $conv_id:ident | $to_ty:ident | $from:expr, $to:expr) => {
2293 // #[wasm_bindgen_test]
2296 // let from: v128 = transmute($from);
2297 // let to: v128 = transmute($to);
2299 // let r: v128 = $to_ty::$conv_id(from);
2301 // compare_bytes(r, to);
2308 // f32x4_convert_s_i32x4 | convert_s_i32x4 | f32x4 | [1_i32, 2, 3, 4],
2309 // [1_f32, 2., 3., 4.]
2312 // f32x4_convert_u_i32x4
2313 // | convert_u_i32x4
2315 // | [u32::MAX, 2, 3, 4],
2316 // [u32::MAX as f32, 2., 3., 4.]
2319 // f64x2_convert_s_i64x2 | convert_s_i64x2 | f64x2 | [1_i64, 2],
2323 // f64x2_convert_u_i64x2
2324 // | convert_u_i64x2
2327 // [18446744073709552000.0, 2.]
2330 // // FIXME: this fails, and produces -2147483648 instead of saturating at
2331 // // i32::MAX test_conv!(i32x4_trunc_s_f32x4_sat | trunc_s_f32x4_sat
2332 // // | i32x4 | [1_f32, 2., (i32::MAX as f32 + 1.), 4.],
2333 // // [1_i32, 2, i32::MAX, 4]); FIXME: add other saturating tests