1 //! This module implements the [WebAssembly `SIMD128` ISA].
2 //!
3 //! [WebAssembly `SIMD128` ISA]:
4 //! https://github.com/WebAssembly/simd/blob/master/proposals/simd/SIMD.md
5
6 #![allow(non_camel_case_types)]
7
8 use crate::{
9 core_arch::{simd::*, simd_llvm::*},
10 marker::Sized,
11 mem::transmute,
12 ptr,
13 };
14
15 #[cfg(test)]
16 use stdarch_test::assert_instr;
17 #[cfg(test)]
18 use wasm_bindgen_test::wasm_bindgen_test;
19
20 types! {
21 /// WASM-specific 128-bit wide SIMD vector type.
22 // N.B., internals here are arbitrary.
23 pub struct v128(i32, i32, i32, i32);
24 }
25
26 #[allow(non_camel_case_types)]
27 #[unstable(feature = "stdimd_internal", issue = "none")]
28 pub(crate) trait v128Ext: Sized {
29 fn as_v128(self) -> v128;
30
31 #[inline]
32 fn as_u8x16(self) -> u8x16 {
33 unsafe { transmute(self.as_v128()) }
34 }
35
36 #[inline]
37 fn as_u16x8(self) -> u16x8 {
38 unsafe { transmute(self.as_v128()) }
39 }
40
41 #[inline]
42 fn as_u32x4(self) -> u32x4 {
43 unsafe { transmute(self.as_v128()) }
44 }
45
46 #[inline]
47 fn as_u64x2(self) -> u64x2 {
48 unsafe { transmute(self.as_v128()) }
49 }
50
51 #[inline]
52 fn as_i8x16(self) -> i8x16 {
53 unsafe { transmute(self.as_v128()) }
54 }
55
56 #[inline]
57 fn as_i16x8(self) -> i16x8 {
58 unsafe { transmute(self.as_v128()) }
59 }
60
61 #[inline]
62 fn as_i32x4(self) -> i32x4 {
63 unsafe { transmute(self.as_v128()) }
64 }
65
66 #[inline]
67 fn as_i64x2(self) -> i64x2 {
68 unsafe { transmute(self.as_v128()) }
69 }
70
71 #[inline]
72 fn as_f32x4(self) -> f32x4 {
73 unsafe { transmute(self.as_v128()) }
74 }
75
76 #[inline]
77 fn as_f64x2(self) -> f64x2 {
78 unsafe { transmute(self.as_v128()) }
79 }
80 }
81
82 impl v128Ext for v128 {
83 #[inline]
84 fn as_v128(self) -> Self {
85 self
86 }
87 }
88
89 #[allow(improper_ctypes)]
90 extern "C" {
91 #[link_name = "llvm.wasm.anytrue.v16i8"]
92 fn llvm_i8x16_any_true(x: i8x16) -> i32;
93 #[link_name = "llvm.wasm.alltrue.v16i8"]
94 fn llvm_i8x16_all_true(x: i8x16) -> i32;
95 #[link_name = "llvm.sadd.sat.v16i8"]
96 fn llvm_i8x16_add_saturate_s(a: i8x16, b: i8x16) -> i8x16;
97 #[link_name = "llvm.uadd.sat.v16i8"]
98 fn llvm_i8x16_add_saturate_u(a: i8x16, b: i8x16) -> i8x16;
99 #[link_name = "llvm.wasm.sub.saturate.signed.v16i8"]
100 fn llvm_i8x16_sub_saturate_s(a: i8x16, b: i8x16) -> i8x16;
101 #[link_name = "llvm.wasm.sub.saturate.unsigned.v16i8"]
102 fn llvm_i8x16_sub_saturate_u(a: i8x16, b: i8x16) -> i8x16;
103
104 #[link_name = "llvm.wasm.anytrue.v8i16"]
105 fn llvm_i16x8_any_true(x: i16x8) -> i32;
106 #[link_name = "llvm.wasm.alltrue.v8i16"]
107 fn llvm_i16x8_all_true(x: i16x8) -> i32;
108 #[link_name = "llvm.sadd.sat.v8i16"]
109 fn llvm_i16x8_add_saturate_s(a: i16x8, b: i16x8) -> i16x8;
110 #[link_name = "llvm.uadd.sat.v8i16"]
111 fn llvm_i16x8_add_saturate_u(a: i16x8, b: i16x8) -> i16x8;
112 #[link_name = "llvm.wasm.sub.saturate.signed.v8i16"]
113 fn llvm_i16x8_sub_saturate_s(a: i16x8, b: i16x8) -> i16x8;
114 #[link_name = "llvm.wasm.sub.saturate.unsigned.v8i16"]
115 fn llvm_i16x8_sub_saturate_u(a: i16x8, b: i16x8) -> i16x8;
116
117 #[link_name = "llvm.wasm.anytrue.v4i32"]
118 fn llvm_i32x4_any_true(x: i32x4) -> i32;
119 #[link_name = "llvm.wasm.alltrue.v4i32"]
120 fn llvm_i32x4_all_true(x: i32x4) -> i32;
121
122 #[link_name = "llvm.wasm.anytrue.v2i64"]
123 fn llvm_i64x2_any_true(x: i64x2) -> i32;
124 #[link_name = "llvm.wasm.alltrue.v2i64"]
125 fn llvm_i64x2_all_true(x: i64x2) -> i32;
126
127 #[link_name = "llvm.fabs.v4f32"]
128 fn llvm_f32x4_abs(x: f32x4) -> f32x4;
129 #[link_name = "llvm.sqrt.v4f32"]
130 fn llvm_f32x4_sqrt(x: f32x4) -> f32x4;
131 #[link_name = "llvm.minimum.v4f32"]
132 fn llvm_f32x4_min(x: f32x4, y: f32x4) -> f32x4;
133 #[link_name = "llvm.maximum.v4f32"]
134 fn llvm_f32x4_max(x: f32x4, y: f32x4) -> f32x4;
135 #[link_name = "llvm.fabs.v2f64"]
136 fn llvm_f64x2_abs(x: f64x2) -> f64x2;
137 #[link_name = "llvm.sqrt.v2f64"]
138 fn llvm_f64x2_sqrt(x: f64x2) -> f64x2;
139 #[link_name = "llvm.minimum.v2f64"]
140 fn llvm_f64x2_min(x: f64x2, y: f64x2) -> f64x2;
141 #[link_name = "llvm.maximum.v2f64"]
142 fn llvm_f64x2_max(x: f64x2, y: f64x2) -> f64x2;
143
144 #[link_name = "llvm.wasm.bitselect.v16i8"]
145 fn llvm_bitselect(a: i8x16, b: i8x16, c: i8x16) -> i8x16;
146 }
147
148 /// Loads a `v128` vector from the given heap address.
149 #[inline]
150 #[cfg_attr(test, assert_instr(v128.load))]
151 pub unsafe fn v128_load(m: *const v128) -> v128 {
152 ptr::read(m)
153 }
154
155 /// Stores a `v128` vector to the given heap address.
156 #[inline]
157 #[cfg_attr(test, assert_instr(v128.store))]
158 pub unsafe fn v128_store(m: *mut v128, a: v128) {
159 ptr::write(m, a)
160 }
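
// Editor's illustrative sketch, not part of the upstream module: round-trips a
// `v128` through memory with the `v128_load`/`v128_store` intrinsics defined
// above. The test name and use of `wasm_bindgen_test` follow this file's test
// conventions and are assumptions of this sketch.
#[cfg(test)]
#[wasm_bindgen_test]
fn example_v128_load_store_roundtrip() {
    let mut slot = i32x4_splat(0);
    let value = i32x4_splat(7);
    unsafe {
        // References coerce to the raw pointers these intrinsics expect.
        v128_store(&mut slot, value);
        let reloaded = v128_load(&slot);
        assert_eq!(i32x4_extract_lane(reloaded, 0), 7);
        assert_eq!(i32x4_extract_lane(reloaded, 3), 7);
    }
}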
161
162 /// Materializes a constant SIMD value from the immediate operands.
163 ///
164 /// The `v128.const` instruction is encoded with 16 immediate bytes
165 /// `imm` which provide the bits of the vector directly.
166 #[inline]
167 #[cfg(not(only_node_compatible_functions))]
168 #[rustc_args_required_const(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)]
169 #[cfg_attr(test, assert_instr(
170 v128.const,
171 a0 = 0,
172 a1 = 1,
173 a2 = 2,
174 a3 = 3,
175 a4 = 4,
176 a5 = 5,
177 a6 = 6,
178 a7 = 7,
179 a8 = 8,
180 a9 = 9,
181 a10 = 10,
182 a11 = 11,
183 a12 = 12,
184 a13 = 13,
185 a14 = 14,
186 a15 = 15,
187 ))]
188 pub const fn v128_const(
189 a0: u8,
190 a1: u8,
191 a2: u8,
192 a3: u8,
193 a4: u8,
194 a5: u8,
195 a6: u8,
196 a7: u8,
197 a8: u8,
198 a9: u8,
199 a10: u8,
200 a11: u8,
201 a12: u8,
202 a13: u8,
203 a14: u8,
204 a15: u8,
205 ) -> v128 {
206 union U {
207 imm: [u8; 16],
208 vec: v128,
209 }
210 unsafe {
211 U {
212 imm: [
213 a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,
214 ],
215 }
216 .vec
217 }
218 }
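
// Editor's illustrative sketch, not part of the upstream module: builds a
// constant vector lane-by-lane with `v128_const` and reads bytes back with
// `i8x16_extract_lane` (whose lane index must be a constant expression).
#[cfg(all(test, not(only_node_compatible_functions)))]
#[wasm_bindgen_test]
fn example_v128_const() {
    let v = v128_const(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
    unsafe {
        assert_eq!(i8x16_extract_lane(v, 0), 0);
        assert_eq!(i8x16_extract_lane(v, 15), 15);
    }
}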
219
220 /// Creates a vector with identical lanes.
221 ///
222 /// Constructs a vector with `a` replicated to all 16 lanes.
223 #[inline]
224 #[cfg_attr(test, assert_instr(i8x16.splat))]
225 pub fn i8x16_splat(a: i8) -> v128 {
226 unsafe { transmute(i8x16::splat(a)) }
227 }
228
229 /// Extracts a lane from a 128-bit vector interpreted as 16 packed i8 numbers.
230 ///
231 /// Extracts the scalar value of the lane specified in the immediate mode operand
232 /// `imm` from `a`.
233 ///
234 /// # Unsafety
235 ///
236 /// This function has undefined behavior if `imm` is greater than or equal to
237 /// 16.
238 #[inline]
239 #[rustc_args_required_const(1)]
240 pub unsafe fn i8x16_extract_lane(a: v128, imm: usize) -> i8 {
241 #[cfg(test)]
242 #[assert_instr(i8x16.extract_lane_s)]
243 fn extract_lane_s(a: v128) -> i32 {
244 unsafe { i8x16_extract_lane(a, 0) as i32 }
245 }
246 #[cfg(test)]
247 #[cfg(not(only_node_compatible_functions))]
248 #[assert_instr(i8x16.extract_lane_u)]
249 fn extract_lane_u(a: v128) -> u32 {
250 unsafe { i8x16_extract_lane(a, 0) as u32 }
251 }
252 simd_extract(a.as_i8x16(), imm as u32)
253 }
254
255 /// Replaces a lane from a 128-bit vector interpreted as 16 packed i8 numbers.
256 ///
257 /// Replaces the scalar value of the lane of `a` specified in the immediate mode
258 /// operand `imm` with `val`.
259 ///
260 /// # Unsafety
261 ///
262 /// This function has undefined behavior if `imm` is greater than or equal to
263 /// 16.
264 #[inline]
265 #[cfg_attr(test, assert_instr(i8x16.replace_lane, imm = 0))]
266 #[rustc_args_required_const(1)]
267 pub unsafe fn i8x16_replace_lane(a: v128, imm: usize, val: i8) -> v128 {
268 transmute(simd_insert(a.as_i8x16(), imm as u32, val))
269 }
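
// Editor's illustrative sketch, not part of the upstream module: lane access
// with `i8x16_splat`, `i8x16_replace_lane` and `i8x16_extract_lane`. Lane
// indices must be constants below 16, per the safety notes above.
#[cfg(test)]
#[wasm_bindgen_test]
fn example_i8x16_lane_access() {
    let v = i8x16_splat(1);
    unsafe {
        let v = i8x16_replace_lane(v, 3, -5);
        assert_eq!(i8x16_extract_lane(v, 3), -5);
        assert_eq!(i8x16_extract_lane(v, 0), 1);
    }
}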
270
271 /// Creates a vector with identical lanes.
272 ///
273 /// Constructs a vector with `a` replicated to all 8 lanes.
274 #[inline]
275 #[cfg_attr(test, assert_instr(i16x8.splat))]
276 pub fn i16x8_splat(a: i16) -> v128 {
277 unsafe { transmute(i16x8::splat(a)) }
278 }
279
280 /// Extracts a lane from a 128-bit vector interpreted as 8 packed i16 numbers.
281 ///
282 /// Extracts the scalar value of the lane specified in the immediate mode operand
283 /// `imm` from `a`.
284 ///
285 /// # Unsafety
286 ///
287 /// This function has undefined behavior if `imm` is greater than or equal to
288 /// 8.
289 #[inline]
290 #[rustc_args_required_const(1)]
291 pub unsafe fn i16x8_extract_lane(a: v128, imm: usize) -> i16 {
292 #[cfg(test)]
293 #[assert_instr(i16x8.extract_lane_s)]
294 fn extract_lane_s(a: v128) -> i32 {
295 unsafe { i16x8_extract_lane(a, 0) as i32 }
296 }
297 #[cfg(test)]
298 #[cfg(not(only_node_compatible_functions))]
299 #[assert_instr(i16x8.extract_lane_u)]
300 fn extract_lane_u(a: v128) -> u32 {
301 unsafe { i16x8_extract_lane(a, 0) as u32 }
302 }
303 simd_extract(a.as_i16x8(), imm as u32)
304 }
305
306 /// Replaces a lane from a 128-bit vector interpreted as 8 packed i16 numbers.
307 ///
308 /// Replaces the scalar value of the lane of `a` specified in the immediate mode
309 /// operand `imm` with `val`.
310 ///
311 /// # Unsafety
312 ///
313 /// This function has undefined behavior if `imm` is greater than or equal to
314 /// 8.
315 #[inline]
316 #[cfg_attr(test, assert_instr(i16x8.replace_lane, imm = 0))]
317 #[rustc_args_required_const(1)]
318 pub unsafe fn i16x8_replace_lane(a: v128, imm: usize, val: i16) -> v128 {
319 transmute(simd_insert(a.as_i16x8(), imm as u32, val))
320 }
321
322 /// Creates a vector with identical lanes.
323 ///
324 /// Constructs a vector with `a` replicated to all 4 lanes.
325 #[inline]
326 #[cfg_attr(test, assert_instr(i32x4.splat))]
327 pub fn i32x4_splat(a: i32) -> v128 {
328 unsafe { transmute(i32x4::splat(a)) }
329 }
330
331 /// Extracts a lane from a 128-bit vector interpreted as 4 packed i32 numbers.
332 ///
333 /// Extracts the scalar value of the lane specified in the immediate mode operand
334 /// `imm` from `a`.
335 ///
336 /// # Unsafety
337 ///
338 /// This function has undefined behavior if `imm` is greater than or equal to
339 /// 4.
340 #[inline]
341 #[cfg_attr(test, assert_instr(i32x4.extract_lane, imm = 0))]
342 #[rustc_args_required_const(1)]
343 pub unsafe fn i32x4_extract_lane(a: v128, imm: usize) -> i32 {
344 simd_extract(a.as_i32x4(), imm as u32)
345 }
346
347 /// Replaces a lane from a 128-bit vector interpreted as 4 packed i32 numbers.
348 ///
349 /// Replaces the scalar value of the lane of `a` specified in the immediate mode
350 /// operand `imm` with `val`.
351 ///
352 /// # Unsafety
353 ///
354 /// This function has undefined behavior if `imm` is greater than or equal to
355 /// 4.
356 #[inline]
357 #[cfg_attr(test, assert_instr(i32x4.replace_lane, imm = 0))]
358 #[rustc_args_required_const(1)]
359 pub unsafe fn i32x4_replace_lane(a: v128, imm: usize, val: i32) -> v128 {
360 transmute(simd_insert(a.as_i32x4(), imm as u32, val))
361 }
362
363 /// Creates a vector with identical lanes.
364 ///
365 /// Constructs a vector with `a` replicated to all 2 lanes.
366 #[inline]
367 #[cfg(not(only_node_compatible_functions))]
368 #[cfg_attr(test, assert_instr(i64x2.splat))]
369 pub fn i64x2_splat(a: i64) -> v128 {
370 unsafe { transmute(i64x2::splat(a)) }
371 }
372
373 /// Extracts a lane from a 128-bit vector interpreted as 2 packed i64 numbers.
374 ///
375 /// Extracts the scalar value of the lane specified in the immediate mode operand
376 /// `imm` from `a`.
377 ///
378 /// # Unsafety
379 ///
380 /// This function has undefined behavior if `imm` is greater than or equal to
381 /// 2.
382 #[inline]
383 #[cfg(not(only_node_compatible_functions))]
384 #[cfg_attr(test, assert_instr(i64x2.extract_lane, imm = 0))]
385 #[rustc_args_required_const(1)]
386 pub unsafe fn i64x2_extract_lane(a: v128, imm: usize) -> i64 {
387 simd_extract(a.as_i64x2(), imm as u32)
388 }
389
390 /// Replaces a lane from a 128-bit vector interpreted as 2 packed i64 numbers.
391 ///
392 /// Replaces the scalar value of the lane of `a` specified in the immediate mode
393 /// operand `imm` with `val`.
394 ///
395 /// # Unsafety
396 ///
397 /// This function has undefined behavior if `imm` is greater than or equal to
398 /// 2.
399 #[inline]
400 #[cfg(not(only_node_compatible_functions))]
401 #[cfg_attr(test, assert_instr(i64x2.replace_lane, imm = 0))]
402 #[rustc_args_required_const(1)]
403 pub unsafe fn i64x2_replace_lane(a: v128, imm: usize, val: i64) -> v128 {
404 transmute(simd_insert(a.as_i64x2(), imm as u32, val))
405 }
406
407 /// Creates a vector with identical lanes.
408 ///
409 /// Constructs a vector with `a` replicated to all 4 lanes.
410 #[inline]
411 #[cfg_attr(test, assert_instr(f32x4.splat))]
412 pub fn f32x4_splat(a: f32) -> v128 {
413 unsafe { transmute(f32x4::splat(a)) }
414 }
415
416 /// Extracts a lane from a 128-bit vector interpreted as 4 packed f32 numbers.
417 ///
418 /// Extracts the scalar value of the lane specified in the immediate mode operand
419 /// `imm` from `a`.
420 ///
421 /// # Unsafety
422 ///
423 /// This function has undefined behavior if `imm` is greater than or equal to
424 /// 4.
425 #[inline]
426 #[cfg_attr(test, assert_instr(f32x4.extract_lane, imm = 0))]
427 #[rustc_args_required_const(1)]
428 pub unsafe fn f32x4_extract_lane(a: v128, imm: usize) -> f32 {
429 simd_extract(a.as_f32x4(), imm as u32)
430 }
431
432 /// Replaces a lane from a 128-bit vector interpreted as 4 packed f32 numbers.
433 ///
434 /// Replaces the scalar value of the lane of `a` specified in the immediate mode
435 /// operand `imm` with `val`.
436 ///
437 /// # Unsafety
438 ///
439 /// This function has undefined behavior if `imm` is greater than or equal to
440 /// 4.
441 #[inline]
442 #[cfg_attr(test, assert_instr(f32x4.replace_lane, imm = 0))]
443 #[rustc_args_required_const(1)]
444 pub unsafe fn f32x4_replace_lane(a: v128, imm: usize, val: f32) -> v128 {
445 transmute(simd_insert(a.as_f32x4(), imm as u32, val))
446 }
447
448 /// Creates a vector with identical lanes.
449 ///
450 /// Constructs a vector with `a` replicated to all 2 lanes.
451 #[inline]
452 #[cfg(not(only_node_compatible_functions))]
453 #[cfg_attr(test, assert_instr(f64x2.splat))]
454 pub fn f64x2_splat(a: f64) -> v128 {
455 unsafe { transmute(f64x2::splat(a)) }
456 }
457
458 /// Extracts a lane from a 128-bit vector interpreted as 2 packed f64 numbers.
459 ///
460 /// Extracts the scalar value of the lane specified in the immediate mode operand
461 /// `imm` from `a`.
462 ///
463 /// # Unsafety
464 ///
465 /// This function has undefined behavior if `imm` is greater than or equal to
466 /// 2.
467 #[inline]
468 #[cfg(not(only_node_compatible_functions))]
469 #[cfg_attr(test, assert_instr(f64x2.extract_lane, imm = 0))]
470 #[rustc_args_required_const(1)]
471 pub unsafe fn f64x2_extract_lane(a: v128, imm: usize) -> f64 {
472 simd_extract(a.as_f64x2(), imm as u32)
473 }
474
475 /// Replaces a lane from a 128-bit vector interpreted as 2 packed f64 numbers.
476 ///
477 /// Replaces the scalar value of the lane of `a` specified in the immediate mode
478 /// operand `imm` with `val`.
479 ///
480 /// # Unsafety
481 ///
482 /// This function has undefined behavior if `imm` is greater than or equal to
483 /// 2.
484 #[inline]
485 #[cfg(not(only_node_compatible_functions))]
486 #[cfg_attr(test, assert_instr(f64x2.replace_lane, imm = 0))]
487 #[rustc_args_required_const(1)]
488 pub unsafe fn f64x2_replace_lane(a: v128, imm: usize, val: f64) -> v128 {
489 transmute(simd_insert(a.as_f64x2(), imm as u32, val))
490 }
491
492 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
493 /// integers.
494 ///
495 /// Returns a new vector where each lane is all ones if the pairwise elements
496 /// were equal, or all zeros if the elements were not equal.
497 #[inline]
498 #[cfg_attr(test, assert_instr(i8x16.eq))]
499 pub fn i8x16_eq(a: v128, b: v128) -> v128 {
500 unsafe { transmute(simd_eq::<_, i8x16>(a.as_i8x16(), b.as_i8x16())) }
501 }
502
503 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
504 /// integers.
505 ///
506 /// Returns a new vector where each lane is all ones if the pairwise elements
507 /// were not equal, or all zeros if the elements were equal.
508 #[inline]
509 #[cfg_attr(test, assert_instr(i8x16.ne))]
510 pub fn i8x16_ne(a: v128, b: v128) -> v128 {
511 unsafe { transmute(simd_ne::<_, i8x16>(a.as_i8x16(), b.as_i8x16())) }
512 }
513
514 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
515 /// signed integers.
516 ///
517 /// Returns a new vector where each lane is all ones if the pairwise left
518 /// element is less than the pairwise right element, or all zeros otherwise.
519 #[inline]
520 #[cfg_attr(test, assert_instr(i8x16.lt_s))]
521 pub fn i8x16_lt_s(a: v128, b: v128) -> v128 {
522 unsafe { transmute(simd_lt::<_, i8x16>(a.as_i8x16(), b.as_i8x16())) }
523 }
524
525 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
526 /// unsigned integers.
527 ///
528 /// Returns a new vector where each lane is all ones if the pairwise left
529 /// element is less than the pairwise right element, or all zeros otherwise.
530 #[inline]
531 #[cfg_attr(test, assert_instr(i8x16.lt_u))]
532 pub fn i8x16_lt_u(a: v128, b: v128) -> v128 {
533 unsafe { transmute(simd_lt::<_, i8x16>(a.as_u8x16(), b.as_u8x16())) }
534 }
535
536 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
537 /// signed integers.
538 ///
539 /// Returns a new vector where each lane is all ones if the pairwise left
540 /// element is greater than the pairwise right element, or all zeros otherwise.
541 #[inline]
542 #[cfg_attr(test, assert_instr(i8x16.gt_s))]
543 pub fn i8x16_gt_s(a: v128, b: v128) -> v128 {
544 unsafe { transmute(simd_gt::<_, i8x16>(a.as_i8x16(), b.as_i8x16())) }
545 }
546
547 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
548 /// unsigned integers.
549 ///
550 /// Returns a new vector where each lane is all ones if the pairwise left
551 /// element is greater than the pairwise right element, or all zeros otherwise.
552 #[inline]
553 #[cfg_attr(test, assert_instr(i8x16.gt_u))]
554 pub fn i8x16_gt_u(a: v128, b: v128) -> v128 {
555 unsafe { transmute(simd_gt::<_, i8x16>(a.as_u8x16(), b.as_u8x16())) }
556 }
557
558 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
559 /// signed integers.
560 ///
561 /// Returns a new vector where each lane is all ones if the pairwise left
562 /// element is less than or equal to the pairwise right element, or all zeros otherwise.
563 #[inline]
564 #[cfg_attr(test, assert_instr(i8x16.le_s))]
565 pub fn i8x16_le_s(a: v128, b: v128) -> v128 {
566 unsafe { transmute(simd_le::<_, i8x16>(a.as_i8x16(), b.as_i8x16())) }
567 }
568
569 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
570 /// unsigned integers.
571 ///
572 /// Returns a new vector where each lane is all ones if the pairwise left
573 /// element is less than or equal to the pairwise right element, or all zeros otherwise.
574 #[inline]
575 #[cfg_attr(test, assert_instr(i8x16.le_u))]
576 pub fn i8x16_le_u(a: v128, b: v128) -> v128 {
577 unsafe { transmute(simd_le::<_, i8x16>(a.as_u8x16(), b.as_u8x16())) }
578 }
579
580 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
581 /// signed integers.
582 ///
583 /// Returns a new vector where each lane is all ones if the pairwise left
584 /// element is greater than or equal to the pairwise right element, or all zeros otherwise.
585 #[inline]
586 #[cfg_attr(test, assert_instr(i8x16.ge_s))]
587 pub fn i8x16_ge_s(a: v128, b: v128) -> v128 {
588 unsafe { transmute(simd_ge::<_, i8x16>(a.as_i8x16(), b.as_i8x16())) }
589 }
590
591 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
592 /// unsigned integers.
593 ///
594 /// Returns a new vector where each lane is all ones if the pairwise left
595 /// element is greater than or equal to the pairwise right element, or all zeros otherwise.
596 #[inline]
597 #[cfg_attr(test, assert_instr(i8x16.ge_u))]
598 pub fn i8x16_ge_u(a: v128, b: v128) -> v128 {
599 unsafe { transmute(simd_ge::<_, i8x16>(a.as_u8x16(), b.as_u8x16())) }
600 }
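
// Editor's illustrative sketch, not part of the upstream module: a lane-wise
// comparison yields a mask vector whose lanes are either all ones (-1 when
// read as a signed integer) or all zeros, ready for `v128_bitselect` or the
// boolean reductions defined later in this module.
#[cfg(test)]
#[wasm_bindgen_test]
fn example_i8x16_comparison_masks() {
    let a = i8x16_splat(1);
    let b = i8x16_splat(2);
    unsafe {
        // 1 < 2 in every lane, so the mask is all ones.
        assert_eq!(i8x16_extract_lane(i8x16_lt_s(a, b), 0), -1);
        // 1 > 2 in no lane, so the mask is all zeros.
        assert_eq!(i8x16_extract_lane(i8x16_gt_s(a, b), 0), 0);
    }
}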
601
602 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
603 /// integers.
604 ///
605 /// Returns a new vector where each lane is all ones if the pairwise elements
606 /// were equal, or all zeros if the elements were not equal.
607 #[inline]
608 #[cfg_attr(test, assert_instr(i16x8.eq))]
609 pub fn i16x8_eq(a: v128, b: v128) -> v128 {
610 unsafe { transmute(simd_eq::<_, i16x8>(a.as_i16x8(), b.as_i16x8())) }
611 }
612
613 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
614 /// integers.
615 ///
616 /// Returns a new vector where each lane is all ones if the pairwise elements
617 /// were not equal, or all zeros if the elements were equal.
618 #[inline]
619 #[cfg_attr(test, assert_instr(i16x8.ne))]
620 pub fn i16x8_ne(a: v128, b: v128) -> v128 {
621 unsafe { transmute(simd_ne::<_, i16x8>(a.as_i16x8(), b.as_i16x8())) }
622 }
623
624 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
625 /// signed integers.
626 ///
627 /// Returns a new vector where each lane is all ones if the pairwise left
628 /// element is less than the pairwise right element, or all zeros otherwise.
629 #[inline]
630 #[cfg_attr(test, assert_instr(i16x8.lt_s))]
631 pub fn i16x8_lt_s(a: v128, b: v128) -> v128 {
632 unsafe { transmute(simd_lt::<_, i16x8>(a.as_i16x8(), b.as_i16x8())) }
633 }
634
635 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
636 /// unsigned integers.
637 ///
638 /// Returns a new vector where each lane is all ones if the pairwise left
639 /// element is less than the pairwise right element, or all zeros otherwise.
640 #[inline]
641 #[cfg_attr(test, assert_instr(i16x8.lt_u))]
642 pub fn i16x8_lt_u(a: v128, b: v128) -> v128 {
643 unsafe { transmute(simd_lt::<_, i16x8>(a.as_u16x8(), b.as_u16x8())) }
644 }
645
646 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
647 /// signed integers.
648 ///
649 /// Returns a new vector where each lane is all ones if the pairwise left
650 /// element is greater than the pairwise right element, or all zeros otherwise.
651 #[inline]
652 #[cfg_attr(test, assert_instr(i16x8.gt_s))]
653 pub fn i16x8_gt_s(a: v128, b: v128) -> v128 {
654 unsafe { transmute(simd_gt::<_, i16x8>(a.as_i16x8(), b.as_i16x8())) }
655 }
656
657 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
658 /// unsigned integers.
659 ///
660 /// Returns a new vector where each lane is all ones if the pairwise left
661 /// element is greater than the pairwise right element, or all zeros otherwise.
662 #[inline]
663 #[cfg_attr(test, assert_instr(i16x8.gt_u))]
664 pub fn i16x8_gt_u(a: v128, b: v128) -> v128 {
665 unsafe { transmute(simd_gt::<_, i16x8>(a.as_u16x8(), b.as_u16x8())) }
666 }
667
668 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
669 /// signed integers.
670 ///
671 /// Returns a new vector where each lane is all ones if the pairwise left
672 /// element is less than or equal to the pairwise right element, or all zeros otherwise.
673 #[inline]
674 #[cfg_attr(test, assert_instr(i16x8.le_s))]
675 pub fn i16x8_le_s(a: v128, b: v128) -> v128 {
676 unsafe { transmute(simd_le::<_, i16x8>(a.as_i16x8(), b.as_i16x8())) }
677 }
678
679 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
680 /// unsigned integers.
681 ///
682 /// Returns a new vector where each lane is all ones if the pairwise left
683 /// element is less than or equal to the pairwise right element, or all zeros otherwise.
684 #[inline]
685 #[cfg_attr(test, assert_instr(i16x8.le_u))]
686 pub fn i16x8_le_u(a: v128, b: v128) -> v128 {
687 unsafe { transmute(simd_le::<_, i16x8>(a.as_u16x8(), b.as_u16x8())) }
688 }
689
690 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
691 /// signed integers.
692 ///
693 /// Returns a new vector where each lane is all ones if the pairwise left
694 /// element is greater than or equal to the pairwise right element, or all zeros otherwise.
695 #[inline]
696 #[cfg_attr(test, assert_instr(i16x8.ge_s))]
697 pub fn i16x8_ge_s(a: v128, b: v128) -> v128 {
698 unsafe { transmute(simd_ge::<_, i16x8>(a.as_i16x8(), b.as_i16x8())) }
699 }
700
701 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
702 /// unsigned integers.
703 ///
704 /// Returns a new vector where each lane is all ones if the pairwise left
705 /// element is greater than or equal to the pairwise right element, or all zeros otherwise.
706 #[inline]
707 #[cfg_attr(test, assert_instr(i16x8.ge_u))]
708 pub fn i16x8_ge_u(a: v128, b: v128) -> v128 {
709 unsafe { transmute(simd_ge::<_, i16x8>(a.as_u16x8(), b.as_u16x8())) }
710 }
711
712 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
713 /// integers.
714 ///
715 /// Returns a new vector where each lane is all ones if the pairwise elements
716 /// were equal, or all zeros if the elements were not equal.
717 #[inline]
718 #[cfg_attr(test, assert_instr(i32x4.eq))]
719 pub fn i32x4_eq(a: v128, b: v128) -> v128 {
720 unsafe { transmute(simd_eq::<_, i32x4>(a.as_i32x4(), b.as_i32x4())) }
721 }
722
723 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
724 /// integers.
725 ///
726 /// Returns a new vector where each lane is all ones if the pairwise elements
727 /// were not equal, or all zeros if the elements were equal.
728 #[inline]
729 #[cfg_attr(test, assert_instr(i32x4.ne))]
730 pub fn i32x4_ne(a: v128, b: v128) -> v128 {
731 unsafe { transmute(simd_ne::<_, i32x4>(a.as_i32x4(), b.as_i32x4())) }
732 }
733
734 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
735 /// signed integers.
736 ///
737 /// Returns a new vector where each lane is all ones if the pairwise left
738 /// element is less than the pairwise right element, or all zeros otherwise.
739 #[inline]
740 #[cfg_attr(test, assert_instr(i32x4.lt_s))]
741 pub fn i32x4_lt_s(a: v128, b: v128) -> v128 {
742 unsafe { transmute(simd_lt::<_, i32x4>(a.as_i32x4(), b.as_i32x4())) }
743 }
744
745 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
746 /// unsigned integers.
747 ///
748 /// Returns a new vector where each lane is all ones if the pairwise left
749 /// element is less than the pairwise right element, or all zeros otherwise.
750 #[inline]
751 #[cfg_attr(test, assert_instr(i32x4.lt_u))]
752 pub fn i32x4_lt_u(a: v128, b: v128) -> v128 {
753 unsafe { transmute(simd_lt::<_, i32x4>(a.as_u32x4(), b.as_u32x4())) }
754 }
755
756 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
757 /// signed integers.
758 ///
759 /// Returns a new vector where each lane is all ones if the pairwise left
760 /// element is greater than the pairwise right element, or all zeros otherwise.
761 #[inline]
762 #[cfg_attr(test, assert_instr(i32x4.gt_s))]
763 pub fn i32x4_gt_s(a: v128, b: v128) -> v128 {
764 unsafe { transmute(simd_gt::<_, i32x4>(a.as_i32x4(), b.as_i32x4())) }
765 }
766
767 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
768 /// unsigned integers.
769 ///
770 /// Returns a new vector where each lane is all ones if the pairwise left
771 /// element is greater than the pairwise right element, or all zeros otherwise.
772 #[inline]
773 #[cfg_attr(test, assert_instr(i32x4.gt_u))]
774 pub fn i32x4_gt_u(a: v128, b: v128) -> v128 {
775 unsafe { transmute(simd_gt::<_, i32x4>(a.as_u32x4(), b.as_u32x4())) }
776 }
777
778 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
779 /// signed integers.
780 ///
781 /// Returns a new vector where each lane is all ones if the pairwise left
782 /// element is less than or equal to the pairwise right element, or all zeros otherwise.
783 #[inline]
784 #[cfg_attr(test, assert_instr(i32x4.le_s))]
785 pub fn i32x4_le_s(a: v128, b: v128) -> v128 {
786 unsafe { transmute(simd_le::<_, i32x4>(a.as_i32x4(), b.as_i32x4())) }
787 }
788
789 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
790 /// unsigned integers.
791 ///
792 /// Returns a new vector where each lane is all ones if the pairwise left
793 /// element is less than or equal to the pairwise right element, or all zeros otherwise.
794 #[inline]
795 #[cfg_attr(test, assert_instr(i32x4.le_u))]
796 pub fn i32x4_le_u(a: v128, b: v128) -> v128 {
797 unsafe { transmute(simd_le::<_, i32x4>(a.as_u32x4(), b.as_u32x4())) }
798 }
799
800 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
801 /// signed integers.
802 ///
803 /// Returns a new vector where each lane is all ones if the pairwise left
804 /// element is greater than or equal to the pairwise right element, or all zeros otherwise.
805 #[inline]
806 #[cfg_attr(test, assert_instr(i32x4.ge_s))]
807 pub fn i32x4_ge_s(a: v128, b: v128) -> v128 {
808 unsafe { transmute(simd_ge::<_, i32x4>(a.as_i32x4(), b.as_i32x4())) }
809 }
810
811 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
812 /// unsigned integers.
813 ///
814 /// Returns a new vector where each lane is all ones if the pairwise left
815 /// element is greater than or equal to the pairwise right element, or all zeros otherwise.
816 #[inline]
817 #[cfg_attr(test, assert_instr(i32x4.ge_u))]
818 pub fn i32x4_ge_u(a: v128, b: v128) -> v128 {
819 unsafe { transmute(simd_ge::<_, i32x4>(a.as_u32x4(), b.as_u32x4())) }
820 }
821
822 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
823 /// floating point numbers.
824 ///
825 /// Returns a new vector where each lane is all ones if the pairwise elements
826 /// were equal, or all zeros if the elements were not equal.
827 #[inline]
828 #[cfg_attr(test, assert_instr(f32x4.eq))]
829 pub fn f32x4_eq(a: v128, b: v128) -> v128 {
830 unsafe { transmute(simd_eq::<_, i32x4>(a.as_f32x4(), b.as_f32x4())) }
831 }
832
833 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
834 /// floating point numbers.
835 ///
836 /// Returns a new vector where each lane is all ones if the pairwise elements
837 /// were not equal, or all zeros if the elements were equal.
838 #[inline]
839 #[cfg_attr(test, assert_instr(f32x4.ne))]
840 pub fn f32x4_ne(a: v128, b: v128) -> v128 {
841 unsafe { transmute(simd_ne::<_, i32x4>(a.as_f32x4(), b.as_f32x4())) }
842 }
843
844 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
845 /// floating point numbers.
846 ///
847 /// Returns a new vector where each lane is all ones if the pairwise left
848 /// element is less than the pairwise right element, or all zeros otherwise.
849 #[inline]
850 #[cfg_attr(test, assert_instr(f32x4.lt))]
851 pub fn f32x4_lt(a: v128, b: v128) -> v128 {
852 unsafe { transmute(simd_lt::<_, i32x4>(a.as_f32x4(), b.as_f32x4())) }
853 }
854
855 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
856 /// floating point numbers.
857 ///
858 /// Returns a new vector where each lane is all ones if the pairwise left
859 /// element is greater than the pairwise right element, or all zeros otherwise.
860 #[inline]
861 #[cfg_attr(test, assert_instr(f32x4.gt))]
862 pub fn f32x4_gt(a: v128, b: v128) -> v128 {
863 unsafe { transmute(simd_gt::<_, i32x4>(a.as_f32x4(), b.as_f32x4())) }
864 }
865
866 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
867 /// floating point numbers.
868 ///
869 /// Returns a new vector where each lane is all ones if the pairwise left
870 /// element is less than or equal to the pairwise right element, or all zeros otherwise.
871 #[inline]
872 #[cfg_attr(test, assert_instr(f32x4.le))]
873 pub fn f32x4_le(a: v128, b: v128) -> v128 {
874 unsafe { transmute(simd_le::<_, i32x4>(a.as_f32x4(), b.as_f32x4())) }
875 }
876
877 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
878 /// floating point numbers.
879 ///
880 /// Returns a new vector where each lane is all ones if the pairwise left
881 /// element is greater than or equal to the pairwise right element, or all zeros otherwise.
882 #[inline]
883 #[cfg_attr(test, assert_instr(f32x4.ge))]
884 pub fn f32x4_ge(a: v128, b: v128) -> v128 {
885 unsafe { transmute(simd_ge::<_, i32x4>(a.as_f32x4(), b.as_f32x4())) }
886 }
887
888 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
889 /// floating point numbers.
890 ///
891 /// Returns a new vector where each lane is all ones if the pairwise elements
892 /// were equal, or all zeros if the elements were not equal.
893 #[inline]
894 #[cfg(not(only_node_compatible_functions))]
895 #[cfg_attr(test, assert_instr(f64x2.eq))]
896 pub fn f64x2_eq(a: v128, b: v128) -> v128 {
897 unsafe { transmute(simd_eq::<_, i64x2>(a.as_f64x2(), b.as_f64x2())) }
898 }
899
900 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
901 /// floating point numbers.
902 ///
903 /// Returns a new vector where each lane is all ones if the pairwise elements
904 /// were not equal, or all zeros if the elements were equal.
905 #[inline]
906 #[cfg(not(only_node_compatible_functions))]
907 #[cfg_attr(test, assert_instr(f64x2.ne))]
908 pub fn f64x2_ne(a: v128, b: v128) -> v128 {
909 unsafe { transmute(simd_ne::<_, i64x2>(a.as_f64x2(), b.as_f64x2())) }
910 }
911
912 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
913 /// floating point numbers.
914 ///
915 /// Returns a new vector where each lane is all ones if the pairwise left
916 /// element is less than the pairwise right element, or all zeros otherwise.
917 #[inline]
918 #[cfg(not(only_node_compatible_functions))]
919 #[cfg_attr(test, assert_instr(f64x2.lt))]
920 pub fn f64x2_lt(a: v128, b: v128) -> v128 {
921 unsafe { transmute(simd_lt::<_, i64x2>(a.as_f64x2(), b.as_f64x2())) }
922 }
923
924 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
925 /// floating point numbers.
926 ///
927 /// Returns a new vector where each lane is all ones if the pairwise left
928 /// element is greater than the pairwise right element, or all zeros otherwise.
929 #[inline]
930 #[cfg(not(only_node_compatible_functions))]
931 #[cfg_attr(test, assert_instr(f64x2.gt))]
932 pub fn f64x2_gt(a: v128, b: v128) -> v128 {
933 unsafe { transmute(simd_gt::<_, i64x2>(a.as_f64x2(), b.as_f64x2())) }
934 }
935
936 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
937 /// floating point numbers.
938 ///
939 /// Returns a new vector where each lane is all ones if the pairwise left
940 /// element is less than or equal to the pairwise right element, or all zeros otherwise.
941 #[inline]
942 #[cfg(not(only_node_compatible_functions))]
943 #[cfg_attr(test, assert_instr(f64x2.le))]
944 pub fn f64x2_le(a: v128, b: v128) -> v128 {
945 unsafe { transmute(simd_le::<_, i64x2>(a.as_f64x2(), b.as_f64x2())) }
946 }
947
948 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
949 /// floating point numbers.
950 ///
951 /// Returns a new vector where each lane is all ones if the pairwise left
952 /// element is greater than or equal to the pairwise right element, or all zeros otherwise.
953 #[inline]
954 #[cfg(not(only_node_compatible_functions))]
955 #[cfg_attr(test, assert_instr(f64x2.ge))]
956 pub fn f64x2_ge(a: v128, b: v128) -> v128 {
957 unsafe { transmute(simd_ge::<_, i64x2>(a.as_f64x2(), b.as_f64x2())) }
958 }
959
960 /// Flips each bit of the 128-bit input vector.
961 #[inline]
962 #[cfg_attr(test, assert_instr(v128.not))]
963 pub fn v128_not(a: v128) -> v128 {
964 unsafe { transmute(simd_xor(a.as_i64x2(), i64x2(!0, !0))) }
965 }
966
967 /// Performs a bitwise and of the two input 128-bit vectors, returning the
968 /// resulting vector.
969 #[inline]
970 #[cfg_attr(test, assert_instr(v128.and))]
971 pub fn v128_and(a: v128, b: v128) -> v128 {
972 unsafe { transmute(simd_and(a.as_i64x2(), b.as_i64x2())) }
973 }
974
975 /// Performs a bitwise or of the two input 128-bit vectors, returning the
976 /// resulting vector.
977 #[inline]
978 #[cfg_attr(test, assert_instr(v128.or))]
979 pub fn v128_or(a: v128, b: v128) -> v128 {
980 unsafe { transmute(simd_or(a.as_i64x2(), b.as_i64x2())) }
981 }
982
983 /// Performs a bitwise xor of the two input 128-bit vectors, returning the
984 /// resulting vector.
985 #[inline]
986 #[cfg_attr(test, assert_instr(v128.xor))]
987 pub fn v128_xor(a: v128, b: v128) -> v128 {
988 unsafe { transmute(simd_xor(a.as_i64x2(), b.as_i64x2())) }
989 }
990
991 /// Uses the bitmask in `c` to select bits from `v1` when 1 and from `v2` when 0.
992 #[inline]
993 #[cfg_attr(test, assert_instr(v128.bitselect))]
994 pub fn v128_bitselect(v1: v128, v2: v128, c: v128) -> v128 {
995 unsafe { transmute(llvm_bitselect(c.as_i8x16(), v1.as_i8x16(), v2.as_i8x16())) }
996 }
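
// Editor's illustrative sketch, not part of the upstream module: the bitwise
// operations act on all 128 bits regardless of lane interpretation, and
// `v128_bitselect` takes bits from `v1` where the mask bit is 1 and from `v2`
// where it is 0, so an all-ones comparison mask selects whole lanes.
#[cfg(test)]
#[wasm_bindgen_test]
fn example_v128_bitwise_ops() {
    let a = i32x4_splat(0b1100);
    let b = i32x4_splat(0b1010);
    unsafe {
        assert_eq!(i32x4_extract_lane(v128_and(a, b), 0), 0b1000);
        assert_eq!(i32x4_extract_lane(v128_or(a, b), 0), 0b1110);
        assert_eq!(i32x4_extract_lane(v128_xor(a, b), 0), 0b0110);
        // All lanes compare equal, so every lane of `a` is selected.
        let mask = i32x4_eq(a, a);
        assert_eq!(i32x4_extract_lane(v128_bitselect(a, b, mask), 0), 0b1100);
    }
}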
997
998 /// Negates a 128-bit vector interpreted as sixteen 8-bit signed integers.
999 #[inline]
1000 #[cfg_attr(test, assert_instr(i8x16.neg))]
1001 pub fn i8x16_neg(a: v128) -> v128 {
1002 unsafe { transmute(simd_mul(a.as_i8x16(), i8x16::splat(-1))) }
1003 }
1004
1005 /// Returns 1 if any lane is nonzero or 0 if all lanes are zero.
1006 #[inline]
1007 #[cfg_attr(test, assert_instr(i8x16.any_true))]
1008 pub fn i8x16_any_true(a: v128) -> i32 {
1009 unsafe { llvm_i8x16_any_true(a.as_i8x16()) }
1010 }
1011
1012 /// Returns 1 if all lanes are nonzero or 0 if any lane is zero.
1013 #[inline]
1014 #[cfg_attr(test, assert_instr(i8x16.all_true))]
1015 pub fn i8x16_all_true(a: v128) -> i32 {
1016 unsafe { llvm_i8x16_all_true(a.as_i8x16()) }
1017 }
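
// Editor's illustrative sketch, not part of the upstream module: the boolean
// reductions collapse a vector into a scalar flag, which is the usual way to
// branch on the result of a lane-wise comparison.
#[cfg(test)]
#[wasm_bindgen_test]
fn example_i8x16_boolean_reductions() {
    let zeros = i8x16_splat(0);
    let mixed = unsafe { i8x16_replace_lane(zeros, 0, 1) };
    assert_eq!(i8x16_any_true(zeros), 0);
    assert_eq!(i8x16_any_true(mixed), 1);
    assert_eq!(i8x16_all_true(mixed), 0);
    assert_eq!(i8x16_all_true(i8x16_splat(1)), 1);
}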
1018
1019 /// Shifts each lane to the left by the specified number of bits.
1020 ///
1021 /// Only the low bits of the shift amount are used if the shift amount is
1022 /// greater than the lane width.
1023 #[inline]
1024 #[cfg(not(only_node_compatible_functions))]
1025 #[cfg_attr(test, assert_instr(i8x16.shl))]
1026 pub fn i8x16_shl(a: v128, amt: u32) -> v128 {
1027 unsafe { transmute(simd_shl(a.as_i8x16(), i8x16::splat(amt as i8))) }
1028 }
1029
1030 /// Shifts each lane to the right by the specified number of bits, sign
1031 /// extending.
1032 ///
1033 /// Only the low bits of the shift amount are used if the shift amount is
1034 /// greater than the lane width.
1035 #[inline]
1036 #[cfg(not(only_node_compatible_functions))]
1037 #[cfg_attr(test, assert_instr(i8x16.shr_s))]
1038 pub fn i8x16_shr_s(a: v128, amt: u32) -> v128 {
1039 unsafe { transmute(simd_shr(a.as_i8x16(), i8x16::splat(amt as i8))) }
1040 }
1041
1042 /// Shifts each lane to the right by the specified number of bits, shifting in
1043 /// zeros.
1044 ///
1045 /// Only the low bits of the shift amount are used if the shift amount is
1046 /// greater than the lane width.
1047 #[inline]
1048 #[cfg(not(only_node_compatible_functions))]
1049 #[cfg_attr(test, assert_instr(i8x16.shr_u))]
1050 pub fn i8x16_shr_u(a: v128, amt: u32) -> v128 {
1051 unsafe { transmute(simd_shr(a.as_u8x16(), u8x16::splat(amt as u8))) }
1052 }
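
// Editor's illustrative sketch, not part of the upstream module: shifting each
// 8-bit lane left and right by a small, in-range amount.
#[cfg(all(test, not(only_node_compatible_functions)))]
#[wasm_bindgen_test]
fn example_i8x16_shifts() {
    let v = i8x16_splat(0b0000_0100);
    unsafe {
        assert_eq!(i8x16_extract_lane(i8x16_shl(v, 1), 0), 0b0000_1000);
        assert_eq!(i8x16_extract_lane(i8x16_shr_u(v, 2), 0), 0b0000_0001);
    }
}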
1053
1054 /// Adds two 128-bit vectors as if they were two packed sixteen 8-bit integers.
1055 #[inline]
1056 #[cfg_attr(test, assert_instr(i8x16.add))]
1057 pub fn i8x16_add(a: v128, b: v128) -> v128 {
1058 unsafe { transmute(simd_add(a.as_i8x16(), b.as_i8x16())) }
1059 }
1060
1061 /// Adds two 128-bit vectors as if they were two packed sixteen 8-bit signed
1062 /// integers, saturating on overflow to `i8::MAX`.
1063 #[inline]
1064 #[cfg_attr(test, assert_instr(i8x16.add_saturate_s))]
1065 pub fn i8x16_add_saturate_s(a: v128, b: v128) -> v128 {
1066 unsafe { transmute(llvm_i8x16_add_saturate_s(a.as_i8x16(), b.as_i8x16())) }
1067 }
1068
1069 /// Adds two 128-bit vectors as if they were two packed sixteen 8-bit unsigned
1070 /// integers, saturating on overflow to `u8::MAX`.
1071 #[inline]
1072 #[cfg_attr(test, assert_instr(i8x16.add_saturate_u))]
1073 pub fn i8x16_add_saturate_u(a: v128, b: v128) -> v128 {
1074 unsafe { transmute(llvm_i8x16_add_saturate_u(a.as_i8x16(), b.as_i8x16())) }
1075 }
1076
1077 /// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit integers.
1078 #[inline]
1079 #[cfg_attr(test, assert_instr(i8x16.sub))]
1080 pub fn i8x16_sub(a: v128, b: v128) -> v128 {
1081 unsafe { transmute(simd_sub(a.as_i8x16(), b.as_i8x16())) }
1082 }
1083
1084 /// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit
1085 /// signed integers, saturating on overflow to `i8::MIN`.
1086 #[inline]
1087 #[cfg_attr(test, assert_instr(i8x16.sub_saturate_s))]
1088 pub fn i8x16_sub_saturate_s(a: v128, b: v128) -> v128 {
1089 unsafe { transmute(llvm_i8x16_sub_saturate_s(a.as_i8x16(), b.as_i8x16())) }
1090 }
1091
1092 /// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit
1093 /// unsigned integers, saturating on overflow to 0.
1094 #[inline]
1095 #[cfg_attr(test, assert_instr(i8x16.sub_saturate_u))]
1096 pub fn i8x16_sub_saturate_u(a: v128, b: v128) -> v128 {
1097 unsafe { transmute(llvm_i8x16_sub_saturate_u(a.as_i8x16(), b.as_i8x16())) }
1098 }
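
// Editor's illustrative sketch, not part of the upstream module: wrapping
// versus saturating lane arithmetic. `i8x16_add` wraps on overflow, while the
// saturating variants clamp to the lane type's range.
#[cfg(test)]
#[wasm_bindgen_test]
fn example_i8x16_saturating_arithmetic() {
    let a = i8x16_splat(i8::max_value());
    let b = i8x16_splat(1);
    unsafe {
        // Wrapping: i8::MAX + 1 wraps around to i8::MIN.
        assert_eq!(i8x16_extract_lane(i8x16_add(a, b), 0), i8::min_value());
        // Saturating: the sum is clamped to i8::MAX.
        assert_eq!(
            i8x16_extract_lane(i8x16_add_saturate_s(a, b), 0),
            i8::max_value()
        );
    }
}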
1099
1100 /// Multiplies two 128-bit vectors as if they were two packed sixteen 8-bit
1101 /// signed integers.
1102 #[inline]
1103 #[cfg_attr(test, assert_instr(i8x16.mul))]
1104 pub fn i8x16_mul(a: v128, b: v128) -> v128 {
1105 unsafe { transmute(simd_mul(a.as_i8x16(), b.as_i8x16())) }
1106 }
1107
1108 /// Negates a 128-bit vector interpreted as eight 16-bit signed integers.
1109 #[inline]
1110 #[cfg_attr(test, assert_instr(i16x8.neg))]
1111 pub fn i16x8_neg(a: v128) -> v128 {
1112 unsafe { transmute(simd_mul(a.as_i16x8(), i16x8::splat(-1))) }
1113 }
1114
1115 /// Returns 1 if any lane is nonzero or 0 if all lanes are zero.
1116 #[inline]
1117 #[cfg_attr(test, assert_instr(i16x8.any_true))]
1118 pub fn i16x8_any_true(a: v128) -> i32 {
1119 unsafe { llvm_i16x8_any_true(a.as_i16x8()) }
1120 }
1121
1122 /// Returns 1 if all lanes are nonzero or 0 if any lane is zero.
1123 #[inline]
1124 #[cfg_attr(test, assert_instr(i16x8.all_true))]
1125 pub fn i16x8_all_true(a: v128) -> i32 {
1126 unsafe { llvm_i16x8_all_true(a.as_i16x8()) }
1127 }
1128
1129 /// Shifts each lane to the left by the specified number of bits.
1130 ///
1131 /// Only the low bits of the shift amount are used if the shift amount is
1132 /// greater than the lane width.
1133 #[inline]
1134 #[cfg(not(only_node_compatible_functions))]
1135 #[cfg_attr(test, assert_instr(i16x8.shl))]
1136 pub fn i16x8_shl(a: v128, amt: u32) -> v128 {
1137 unsafe { transmute(simd_shl(a.as_i16x8(), i16x8::splat(amt as i16))) }
1138 }
1139
1140 /// Shifts each lane to the right by the specified number of bits, sign
1141 /// extending.
1142 ///
1143 /// Only the low bits of the shift amount are used if the shift amount is
1144 /// greater than the lane width.
1145 #[inline]
1146 #[cfg(not(only_node_compatible_functions))]
1147 #[cfg_attr(test, assert_instr(i16x8.shr_s))]
1148 pub fn i16x8_shr_s(a: v128, amt: u32) -> v128 {
1149 unsafe { transmute(simd_shr(a.as_i16x8(), i16x8::splat(amt as i16))) }
1150 }
1151
1152 /// Shifts each lane to the right by the specified number of bits, shifting in
1153 /// zeros.
1154 ///
1155 /// Only the low bits of the shift amount are used if the shift amount is
1156 /// greater than the lane width.
1157 #[inline]
1158 #[cfg(not(only_node_compatible_functions))]
1159 #[cfg_attr(test, assert_instr(i16x8.shr_u))]
1160 pub fn i16x8_shr_u(a: v128, amt: u32) -> v128 {
1161 unsafe { transmute(simd_shr(a.as_u16x8(), u16x8::splat(amt as u16))) }
1162 }
1163
1164 /// Adds two 128-bit vectors as if they were two packed eight 16-bit integers.
1165 #[inline]
1166 #[cfg_attr(test, assert_instr(i16x8.add))]
1167 pub fn i16x8_add(a: v128, b: v128) -> v128 {
1168 unsafe { transmute(simd_add(a.as_i16x8(), b.as_i16x8())) }
1169 }
1170
1171 /// Adds two 128-bit vectors as if they were two packed eight 16-bit signed
1172 /// integers, saturating on overflow to `i16::MAX`.
1173 #[inline]
1174 #[cfg_attr(test, assert_instr(i16x8.add_saturate_s))]
1175 pub fn i16x8_add_saturate_s(a: v128, b: v128) -> v128 {
1176 unsafe { transmute(llvm_i16x8_add_saturate_s(a.as_i16x8(), b.as_i16x8())) }
1177 }
1178
1179 /// Adds two 128-bit vectors as if they were two packed eight 16-bit unsigned
1180 /// integers, saturating on overflow to `u16::MAX`.
1181 #[inline]
1182 #[cfg_attr(test, assert_instr(i16x8.add_saturate_u))]
1183 pub fn i16x8_add_saturate_u(a: v128, b: v128) -> v128 {
1184 unsafe { transmute(llvm_i16x8_add_saturate_u(a.as_i16x8(), b.as_i16x8())) }
1185 }
1186
1187 /// Subtracts two 128-bit vectors as if they were two packed eight 16-bit integers.
1188 #[inline]
1189 #[cfg_attr(test, assert_instr(i16x8.sub))]
1190 pub fn i16x8_sub(a: v128, b: v128) -> v128 {
1191 unsafe { transmute(simd_sub(a.as_i16x8(), b.as_i16x8())) }
1192 }
1193
1194 /// Subtracts two 128-bit vectors as if they were two packed eight 16-bit
1195 /// signed integers, saturating on overflow to `i16::MIN`.
1196 #[inline]
1197 #[cfg_attr(test, assert_instr(i16x8.sub_saturate_s))]
1198 pub fn i16x8_sub_saturate_s(a: v128, b: v128) -> v128 {
1199 unsafe { transmute(llvm_i16x8_sub_saturate_s(a.as_i16x8(), b.as_i16x8())) }
1200 }
1201
1202 /// Subtracts two 128-bit vectors as if they were two packed eight 16-bit
1203 /// unsigned integers, saturating on overflow to 0.
1204 #[inline]
1205 #[cfg_attr(test, assert_instr(i16x8.sub_saturate_u))]
1206 pub fn i16x8_sub_saturate_u(a: v128, b: v128) -> v128 {
1207 unsafe { transmute(llvm_i16x8_sub_saturate_u(a.as_i16x8(), b.as_i16x8())) }
1208 }
1209
1210 /// Multiplies two 128-bit vectors as if they were two packed eight 16-bit
1211 /// signed integers.
1212 #[inline]
1213 #[cfg_attr(test, assert_instr(i16x8.mul))]
1214 pub fn i16x8_mul(a: v128, b: v128) -> v128 {
1215 unsafe { transmute(simd_mul(a.as_i16x8(), b.as_i16x8())) }
1216 }
1217
1218 /// Negates a 128-bit vector interpreted as four 32-bit signed integers.
1219 #[inline]
1220 #[cfg_attr(test, assert_instr(i32x4.neg))]
1221 pub fn i32x4_neg(a: v128) -> v128 {
1222 unsafe { transmute(simd_mul(a.as_i32x4(), i32x4::splat(-1))) }
1223 }
1224
1225 /// Returns 1 if any lane is nonzero or 0 if all lanes are zero.
1226 #[inline]
1227 #[cfg_attr(test, assert_instr(i32x4.any_true))]
1228 pub fn i32x4_any_true(a: v128) -> i32 {
1229 unsafe { llvm_i32x4_any_true(a.as_i32x4()) }
1230 }
1231
1232 /// Returns 1 if all lanes are nonzero or 0 if any lane is zero.
1233 #[inline]
1234 #[cfg_attr(test, assert_instr(i32x4.all_true))]
1235 pub fn i32x4_all_true(a: v128) -> i32 {
1236 unsafe { llvm_i32x4_all_true(a.as_i32x4()) }
1237 }
1238
1239 /// Shifts each lane to the left by the specified number of bits.
1240 ///
1241 /// Only the low bits of the shift amount are used if the shift amount is
1242 /// greater than the lane width.
1243 #[inline]
1244 #[cfg(not(only_node_compatible_functions))]
1245 #[cfg_attr(test, assert_instr(i32x4.shl))]
1246 pub fn i32x4_shl(a: v128, amt: u32) -> v128 {
1247 unsafe { transmute(simd_shl(a.as_i32x4(), i32x4::splat(amt as i32))) }
1248 }
1249
1250 /// Shifts each lane to the right by the specified number of bits, sign
1251 /// extending.
1252 ///
1253 /// Only the low bits of the shift amount are used if the shift amount is
1254 /// greater than the lane width.
1255 #[inline]
1256 #[cfg(not(only_node_compatible_functions))]
1257 #[cfg_attr(test, assert_instr(i32x4.shr_s))]
1258 pub fn i32x4_shr_s(a: v128, amt: u32) -> v128 {
1259 unsafe { transmute(simd_shr(a.as_i32x4(), i32x4::splat(amt as i32))) }
1260 }
1261
1262 /// Shifts each lane to the right by the specified number of bits, shifting in
1263 /// zeros.
1264 ///
1265 /// Only the low bits of the shift amount are used if the shift amount is
1266 /// greater than the lane width.
1267 #[inline]
1268 #[cfg(not(only_node_compatible_functions))]
1269 #[cfg_attr(test, assert_instr(i32x4.shr_u))]
1270 pub fn i32x4_shr_u(a: v128, amt: u32) -> v128 {
1271 unsafe { transmute(simd_shr(a.as_u32x4(), u32x4::splat(amt as u32))) }
1272 }
1273
1274 /// Adds two 128-bit vectors as if they were two packed four 32-bit integers.
1275 #[inline]
1276 #[cfg_attr(test, assert_instr(i32x4.add))]
1277 pub fn i32x4_add(a: v128, b: v128) -> v128 {
1278 unsafe { transmute(simd_add(a.as_i32x4(), b.as_i32x4())) }
1279 }
1280
1281 /// Subtracts two 128-bit vectors as if they were two packed four 32-bit integers.
1282 #[inline]
1283 #[cfg_attr(test, assert_instr(i32x4.sub))]
1284 pub fn i32x4_sub(a: v128, b: v128) -> v128 {
1285 unsafe { transmute(simd_sub(a.as_i32x4(), b.as_i32x4())) }
1286 }
1287
1288 /// Multiplies two 128-bit vectors as if they were two packed four 32-bit
1289 /// signed integers.
1290 #[inline]
1291 #[cfg_attr(test, assert_instr(i32x4.mul))]
1292 pub fn i32x4_mul(a: v128, b: v128) -> v128 {
1293 unsafe { transmute(simd_mul(a.as_i32x4(), b.as_i32x4())) }
1294 }
1295
1296 /// Negates a 128-bit vector interpreted as two 64-bit signed integers.
1297 #[inline]
1298 #[cfg(not(only_node_compatible_functions))]
1299 #[cfg_attr(test, assert_instr(i64x2.neg))]
1300 pub fn i64x2_neg(a: v128) -> v128 {
1301 unsafe { transmute(simd_mul(a.as_i64x2(), i64x2::splat(-1))) }
1302 }
1303
1304 /// Returns 1 if any lane is nonzero or 0 if all lanes are zero.
1305 #[inline]
1306 #[cfg(not(only_node_compatible_functions))]
1307 #[cfg_attr(test, assert_instr(i64x2.any_true))]
1308 pub fn i64x2_any_true(a: v128) -> i32 {
1309 unsafe { llvm_i64x2_any_true(a.as_i64x2()) }
1310 }
1311
1312 /// Returns 1 if all lanes are nonzero or 0 if any lane is zero.
1313 #[inline]
1314 #[cfg(not(only_node_compatible_functions))]
1315 #[cfg_attr(test, assert_instr(i64x2.all_true))]
1316 pub fn i64x2_all_true(a: v128) -> i32 {
1317 unsafe { llvm_i64x2_all_true(a.as_i64x2()) }
1318 }
1319
1320 /// Shifts each lane to the left by the specified number of bits.
1321 ///
1322 /// Only the low bits of the shift amount are used if the shift amount is
1323 /// greater than the lane width.
1324 #[inline]
1325 #[cfg(not(only_node_compatible_functions))]
1326 #[cfg_attr(test, assert_instr(i64x2.shl))]
1327 pub fn i64x2_shl(a: v128, amt: u32) -> v128 {
1328 unsafe { transmute(simd_shl(a.as_i64x2(), i64x2::splat(amt as i64))) }
1329 }
1330
1331 /// Shifts each lane to the right by the specified number of bits, sign
1332 /// extending.
1333 ///
1334 /// Only the low bits of the shift amount are used if the shift amount is
1335 /// greater than the lane width.
1336 #[inline]
1337 #[cfg(not(only_node_compatible_functions))]
1338 #[cfg_attr(test, assert_instr(i64x2.shr_s))]
1339 pub fn i64x2_shr_s(a: v128, amt: u32) -> v128 {
1340 unsafe { transmute(simd_shr(a.as_i64x2(), i64x2::splat(amt as i64))) }
1341 }
1342
1343 /// Shifts each lane to the right by the specified number of bits, shifting in
1344 /// zeros.
1345 ///
1346 /// Only the low bits of the shift amount are used if the shift amount is
1347 /// greater than the lane width.
1348 #[inline]
1349 #[cfg(not(only_node_compatible_functions))]
1350 #[cfg_attr(test, assert_instr(i64x2.shr_u))]
1351 pub fn i64x2_shr_u(a: v128, amt: u32) -> v128 {
1352 unsafe { transmute(simd_shr(a.as_u64x2(), u64x2::splat(amt as u64))) }
1353 }
1354
1355 /// Adds two 128-bit vectors as if they were two packed two 64-bit integers.
1356 #[inline]
1357 #[cfg(not(only_node_compatible_functions))]
1358 #[cfg_attr(test, assert_instr(i64x2.add))]
1359 pub fn i64x2_add(a: v128, b: v128) -> v128 {
1360 unsafe { transmute(simd_add(a.as_i64x2(), b.as_i64x2())) }
1361 }
1362
1363 /// Subtracts two 128-bit vectors as if they were two packed two 64-bit integers.
1364 #[inline]
1365 #[cfg(not(only_node_compatible_functions))]
1366 #[cfg_attr(test, assert_instr(i64x2.sub))]
1367 pub fn i64x2_sub(a: v128, b: v128) -> v128 {
1368 unsafe { transmute(simd_sub(a.as_i64x2(), b.as_i64x2())) }
1369 }
1370
1371 /// Calculates the absolute value of each lane of a 128-bit vector interpreted
1372 /// as four 32-bit floating point numbers.
1373 #[inline]
1374 #[cfg_attr(test, assert_instr(f32x4.abs))]
1375 pub fn f32x4_abs(a: v128) -> v128 {
1376 unsafe { transmute(llvm_f32x4_abs(a.as_f32x4())) }
1377 }
1378
1379 /// Negates each lane of a 128-bit vector interpreted as four 32-bit floating
1380 /// point numbers.
1381 #[inline]
1382 #[cfg_attr(test, assert_instr(f32x4.neg))]
1383 pub fn f32x4_neg(a: v128) -> v128 {
1384 unsafe { f32x4_mul(a, transmute(f32x4(-1.0, -1.0, -1.0, -1.0))) }
1385 }
1386
1387 /// Calculates the square root of each lane of a 128-bit vector interpreted as
1388 /// four 32-bit floating point numbers.
1389 #[inline]
1390 #[cfg(not(only_node_compatible_functions))]
1391 #[cfg_attr(test, assert_instr(f32x4.sqrt))]
1392 pub fn f32x4_sqrt(a: v128) -> v128 {
1393 unsafe { transmute(llvm_f32x4_sqrt(a.as_f32x4())) }
1394 }
1395
1396 /// Adds pairwise lanes of two 128-bit vectors interpreted as four 32-bit
1397 /// floating point numbers.
1398 #[inline]
1399 #[cfg_attr(test, assert_instr(f32x4.add))]
1400 pub fn f32x4_add(a: v128, b: v128) -> v128 {
1401 unsafe { transmute(simd_add(a.as_f32x4(), b.as_f32x4())) }
1402 }
1403
1404 /// Subtracts pairwise lanes of two 128-bit vectors interpreted as four 32-bit
1405 /// floating point numbers.
1406 #[inline]
1407 #[cfg_attr(test, assert_instr(f32x4.sub))]
1408 pub fn f32x4_sub(a: v128, b: v128) -> v128 {
1409 unsafe { transmute(simd_sub(a.as_f32x4(), b.as_f32x4())) }
1410 }
1411
1412 /// Multiplies pairwise lanes of two 128-bit vectors interpreted as four 32-bit
1413 /// floating point numbers.
1414 #[inline]
1415 #[cfg_attr(test, assert_instr(f32x4.mul))]
1416 pub fn f32x4_mul(a: v128, b: v128) -> v128 {
1417 unsafe { transmute(simd_mul(a.as_f32x4(), b.as_f32x4())) }
1418 }
1419
1420 /// Divides pairwise lanes of two 128-bit vectors interpreted as four 32-bit
1421 /// floating point numbers.
1422 #[inline]
1423 #[cfg(not(only_node_compatible_functions))]
1424 #[cfg_attr(test, assert_instr(f32x4.div))]
1425 pub fn f32x4_div(a: v128, b: v128) -> v128 {
1426 unsafe { transmute(simd_div(a.as_f32x4(), b.as_f32x4())) }
1427 }
1428
1429 /// Calculates the minimum of pairwise lanes of two 128-bit vectors interpreted
1430 /// as four 32-bit floating point numbers.
1431 #[inline]
1432 #[cfg_attr(test, assert_instr(f32x4.min))]
1433 pub fn f32x4_min(a: v128, b: v128) -> v128 {
1434 unsafe { transmute(llvm_f32x4_min(a.as_f32x4(), b.as_f32x4())) }
1435 }
1436
1437 /// Calculates the maximum of pairwise lanes of two 128-bit vectors interpreted
1438 /// as four 32-bit floating point numbers.
1439 #[inline]
1440 #[cfg_attr(test, assert_instr(f32x4.max))]
1441 pub fn f32x4_max(a: v128, b: v128) -> v128 {
1442 unsafe { transmute(llvm_f32x4_max(a.as_f32x4(), b.as_f32x4())) }
1443 }
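
// Editor's illustrative sketch, not part of the upstream module: basic
// `f32x4` lane arithmetic. Note that `f32x4_min`/`f32x4_max` lower to LLVM's
// `minimum`/`maximum` intrinsics, which propagate NaN operands.
#[cfg(test)]
#[wasm_bindgen_test]
fn example_f32x4_arithmetic() {
    let a = f32x4_splat(2.0);
    let b = f32x4_splat(3.0);
    unsafe {
        assert_eq!(f32x4_extract_lane(f32x4_add(a, b), 0), 5.0);
        assert_eq!(f32x4_extract_lane(f32x4_mul(a, b), 0), 6.0);
        assert_eq!(f32x4_extract_lane(f32x4_min(a, b), 0), 2.0);
        assert_eq!(f32x4_extract_lane(f32x4_max(a, b), 0), 3.0);
    }
}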
1444
1445 /// Calculates the absolute value of each lane of a 128-bit vector interpreted
1446 /// as two 64-bit floating point numbers.
1447 #[inline]
1448 #[cfg(not(only_node_compatible_functions))]
1449 #[cfg_attr(test, assert_instr(f64x2.abs))]
1450 pub fn f64x2_abs(a: v128) -> v128 {
1451 unsafe { transmute(llvm_f64x2_abs(a.as_f64x2())) }
1452 }
1453
1454 /// Negates each lane of a 128-bit vector interpreted as two 64-bit floating
1455 /// point numbers.
1456 #[inline]
1457 #[cfg(not(only_node_compatible_functions))]
1458 #[cfg_attr(test, assert_instr(f64x2.neg))]
1459 pub fn f64x2_neg(a: v128) -> v128 {
1460 unsafe { f64x2_mul(a, transmute(f64x2(-1.0, -1.0))) }
1461 }
1462
1463 /// Calculates the square root of each lane of a 128-bit vector interpreted as
1464 /// two 64-bit floating point numbers.
1465 #[inline]
1466 #[cfg(not(only_node_compatible_functions))]
1467 #[cfg_attr(test, assert_instr(f64x2.sqrt))]
1468 pub fn f64x2_sqrt(a: v128) -> v128 {
1469 unsafe { transmute(llvm_f64x2_sqrt(a.as_f64x2())) }
1470 }
1471
1472 /// Adds pairwise lanes of two 128-bit vectors interpreted as two 64-bit
1473 /// floating point numbers.
1474 #[inline]
1475 #[cfg(not(only_node_compatible_functions))]
1476 #[cfg_attr(test, assert_instr(f64x2.add))]
1477 pub fn f64x2_add(a: v128, b: v128) -> v128 {
1478 unsafe { transmute(simd_add(a.as_f64x2(), b.as_f64x2())) }
1479 }
1480
1481 /// Subtracts pairwise lanes of two 128-bit vectors interpreted as two 64-bit
1482 /// floating point numbers.
1483 #[inline]
1484 #[cfg(not(only_node_compatible_functions))]
1485 #[cfg_attr(test, assert_instr(f64x2.sub))]
1486 pub fn f64x2_sub(a: v128, b: v128) -> v128 {
1487 unsafe { transmute(simd_sub(a.as_f64x2(), b.as_f64x2())) }
1488 }
1489
1490 /// Multiplies pairwise lanes of two 128-bit vectors interpreted as two 64-bit
1491 /// floating point numbers.
1492 #[inline]
1493 #[cfg(not(only_node_compatible_functions))]
1494 #[cfg_attr(test, assert_instr(f64x2.mul))]
1495 pub fn f64x2_mul(a: v128, b: v128) -> v128 {
1496 unsafe { transmute(simd_mul(a.as_f64x2(), b.as_f64x2())) }
1497 }
1498
1499 /// Divides pairwise lanes of two 128-bit vectors interpreted as two 64-bit
1500 /// floating point numbers.
1501 #[inline]
1502 #[cfg(not(only_node_compatible_functions))]
1503 #[cfg_attr(test, assert_instr(f64x2.div))]
1504 pub fn f64x2_div(a: v128, b: v128) -> v128 {
1505 unsafe { transmute(simd_div(a.as_f64x2(), b.as_f64x2())) }
1506 }
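// A minimal sketch of the lane-wise `f64x2` arithmetic above (editor's
// illustration, not upstream code); each output lane depends only on the
// corresponding lanes of the two inputs:
//
// let a: v128 = unsafe { transmute([6.0f64, -8.0]) };
// let b: v128 = unsafe { transmute([3.0f64, 2.0]) };
// let sum: [f64; 2] = unsafe { transmute(f64x2_add(a, b)) }; // [9.0, -6.0]
// let quot: [f64; 2] = unsafe { transmute(f64x2_div(a, b)) }; // [2.0, -4.0]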
1507
1508 /// Calculates the minimum of pairwise lanes of two 128-bit vectors interpreted
1509 /// as two 64-bit floating point numbers.
1510 #[inline]
1511 #[cfg(not(only_node_compatible_functions))]
1512 #[cfg_attr(test, assert_instr(f64x2.min))]
1513 pub fn f64x2_min(a: v128, b: v128) -> v128 {
1514 unsafe { transmute(llvm_f64x2_min(a.as_f64x2(), b.as_f64x2())) }
1515 }
1516
1517 /// Calculates the maximum of pairwise lanes of two 128-bit vectors interpreted
1518 /// as two 64-bit floating point numbers.
1519 #[inline]
1520 #[cfg(not(only_node_compatible_functions))]
1521 #[cfg_attr(test, assert_instr(f64x2.max))]
1522 pub fn f64x2_max(a: v128, b: v128) -> v128 {
1523 unsafe { transmute(llvm_f64x2_max(a.as_f64x2(), b.as_f64x2())) }
1524 }
1525
1526 /// Converts a 128-bit vector interpreted as four 32-bit floating point numbers
1527 /// into a 128-bit vector of four 32-bit signed integers.
1528 ///
1529 /// NaN is converted to 0, and values out of bounds saturate to the nearest
1530 /// representable integer.
1531 #[inline]
1532 #[cfg_attr(test, assert_instr("i32x4.trunc_sat_f32x4_s"))]
1533 pub fn i32x4_trunc_s_f32x4_sat(a: v128) -> v128 {
1534 unsafe { transmute(simd_cast::<_, i32x4>(a.as_f32x4())) }
1535 }
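// A minimal sketch of the signed truncation above for in-range inputs
// (editor's illustration, not upstream code); fractional parts are discarded
// toward zero:
//
// let a: v128 = unsafe { transmute([1.9f32, -1.9, 0.0, 40.0]) };
// let r: [i32; 4] = unsafe { transmute(i32x4_trunc_s_f32x4_sat(a)) }; // [1, -1, 0, 40]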
1536
1537 /// Converts a 128-bit vector interpreted as four 32-bit floating point numbers
1538 /// into a 128-bit vector of four 32-bit unsigned integers.
1539 ///
1540 /// NaN is converted to 0, and values out of bounds saturate to the nearest
1541 /// representable integer.
1542 #[inline]
1543 #[cfg_attr(test, assert_instr("i32x4.trunc_sat_f32x4_u"))]
1544 pub fn i32x4_trunc_u_f32x4_sat(a: v128) -> v128 {
1545 unsafe { transmute(simd_cast::<_, u32x4>(a.as_f32x4())) }
1546 }
1547
1548 /// Converts a 128-bit vector interpreted as two 64-bit floating point numbers
1549 /// into a 128-bit vector of two 64-bit signed integers.
1550 ///
1551 /// NaN is converted to 0, and values out of bounds saturate to the nearest
1552 /// representable integer.
1553 #[inline]
1554 #[cfg(not(only_node_compatible_functions))]
1555 #[cfg_attr(test, assert_instr("i64x2.trunc_s/f64x2:sat"))]
1556 pub fn i64x2_trunc_s_f64x2_sat(a: v128) -> v128 {
1557 unsafe { transmute(simd_cast::<_, i64x2>(a.as_f64x2())) }
1558 }
1559
1560 /// Converts a 128-bit vector interpreted as two 64-bit floating point numbers
1561 /// into a 128-bit vector of two 64-bit unsigned integers.
1562 ///
1563 /// NaN is converted to 0, and values out of bounds saturate to the nearest
1564 /// representable integer.
1565 #[inline]
1566 #[cfg(not(only_node_compatible_functions))]
1567 #[cfg_attr(test, assert_instr("i64x2.trunc_u/f64x2:sat"))]
1568 pub fn i64x2_trunc_u_f64x2_sat(a: v128) -> v128 {
1569 unsafe { transmute(simd_cast::<_, u64x2>(a.as_f64x2())) }
1570 }
1571
1572 /// Converts a 128-bit vector interpreted as four 32-bit signed integers into a
1573 /// 128-bit vector of four 32-bit floating point numbers.
1574 #[inline]
1575 #[cfg_attr(test, assert_instr("f32x4.convert_i32x4_s"))]
1576 pub fn f32x4_convert_i32x4_s(a: v128) -> v128 {
1577 unsafe { transmute(simd_cast::<_, f32x4>(a.as_i32x4())) }
1578 }
1579
1580 /// Converts a 128-bit vector interpreted as four 32-bit unsigned integers into a
1581 /// 128-bit vector of four 32-bit floating point numbers.
1582 #[inline]
1583 #[cfg_attr(test, assert_instr("f32x4.convert_i32x4_u"))]
1584 pub fn f32x4_convert_i32x4_u(a: v128) -> v128 {
1585 unsafe { transmute(simd_cast::<_, f32x4>(a.as_u32x4())) }
1586 }
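// A minimal sketch of the two conversions above (editor's illustration, not
// upstream code); the same bit pattern yields different results depending on
// whether the lanes are read as signed or unsigned:
//
// let a: v128 = unsafe { transmute([-1i32, 2, 3, 4]) };
// let s: [f32; 4] = unsafe { transmute(f32x4_convert_i32x4_s(a)) }; // [-1.0, 2.0, 3.0, 4.0]
// let u: [f32; 4] = unsafe { transmute(f32x4_convert_i32x4_u(a)) }; // [u32::MAX as f32, 2.0, 3.0, 4.0]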
1587
1588 /// Converts a 128-bit vector interpreted as two 64-bit signed integers into a
1589 /// 128-bit vector of two 64-bit floating point numbers.
1590 #[inline]
1591 #[cfg(not(only_node_compatible_functions))]
1592 #[cfg_attr(test, assert_instr("f64x2.convert_s/i64x2"))]
1593 pub fn f64x2_convert_s_i64x2(a: v128) -> v128 {
1594 unsafe { transmute(simd_cast::<_, f64x2>(a.as_i64x2())) }
1595 }
1596
1597 /// Converts a 128-bit vector interpreted as two 64-bit unsigned integers into a
1598 /// 128-bit vector of two 64-bit floating point numbers.
1599 #[inline]
1600 #[cfg(not(only_node_compatible_functions))]
1601 #[cfg_attr(test, assert_instr("f64x2.convert_u/i64x2"))]
1602 pub fn f64x2_convert_u_i64x2(a: v128) -> v128 {
1603 unsafe { transmute(simd_cast::<_, f64x2>(a.as_u64x2())) }
1604 }
1605
1606 #[cfg(test)]
1607 pub mod tests {
1608 use super::*;
1609 use std;
1610 use std::mem;
1611 use std::num::Wrapping;
1612 use std::prelude::v1::*;
1613 use wasm_bindgen_test::*;
1614
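// Helper used throughout these tests: two v128 values are considered equal
// when their raw 16-byte representations match.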
1615 fn compare_bytes(a: v128, b: v128) {
1616 let a: [u8; 16] = unsafe { transmute(a) };
1617 let b: [u8; 16] = unsafe { transmute(b) };
1618 assert_eq!(a, b);
1619 }
1620
1621 #[wasm_bindgen_test]
1622 #[cfg(not(only_node_compatible_functions))]
1623 fn test_v128_const() {
1624 const A: v128 =
1625 unsafe { super::v128_const(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) };
1626 compare_bytes(A, A);
1627 }
1628
1629 macro_rules! test_splat {
1630 ($test_id:ident: $val:expr => $($vals:expr),*) => {
1631 #[wasm_bindgen_test]
1632 fn $test_id() {
1633 let a = super::$test_id($val);
1634 let b: v128 = unsafe {
1635 transmute([$($vals as u8),*])
1636 };
1637 compare_bytes(a, b);
1638 }
1639 }
1640 }
1641
1642 test_splat!(i8x16_splat: 42 => 42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42);
1643 test_splat!(i16x8_splat: 42 => 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0);
1644 test_splat!(i32x4_splat: 42 => 42, 0, 0, 0, 42, 0, 0, 0, 42, 0, 0, 0, 42, 0, 0, 0);
1645 #[cfg(not(only_node_compatible_functions))]
1646 test_splat!(i64x2_splat: 42 => 42, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0);
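// For the floating point splats below, the expected bytes are the
// little-endian IEEE 754 encodings of 42.0 (0x4228_0000 as an f32 and
// 0x4045_0000_0000_0000 as an f64).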
1647 test_splat!(f32x4_splat: 42. => 0, 0, 40, 66, 0, 0, 40, 66, 0, 0, 40, 66, 0, 0, 40, 66);
1648 #[cfg(not(only_node_compatible_functions))]
1649 test_splat!(f64x2_splat: 42. => 0, 0, 0, 0, 0, 0, 69, 64, 0, 0, 0, 0, 0, 0, 69, 64);
1650
1651 // tests extract and replace lanes
1652 macro_rules! test_extract {
1653 (
1654 name: $test_id:ident,
1655 extract: $extract:ident,
1656 replace: $replace:ident,
1657 elem: $elem:ty,
1658 count: $count:expr,
1659 indices: [$($idx:expr),*],
1660 ) => {
1661 #[wasm_bindgen_test]
1662 fn $test_id() {
1663 unsafe {
1664 let arr: [$elem; $count] = [123 as $elem; $count];
1665 let vec: v128 = transmute(arr);
1666 $(
1667 assert_eq!($extract(vec, $idx), 123 as $elem);
1668 )*;
1669
1670 // Create a vector from the array and check that each lane contains
1671 // the same value as the corresponding array element:
1672 let arr: [$elem; $count] = [$($idx as $elem),*];
1673 let vec: v128 = transmute(arr);
1674 $(
1675 assert_eq!($extract(vec, $idx), $idx as $elem);
1676
1677 let tmp = $replace(vec, $idx, 124 as $elem);
1678 assert_eq!($extract(tmp, $idx), 124 as $elem);
1679 )*;
1680 }
1681 }
1682 }
1683 }
1684
1685 test_extract! {
1686 name: test_i8x16_extract_replace,
1687 extract: i8x16_extract_lane,
1688 replace: i8x16_replace_lane,
1689 elem: i8,
1690 count: 16,
1691 indices: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
1692 }
1693 test_extract! {
1694 name: test_i16x8_extract_replace,
1695 extract: i16x8_extract_lane,
1696 replace: i16x8_replace_lane,
1697 elem: i16,
1698 count: 8,
1699 indices: [0, 1, 2, 3, 4, 5, 6, 7],
1700 }
1701 test_extract! {
1702 name: test_i32x4_extract_replace,
1703 extract: i32x4_extract_lane,
1704 replace: i32x4_replace_lane,
1705 elem: i32,
1706 count: 4,
1707 indices: [0, 1, 2, 3],
1708 }
1709 #[cfg(not(only_node_compatible_functions))]
1710 test_extract! {
1711 name: test_i64x2_extract_replace,
1712 extract: i64x2_extract_lane,
1713 replace: i64x2_replace_lane,
1714 elem: i64,
1715 count: 2,
1716 indices: [0, 1],
1717 }
1718 test_extract! {
1719 name: test_f32x4_extract_replace,
1720 extract: f32x4_extract_lane,
1721 replace: f32x4_replace_lane,
1722 elem: f32,
1723 count: 4,
1724 indices: [0, 1, 2, 3],
1725 }
1726 #[cfg(not(only_node_compatible_functions))]
1727 test_extract! {
1728 name: test_f64x2_extract_replace,
1729 extract: f64x2_extract_lane,
1730 replace: f64x2_replace_lane,
1731 elem: f64,
1732 count: 2,
1733 indices: [0, 1],
1734 }
1735
1736 macro_rules! test_binop {
1737 (
1738 $($name:ident => {
1739 $([$($vec1:tt)*] ($op:tt | $f:ident) [$($vec2:tt)*],)*
1740 })*
1741 ) => ($(
1742 #[wasm_bindgen_test]
1743 fn $name() {
1744 unsafe {
1745 $(
1746 let v1 = [$($vec1)*];
1747 let v2 = [$($vec2)*];
1748 let v1_v128: v128 = mem::transmute(v1);
1749 let v2_v128: v128 = mem::transmute(v2);
1750 let v3_v128 = super::$f(v1_v128, v2_v128);
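// The array literal only pins `v3` to the input array's type for the
// transmute below; its initial value is immediately dropped unused.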
1751 let mut v3 = [$($vec1)*];
1752 drop(v3);
1753 v3 = mem::transmute(v3_v128);
1754
1755 for (i, actual) in v3.iter().enumerate() {
1756 let expected = (Wrapping(v1[i]) $op Wrapping(v2[i])).0;
1757 assert_eq!(*actual, expected);
1758 }
1759 )*
1760 }
1761 }
1762 )*)
1763 }
1764
1765 macro_rules! test_unop {
1766 (
1767 $($name:ident => {
1768 $(($op:tt | $f:ident) [$($vec1:tt)*],)*
1769 })*
1770 ) => ($(
1771 #[wasm_bindgen_test]
1772 fn $name() {
1773 unsafe {
1774 $(
1775 let v1 = [$($vec1)*];
1776 let v1_v128: v128 = mem::transmute(v1);
1777 let v2_v128 = super::$f(v1_v128);
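// As in `test_binop!` above, the literal only fixes `v2`'s type for the
// transmute; its initial value is immediately dropped unused.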
1778 let mut v2 = [$($vec1)*];
1779 drop(v2);
1780 v2 = mem::transmute(v2_v128);
1781
1782 for (i, actual) in v2.iter().enumerate() {
1783 let expected = ($op Wrapping(v1[i])).0;
1784 assert_eq!(*actual, expected);
1785 }
1786 )*
1787 }
1788 }
1789 )*)
1790 }
1791
1792 test_binop! {
1793 test_i8x16_add => {
1794 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
1795 (+ | i8x16_add)
1796 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
1797
1798 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
1799 (+ | i8x16_add)
1800 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
1801
1802 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
1803 (+ | i8x16_add)
1804 [127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 9, -24],
1805 }
1806 test_i8x16_sub => {
1807 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
1808 (- | i8x16_sub)
1809 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
1810
1811 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
1812 (- | i8x16_sub)
1813 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
1814
1815 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
1816 (- | i8x16_sub)
1817 [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
1818 }
1819 test_i8x16_mul => {
1820 [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
1821 (* | i8x16_mul)
1822 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
1823
1824 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
1825 (* | i8x16_mul)
1826 [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
1827
1828 [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
1829 (* | i8x16_mul)
1830 [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 30, 3],
1831 }
1832
1833 test_i16x8_add => {
1834 [0i16, 0, 0, 0, 0, 0, 0, 0]
1835 (+ | i16x8_add)
1836 [1i16, 1, 1, 1, 1, 1, 1, 1],
1837
1838 [1i16, 2, 3, 4, 5, 6, 7, 8]
1839 (+ | i16x8_add)
1840 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
1841 }
1842
1843 test_i16x8_sub => {
1844 [0i16, 0, 0, 0, 0, 0, 0, 0]
1845 (- | i16x8_sub)
1846 [1i16, 1, 1, 1, 1, 1, 1, 1],
1847
1848 [1i16, 2, 3, 4, 5, 6, 7, 8]
1849 (- | i16x8_sub)
1850 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
1851 }
1852
1853 test_i16x8_mul => {
1854 [0i16, 0, 0, 0, 0, 0, 0, 0]
1855 (* | i16x8_mul)
1856 [1i16, 1, 1, 1, 1, 1, 1, 1],
1857
1858 [1i16, 2, 3, 4, 5, 6, 7, 8]
1859 (* | i16x8_mul)
1860 [32767, 8, -2494,-4, 4882, -4, 848, 3830],
1861 }
1862
1863 test_i32x4_add => {
1864 [0i32, 0, 0, 0] (+ | i32x4_add) [1, 2, 3, 4],
1865 [1i32, 1283, i32::MAX, i32::MIN]
1866 (+ | i32x4_add)
1867 [i32::MAX; 4],
1868 }
1869
1870 test_i32x4_sub => {
1871 [0i32, 0, 0, 0] (- | i32x4_sub) [1, 2, 3, 4],
1872 [1i32, 1283, i32::MAX, i32::MIN]
1873 (- | i32x4_sub)
1874 [i32::MAX; 4],
1875 }
1876
1877 test_i32x4_mul => {
1878 [0i32, 0, 0, 0] (* | i32x4_mul) [1, 2, 3, 4],
1879 [1i32, 1283, i32::MAX, i32::MIN]
1880 (* | i32x4_mul)
1881 [i32::MAX; 4],
1882 }
1883
1884 // TODO: test_i64x2_add
1885 // TODO: test_i64x2_sub
1886 }
1887
1888 test_unop! {
1889 test_i8x16_neg => {
1890 (- | i8x16_neg)
1891 [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
1892
1893 (- | i8x16_neg)
1894 [-2i8, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
1895
1896 (- | i8x16_neg)
1897 [-127i8, -44, 43, 126, 4, -128, 127, -59, -43, 39, -69, 79, -3, 35, 83, 13],
1898 }
1899
1900 test_i16x8_neg => {
1901 (- | i16x8_neg) [1i16, 1, 1, 1, 1, 1, 1, 1],
1902 (- | i16x8_neg) [2i16, 0x7fff, !0, 4, 42, -5, 33, -4847],
1903 }
1904
1905 test_i32x4_neg => {
1906 (- | i32x4_neg) [1i32, 2, 3, 4],
1907 (- | i32x4_neg) [i32::MIN, i32::MAX, 0, 4],
1908 }
1909
1910 // TODO: test_i64x2_neg
1911 }
1912
1913 // #[wasm_bindgen_test]
1914 // fn v8x16_shuffle() {
1915 // unsafe {
1916 // let a = [0_u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
1917 // let b = [
1918 // 16_u8, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
1919 // 31,
1920 // ];
1921 //
1922 // let vec_a: v128 = transmute(a);
1923 // let vec_b: v128 = transmute(b);
1924 //
1925 // let vec_r = v8x16_shuffle!(
1926 // vec_a,
1927 // vec_b,
1928 // [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]
1929 // );
1930 //
1931 // let e =
1932 // [0_u8, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30];
1933 // let vec_e: v128 = transmute(e);
1934 // compare_bytes(vec_r, vec_e);
1935 // }
1936 // }
1937 //
1938 // macro_rules! floating_point {
1939 // (f32) => {
1940 // true
1941 // };
1942 // (f64) => {
1943 // true
1944 // };
1945 // ($id:ident) => {
1946 // false
1947 // };
1948 // }
1949 //
1950 // trait IsNan: Sized {
1951 // fn is_nan(self) -> bool {
1952 // false
1953 // }
1954 // }
1955 // impl IsNan for i8 {}
1956 // impl IsNan for i16 {}
1957 // impl IsNan for i32 {}
1958 // impl IsNan for i64 {}
1959 //
1960 // macro_rules! test_bop {
1961 // ($id:ident[$ety:ident; $ecount:expr] |
1962 // $binary_op:ident [$op_test_id:ident] :
1963 // ([$($in_a:expr),*], [$($in_b:expr),*]) => [$($out:expr),*]) => {
1964 // test_bop!(
1965 // $id[$ety; $ecount] => $ety | $binary_op [ $op_test_id ]:
1966 // ([$($in_a),*], [$($in_b),*]) => [$($out),*]
1967 // );
1968 //
1969 // };
1970 // ($id:ident[$ety:ident; $ecount:expr] => $oty:ident |
1971 // $binary_op:ident [$op_test_id:ident] :
1972 // ([$($in_a:expr),*], [$($in_b:expr),*]) => [$($out:expr),*]) => {
1973 // #[wasm_bindgen_test]
1974 // fn $op_test_id() {
1975 // unsafe {
1976 // let a_input: [$ety; $ecount] = [$($in_a),*];
1977 // let b_input: [$ety; $ecount] = [$($in_b),*];
1978 // let output: [$oty; $ecount] = [$($out),*];
1979 //
1980 // let a_vec_in: v128 = transmute(a_input);
1981 // let b_vec_in: v128 = transmute(b_input);
1982 // let vec_res: v128 = $id::$binary_op(a_vec_in, b_vec_in);
1983 //
1984 // let res: [$oty; $ecount] = transmute(vec_res);
1985 //
1986 // if !floating_point!($ety) {
1987 // assert_eq!(res, output);
1988 // } else {
1989 // for i in 0..$ecount {
1990 // let r = res[i];
1991 // let o = output[i];
1992 // assert_eq!(r.is_nan(), o.is_nan());
1993 // if !r.is_nan() {
1994 // assert_eq!(r, o);
1995 // }
1996 // }
1997 // }
1998 // }
1999 // }
2000 // }
2001 // }
2002 //
2003 // macro_rules! test_bops {
2004 // ($id:ident[$ety:ident; $ecount:expr] |
2005 // $binary_op:ident [$op_test_id:ident]:
2006 // ([$($in_a:expr),*], $in_b:expr) => [$($out:expr),*]) => {
2007 // #[wasm_bindgen_test]
2008 // fn $op_test_id() {
2009 // unsafe {
2010 // let a_input: [$ety; $ecount] = [$($in_a),*];
2011 // let output: [$ety; $ecount] = [$($out),*];
2012 //
2013 // let a_vec_in: v128 = transmute(a_input);
2014 // let vec_res: v128 = $id::$binary_op(a_vec_in, $in_b);
2015 //
2016 // let res: [$ety; $ecount] = transmute(vec_res);
2017 // assert_eq!(res, output);
2018 // }
2019 // }
2020 // }
2021 // }
2022 //
2023 // macro_rules! test_uop {
2024 // ($id:ident[$ety:ident; $ecount:expr] |
2025 // $unary_op:ident [$op_test_id:ident]: [$($in_a:expr),*] => [$($out:expr),*]) => {
2026 // #[wasm_bindgen_test]
2027 // fn $op_test_id() {
2028 // unsafe {
2029 // let a_input: [$ety; $ecount] = [$($in_a),*];
2030 // let output: [$ety; $ecount] = [$($out),*];
2031 //
2032 // let a_vec_in: v128 = transmute(a_input);
2033 // let vec_res: v128 = $id::$unary_op(a_vec_in);
2034 //
2035 // let res: [$ety; $ecount] = transmute(vec_res);
2036 // assert_eq!(res, output);
2037 // }
2038 // }
2039 // }
2040 // }
2041 //
2042 //
2043 //
2044 // test_bops!(i8x16[i8; 16] | shl[i8x16_shl_test]:
2045 // ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
2046 // [0, -2, 4, 6, 8, 10, 12, -2, 2, 2, 2, 2, 2, 2, 2, 2]);
2047 // test_bops!(i16x8[i16; 8] | shl[i16x8_shl_test]:
2048 // ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
2049 // [0, -2, 4, 6, 8, 10, 12, -2]);
2050 // test_bops!(i32x4[i32; 4] | shl[i32x4_shl_test]:
2051 // ([0, -1, 2, 3], 1) => [0, -2, 4, 6]);
2052 // test_bops!(i64x2[i64; 2] | shl[i64x2_shl_test]:
2053 // ([0, -1], 1) => [0, -2]);
2054 //
2055 // test_bops!(i8x16[i8; 16] | shr_s[i8x16_shr_s_test]:
2056 // ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
2057 // [0, -1, 1, 1, 2, 2, 3, 63, 0, 0, 0, 0, 0, 0, 0, 0]);
2058 // test_bops!(i16x8[i16; 8] | shr_s[i16x8_shr_s_test]:
2059 // ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
2060 // [0, -1, 1, 1, 2, 2, 3, i16::MAX / 2]);
2061 // test_bops!(i32x4[i32; 4] | shr_s[i32x4_shr_s_test]:
2062 // ([0, -1, 2, 3], 1) => [0, -1, 1, 1]);
2063 // test_bops!(i64x2[i64; 2] | shr_s[i64x2_shr_s_test]:
2064 // ([0, -1], 1) => [0, -1]);
2065 //
2066 // test_bops!(i8x16[i8; 16] | shr_u[i8x16_uhr_u_test]:
2067 // ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
2068 // [0, i8::MAX, 1, 1, 2, 2, 3, 63, 0, 0, 0, 0, 0, 0, 0, 0]);
2069 // test_bops!(i16x8[i16; 8] | shr_u[i16x8_uhr_u_test]:
2070 // ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
2071 // [0, i16::MAX, 1, 1, 2, 2, 3, i16::MAX / 2]);
2072 // test_bops!(i32x4[i32; 4] | shr_u[i32x4_uhr_u_test]:
2073 // ([0, -1, 2, 3], 1) => [0, i32::MAX, 1, 1]);
2074 // test_bops!(i64x2[i64; 2] | shr_u[i64x2_uhr_u_test]:
2075 // ([0, -1], 1) => [0, i64::MAX]);
2076 //
2077 // #[wasm_bindgen_test]
2078 // fn v128_bitwise_logical_ops() {
2079 // unsafe {
2080 // let a: [u32; 4] = [u32::MAX, 0, u32::MAX, 0];
2081 // let b: [u32; 4] = [u32::MAX; 4];
2082 // let c: [u32; 4] = [0; 4];
2083 //
2084 // let vec_a: v128 = transmute(a);
2085 // let vec_b: v128 = transmute(b);
2086 // let vec_c: v128 = transmute(c);
2087 //
2088 // let r: v128 = v128::and(vec_a, vec_a);
2089 // compare_bytes(r, vec_a);
2090 // let r: v128 = v128::and(vec_a, vec_b);
2091 // compare_bytes(r, vec_a);
2092 // let r: v128 = v128::or(vec_a, vec_b);
2093 // compare_bytes(r, vec_b);
2094 // let r: v128 = v128::not(vec_b);
2095 // compare_bytes(r, vec_c);
2096 // let r: v128 = v128::xor(vec_a, vec_c);
2097 // compare_bytes(r, vec_a);
2098 //
2099 // let r: v128 = v128::bitselect(vec_b, vec_c, vec_b);
2100 // compare_bytes(r, vec_b);
2101 // let r: v128 = v128::bitselect(vec_b, vec_c, vec_c);
2102 // compare_bytes(r, vec_c);
2103 // let r: v128 = v128::bitselect(vec_b, vec_c, vec_a);
2104 // compare_bytes(r, vec_a);
2105 // }
2106 // }
2107 //
2108 // macro_rules! test_bool_red {
2109 // ($id:ident[$test_id:ident] | [$($true:expr),*] | [$($false:expr),*] | [$($alt:expr),*]) => {
2110 // #[wasm_bindgen_test]
2111 // fn $test_id() {
2112 // unsafe {
2113 // let vec_a: v128 = transmute([$($true),*]); // true
2114 // let vec_b: v128 = transmute([$($false),*]); // false
2115 // let vec_c: v128 = transmute([$($alt),*]); // alternating
2116 //
2117 // assert_eq!($id::any_true(vec_a), 1);
2118 // assert_eq!($id::any_true(vec_b), 0);
2119 // assert_eq!($id::any_true(vec_c), 1);
2120 //
2121 // assert_eq!($id::all_true(vec_a), 1);
2122 // assert_eq!($id::all_true(vec_b), 0);
2123 // assert_eq!($id::all_true(vec_c), 0);
2124 // }
2125 // }
2126 // }
2127 // }
2128 //
2129 // test_bool_red!(
2130 // i8x16[i8x16_boolean_reductions]
2131 // | [1_i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
2132 // | [0_i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
2133 // | [1_i8, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]
2134 // );
2135 // test_bool_red!(
2136 // i16x8[i16x8_boolean_reductions]
2137 // | [1_i16, 1, 1, 1, 1, 1, 1, 1]
2138 // | [0_i16, 0, 0, 0, 0, 0, 0, 0]
2139 // | [1_i16, 0, 1, 0, 1, 0, 1, 0]
2140 // );
2141 // test_bool_red!(
2142 // i32x4[i32x4_boolean_reductions]
2143 // | [1_i32, 1, 1, 1]
2144 // | [0_i32, 0, 0, 0]
2145 // | [1_i32, 0, 1, 0]
2146 // );
2147 // test_bool_red!(
2148 // i64x2[i64x2_boolean_reductions] | [1_i64, 1] | [0_i64, 0] | [1_i64, 0]
2149 // );
2150 //
2151 // test_bop!(i8x16[i8; 16] | eq[i8x16_eq_test]:
2152 // ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
2153 // [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
2154 // [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
2155 // test_bop!(i16x8[i16; 8] | eq[i16x8_eq_test]:
2156 // ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
2157 // [-1, 0, -1, 0 ,-1, 0, -1, -1]);
2158 // test_bop!(i32x4[i32; 4] | eq[i32x4_eq_test]:
2159 // ([0, 1, 2, 3], [0, 2, 2, 4]) => [-1, 0, -1, 0]);
2160 // test_bop!(i64x2[i64; 2] | eq[i64x2_eq_test]: ([0, 1], [0, 2]) => [-1, 0]);
2161 // test_bop!(f32x4[f32; 4] => i32 | eq[f32x4_eq_test]:
2162 // ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [-1, 0, -1, 0]);
2163 // test_bop!(f64x2[f64; 2] => i64 | eq[f64x2_eq_test]: ([0., 1.], [0., 2.]) => [-1, 0]);
2164 //
2165 // test_bop!(i8x16[i8; 16] | ne[i8x16_ne_test]:
2166 // ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
2167 // [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
2168 // [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
2169 // test_bop!(i16x8[i16; 8] | ne[i16x8_ne_test]:
2170 // ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
2171 // [0, -1, 0, -1 ,0, -1, 0, 0]);
2172 // test_bop!(i32x4[i32; 4] | ne[i32x4_ne_test]:
2173 // ([0, 1, 2, 3], [0, 2, 2, 4]) => [0, -1, 0, -1]);
2174 // test_bop!(i64x2[i64; 2] | ne[i64x2_ne_test]: ([0, 1], [0, 2]) => [0, -1]);
2175 // test_bop!(f32x4[f32; 4] => i32 | ne[f32x4_ne_test]:
2176 // ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [0, -1, 0, -1]);
2177 // test_bop!(f64x2[f64; 2] => i64 | ne[f64x2_ne_test]: ([0., 1.], [0., 2.]) => [0, -1]);
2178 //
2179 // test_bop!(i8x16[i8; 16] | lt[i8x16_lt_test]:
2180 // ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
2181 // [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
2182 // [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
2183 // test_bop!(i16x8[i16; 8] | lt[i16x8_lt_test]:
2184 // ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
2185 // [0, -1, 0, -1 ,0, -1, 0, 0]);
2186 // test_bop!(i32x4[i32; 4] | lt[i32x4_lt_test]:
2187 // ([0, 1, 2, 3], [0, 2, 2, 4]) => [0, -1, 0, -1]);
2188 // test_bop!(i64x2[i64; 2] | lt[i64x2_lt_test]: ([0, 1], [0, 2]) => [0, -1]);
2189 // test_bop!(f32x4[f32; 4] => i32 | lt[f32x4_lt_test]:
2190 // ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [0, -1, 0, -1]);
2191 // test_bop!(f64x2[f64; 2] => i64 | lt[f64x2_lt_test]: ([0., 1.], [0., 2.]) => [0, -1]);
2192 //
2193 // test_bop!(i8x16[i8; 16] | gt[i8x16_gt_test]:
2194 // ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15],
2195 // [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) =>
2196 // [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
2197 // test_bop!(i16x8[i16; 8] | gt[i16x8_gt_test]:
2198 // ([0, 2, 2, 4, 4, 6, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
2199 // [0, -1, 0, -1 ,0, -1, 0, 0]);
2200 // test_bop!(i32x4[i32; 4] | gt[i32x4_gt_test]:
2201 // ([0, 2, 2, 4], [0, 1, 2, 3]) => [0, -1, 0, -1]);
2202 // test_bop!(i64x2[i64; 2] | gt[i64x2_gt_test]: ([0, 2], [0, 1]) => [0, -1]);
2203 // test_bop!(f32x4[f32; 4] => i32 | gt[f32x4_gt_test]:
2204 // ([0., 2., 2., 4.], [0., 1., 2., 3.]) => [0, -1, 0, -1]);
2205 // test_bop!(f64x2[f64; 2] => i64 | gt[f64x2_gt_test]: ([0., 2.], [0., 1.]) => [0, -1]);
2206 //
2207 // test_bop!(i8x16[i8; 16] | ge[i8x16_ge_test]:
2208 // ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
2209 // [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
2210 // [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
2211 // test_bop!(i16x8[i16; 8] | ge[i16x8_ge_test]:
2212 // ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
2213 // [-1, 0, -1, 0 ,-1, 0, -1, -1]);
2214 // test_bop!(i32x4[i32; 4] | ge[i32x4_ge_test]:
2215 // ([0, 1, 2, 3], [0, 2, 2, 4]) => [-1, 0, -1, 0]);
2216 // test_bop!(i64x2[i64; 2] | ge[i64x2_ge_test]: ([0, 1], [0, 2]) => [-1, 0]);
2217 // test_bop!(f32x4[f32; 4] => i32 | ge[f32x4_ge_test]:
2218 // ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [-1, 0, -1, 0]);
2219 // test_bop!(f64x2[f64; 2] => i64 | ge[f64x2_ge_test]: ([0., 1.], [0., 2.]) => [-1, 0]);
2220 //
2221 // test_bop!(i8x16[i8; 16] | le[i8x16_le_test]:
2222 // ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15],
2223 // [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
2224 // ) =>
2225 // [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
2226 // test_bop!(i16x8[i16; 8] | le[i16x8_le_test]:
2227 // ([0, 2, 2, 4, 4, 6, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
2228 // [-1, 0, -1, 0 ,-1, 0, -1, -1]);
2229 // test_bop!(i32x4[i32; 4] | le[i32x4_le_test]:
2230 // ([0, 2, 2, 4], [0, 1, 2, 3]) => [-1, 0, -1, 0]);
2231 // test_bop!(i64x2[i64; 2] | le[i64x2_le_test]: ([0, 2], [0, 1]) => [-1, 0]);
2232 // test_bop!(f32x4[f32; 4] => i32 | le[f32x4_le_test]:
2233 // ([0., 2., 2., 4.], [0., 1., 2., 3.]) => [-1, 0, -1, -0]);
2234 // test_bop!(f64x2[f64; 2] => i64 | le[f64x2_le_test]: ([0., 2.], [0., 1.]) => [-1, 0]);
2235 //
2236 // #[wasm_bindgen_test]
2237 // fn v128_bitwise_load_store() {
2238 // unsafe {
2239 // let mut arr: [i32; 4] = [0, 1, 2, 3];
2240 //
2241 // let vec = v128::load(arr.as_ptr() as *const v128);
2242 // let vec = i32x4::add(vec, vec);
2243 // v128::store(arr.as_mut_ptr() as *mut v128, vec);
2244 //
2245 // assert_eq!(arr, [0, 2, 4, 6]);
2246 // }
2247 // }
2248 //
2249 // test_uop!(f32x4[f32; 4] | neg[f32x4_neg_test]: [0., 1., 2., 3.] => [ 0., -1., -2., -3.]);
2250 // test_uop!(f32x4[f32; 4] | abs[f32x4_abs_test]: [0., -1., 2., -3.] => [ 0., 1., 2., 3.]);
2251 // test_bop!(f32x4[f32; 4] | min[f32x4_min_test]:
2252 // ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [0., -3., -4., 8.]);
2253 // test_bop!(f32x4[f32; 4] | min[f32x4_min_test_nan]:
2254 // ([0., -1., 7., 8.], [1., -3., -4., std::f32::NAN])
2255 // => [0., -3., -4., std::f32::NAN]);
2256 // test_bop!(f32x4[f32; 4] | max[f32x4_max_test]:
2257 // ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [1., -1., 7., 10.]);
2258 // test_bop!(f32x4[f32; 4] | max[f32x4_max_test_nan]:
2259 // ([0., -1., 7., 8.], [1., -3., -4., std::f32::NAN])
2260 // => [1., -1., 7., std::f32::NAN]);
2261 // test_bop!(f32x4[f32; 4] | add[f32x4_add_test]:
2262 // ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [1., -4., 3., 18.]);
2263 // test_bop!(f32x4[f32; 4] | sub[f32x4_sub_test]:
2264 // ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [-1., 2., 11., -2.]);
2265 // test_bop!(f32x4[f32; 4] | mul[f32x4_mul_test]:
2266 // ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [0., 3., -28., 80.]);
2267 // test_bop!(f32x4[f32; 4] | div[f32x4_div_test]:
2268 // ([0., -8., 70., 8.], [1., 4., 10., 2.]) => [0., -2., 7., 4.]);
2269 //
2270 // test_uop!(f64x2[f64; 2] | neg[f64x2_neg_test]: [0., 1.] => [ 0., -1.]);
2271 // test_uop!(f64x2[f64; 2] | abs[f64x2_abs_test]: [0., -1.] => [ 0., 1.]);
2272 // test_bop!(f64x2[f64; 2] | min[f64x2_min_test]:
2273 // ([0., -1.], [1., -3.]) => [0., -3.]);
2274 // test_bop!(f64x2[f64; 2] | min[f64x2_min_test_nan]:
2275 // ([7., 8.], [-4., std::f64::NAN])
2276 // => [ -4., std::f64::NAN]);
2277 // test_bop!(f64x2[f64; 2] | max[f64x2_max_test]:
2278 // ([0., -1.], [1., -3.]) => [1., -1.]);
2279 // test_bop!(f64x2[f64; 2] | max[f64x2_max_test_nan]:
2280 // ([7., 8.], [ -4., std::f64::NAN])
2281 // => [7., std::f64::NAN]);
2282 // test_bop!(f64x2[f64; 2] | add[f64x2_add_test]:
2283 // ([0., -1.], [1., -3.]) => [1., -4.]);
2284 // test_bop!(f64x2[f64; 2] | sub[f64x2_sub_test]:
2285 // ([0., -1.], [1., -3.]) => [-1., 2.]);
2286 // test_bop!(f64x2[f64; 2] | mul[f64x2_mul_test]:
2287 // ([0., -1.], [1., -3.]) => [0., 3.]);
2288 // test_bop!(f64x2[f64; 2] | div[f64x2_div_test]:
2289 // ([0., -8.], [1., 4.]) => [0., -2.]);
2290 //
2291 // macro_rules! test_conv {
2292 // ($test_id:ident | $conv_id:ident | $to_ty:ident | $from:expr, $to:expr) => {
2293 // #[wasm_bindgen_test]
2294 // fn $test_id() {
2295 // unsafe {
2296 // let from: v128 = transmute($from);
2297 // let to: v128 = transmute($to);
2298 //
2299 // let r: v128 = $to_ty::$conv_id(from);
2300 //
2301 // compare_bytes(r, to);
2302 // }
2303 // }
2304 // };
2305 // }
2306 //
2307 // test_conv!(
2308 // f32x4_convert_s_i32x4 | convert_s_i32x4 | f32x4 | [1_i32, 2, 3, 4],
2309 // [1_f32, 2., 3., 4.]
2310 // );
2311 // test_conv!(
2312 // f32x4_convert_u_i32x4
2313 // | convert_u_i32x4
2314 // | f32x4
2315 // | [u32::MAX, 2, 3, 4],
2316 // [u32::MAX as f32, 2., 3., 4.]
2317 // );
2318 // test_conv!(
2319 // f64x2_convert_s_i64x2 | convert_s_i64x2 | f64x2 | [1_i64, 2],
2320 // [1_f64, 2.]
2321 // );
2322 // test_conv!(
2323 // f64x2_convert_u_i64x2
2324 // | convert_u_i64x2
2325 // | f64x2
2326 // | [u64::MAX, 2],
2327 // [18446744073709552000.0, 2.]
2328 // );
2329 //
2330 // // FIXME: this fails, and produces -2147483648 instead of saturating at
2331 // // i32::MAX test_conv!(i32x4_trunc_s_f32x4_sat | trunc_s_f32x4_sat
2332 // // | i32x4 | [1_f32, 2., (i32::MAX as f32 + 1.), 4.],
2333 // // [1_i32, 2, i32::MAX, 4]); FIXME: add other saturating tests
2334 }