//! Code generation for the xor reduction.
use coresimd::simd::*;

/// LLVM intrinsics used in the xor reduction
#[allow(improper_ctypes)]
extern "C" {
    #[link_name = "llvm.experimental.vector.reduce.xor.i8.v2i8"]
    fn reduce_xor_i8x2(x: i8x2) -> i8;
    #[link_name = "llvm.experimental.vector.reduce.xor.u8.v2u8"]
    fn reduce_xor_u8x2(x: u8x2) -> u8;
    #[link_name = "llvm.experimental.vector.reduce.xor.i16.v2i16"]
    fn reduce_xor_i16x2(x: i16x2) -> i16;
    #[link_name = "llvm.experimental.vector.reduce.xor.u16.v2u16"]
    fn reduce_xor_u16x2(x: u16x2) -> u16;
    #[link_name = "llvm.experimental.vector.reduce.xor.i32.v2i32"]
    fn reduce_xor_i32x2(x: i32x2) -> i32;
    #[link_name = "llvm.experimental.vector.reduce.xor.u32.v2u32"]
    fn reduce_xor_u32x2(x: u32x2) -> u32;
    #[link_name = "llvm.experimental.vector.reduce.xor.i64.v2i64"]
    fn reduce_xor_i64x2(x: i64x2) -> i64;
    #[link_name = "llvm.experimental.vector.reduce.xor.u64.v2u64"]
    fn reduce_xor_u64x2(x: u64x2) -> u64;
    #[link_name = "llvm.experimental.vector.reduce.xor.i8.v4i8"]
    fn reduce_xor_i8x4(x: i8x4) -> i8;
    #[link_name = "llvm.experimental.vector.reduce.xor.u8.v4u8"]
    fn reduce_xor_u8x4(x: u8x4) -> u8;
    #[link_name = "llvm.experimental.vector.reduce.xor.i16.v4i16"]
    fn reduce_xor_i16x4(x: i16x4) -> i16;
    #[link_name = "llvm.experimental.vector.reduce.xor.u16.v4u16"]
    fn reduce_xor_u16x4(x: u16x4) -> u16;
    #[link_name = "llvm.experimental.vector.reduce.xor.i32.v4i32"]
    fn reduce_xor_i32x4(x: i32x4) -> i32;
    #[link_name = "llvm.experimental.vector.reduce.xor.u32.v4u32"]
    fn reduce_xor_u32x4(x: u32x4) -> u32;
    #[link_name = "llvm.experimental.vector.reduce.xor.i64.v4i64"]
    fn reduce_xor_i64x4(x: i64x4) -> i64;
    #[link_name = "llvm.experimental.vector.reduce.xor.u64.v4u64"]
    fn reduce_xor_u64x4(x: u64x4) -> u64;
    #[link_name = "llvm.experimental.vector.reduce.xor.i8.v8i8"]
    fn reduce_xor_i8x8(x: i8x8) -> i8;
    #[link_name = "llvm.experimental.vector.reduce.xor.u8.v8u8"]
    fn reduce_xor_u8x8(x: u8x8) -> u8;
    #[link_name = "llvm.experimental.vector.reduce.xor.i16.v8i16"]
    fn reduce_xor_i16x8(x: i16x8) -> i16;
    #[link_name = "llvm.experimental.vector.reduce.xor.u16.v8u16"]
    fn reduce_xor_u16x8(x: u16x8) -> u16;
    #[link_name = "llvm.experimental.vector.reduce.xor.i32.v8i32"]
    fn reduce_xor_i32x8(x: i32x8) -> i32;
    #[link_name = "llvm.experimental.vector.reduce.xor.u32.v8u32"]
    fn reduce_xor_u32x8(x: u32x8) -> u32;
    #[link_name = "llvm.experimental.vector.reduce.xor.i64.v8i64"]
    fn reduce_xor_i64x8(x: i64x8) -> i64;
    #[link_name = "llvm.experimental.vector.reduce.xor.u64.v8u64"]
    fn reduce_xor_u64x8(x: u64x8) -> u64;
    #[link_name = "llvm.experimental.vector.reduce.xor.i8.v16i8"]
    fn reduce_xor_i8x16(x: i8x16) -> i8;
    #[link_name = "llvm.experimental.vector.reduce.xor.u8.v16u8"]
    fn reduce_xor_u8x16(x: u8x16) -> u8;
    #[link_name = "llvm.experimental.vector.reduce.xor.i16.v16i16"]
    fn reduce_xor_i16x16(x: i16x16) -> i16;
    #[link_name = "llvm.experimental.vector.reduce.xor.u16.v16u16"]
    fn reduce_xor_u16x16(x: u16x16) -> u16;
    #[link_name = "llvm.experimental.vector.reduce.xor.i32.v16i32"]
    fn reduce_xor_i32x16(x: i32x16) -> i32;
    #[link_name = "llvm.experimental.vector.reduce.xor.u32.v16u32"]
    fn reduce_xor_u32x16(x: u32x16) -> u32;
    #[link_name = "llvm.experimental.vector.reduce.xor.i8.v32i8"]
    fn reduce_xor_i8x32(x: i8x32) -> i8;
    #[link_name = "llvm.experimental.vector.reduce.xor.u8.v32u8"]
    fn reduce_xor_u8x32(x: u8x32) -> u8;
    #[link_name = "llvm.experimental.vector.reduce.xor.i16.v32i16"]
    fn reduce_xor_i16x32(x: i16x32) -> i16;
    #[link_name = "llvm.experimental.vector.reduce.xor.u16.v32u16"]
    fn reduce_xor_u16x32(x: u16x32) -> u16;
    #[link_name = "llvm.experimental.vector.reduce.xor.i8.v64i8"]
    fn reduce_xor_i8x64(x: i8x64) -> i8;
    #[link_name = "llvm.experimental.vector.reduce.xor.u8.v64u8"]
    fn reduce_xor_u8x64(x: u8x64) -> u8;
}
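
// Each intrinsic above folds the lanes of its single vector argument together
// with bitwise XOR and returns the scalar result; the `.vN<ty>` suffix of the
// link name encodes the lane count and element type of the input vector
// (e.g. `v4i32` is a 4-lane vector of `i32`).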

/// Reduction: horizontal bitwise xor of the vector elements.
#[cfg_attr(feature = "cargo-clippy", allow(stutter))]
pub trait ReduceXor {
    /// Result type of the reduction.
    type Acc;
    /// Computes the horizontal bitwise xor of the vector elements.
    fn reduce_xor(self) -> Self::Acc;
}
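
// As a usage sketch, the reduction XORs every lane into one scalar:
//
//     let v = u32x4::new(0b0011, 0b0101, 0b0000, 0b1111);
//     assert_eq!(v.reduce_xor(), 0b1001);
//
// since 0b0011 ^ 0b0101 ^ 0b0000 ^ 0b1111 == 0b1001.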

macro_rules! red_xor {
    ($id:ident, $elem_ty:ident, $llvm_intr:ident) => {
        impl ReduceXor for $id {
            type Acc = $elem_ty;
            #[cfg(not(target_arch = "aarch64"))]
            #[inline]
            fn reduce_xor(self) -> Self::Acc {
                unsafe { $llvm_intr(self.into_bits()) }
            }
            // FIXME: broken in AArch64
            #[cfg(target_arch = "aarch64")]
            #[inline]
            fn reduce_xor(self) -> Self::Acc {
                let mut x = self.extract(0) as Self::Acc;
                for i in 1..$id::lanes() {
                    x ^= self.extract(i) as Self::Acc;
                }
                x
            }
        }
    };
}
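
// For reference, an invocation such as `red_xor!(i32x4, i32, reduce_xor_i32x4)`
// below expands to an `impl ReduceXor for i32x4` whose `reduce_xor` calls the
// `reduce_xor_i32x4` LLVM intrinsic on most targets, and falls back to the
// scalar `extract`/`^=` loop over the lanes on AArch64 (see the FIXME above).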
red_xor!(i8x2, i8, reduce_xor_i8x2);
red_xor!(u8x2, u8, reduce_xor_u8x2);
red_xor!(i16x2, i16, reduce_xor_i16x2);
red_xor!(u16x2, u16, reduce_xor_u16x2);
red_xor!(i32x2, i32, reduce_xor_i32x2);
red_xor!(u32x2, u32, reduce_xor_u32x2);
red_xor!(i64x2, i64, reduce_xor_i64x2);
red_xor!(u64x2, u64, reduce_xor_u64x2);
red_xor!(i8x4, i8, reduce_xor_i8x4);
red_xor!(u8x4, u8, reduce_xor_u8x4);
red_xor!(i16x4, i16, reduce_xor_i16x4);
red_xor!(u16x4, u16, reduce_xor_u16x4);
red_xor!(i32x4, i32, reduce_xor_i32x4);
red_xor!(u32x4, u32, reduce_xor_u32x4);
red_xor!(i64x4, i64, reduce_xor_i64x4);
red_xor!(u64x4, u64, reduce_xor_u64x4);
red_xor!(i8x8, i8, reduce_xor_i8x8);
red_xor!(u8x8, u8, reduce_xor_u8x8);
red_xor!(i16x8, i16, reduce_xor_i16x8);
red_xor!(u16x8, u16, reduce_xor_u16x8);
red_xor!(i32x8, i32, reduce_xor_i32x8);
red_xor!(u32x8, u32, reduce_xor_u32x8);
red_xor!(i64x8, i64, reduce_xor_i64x8);
red_xor!(u64x8, u64, reduce_xor_u64x8);
red_xor!(i8x16, i8, reduce_xor_i8x16);
red_xor!(u8x16, u8, reduce_xor_u8x16);
red_xor!(i16x16, i16, reduce_xor_i16x16);
red_xor!(u16x16, u16, reduce_xor_u16x16);
red_xor!(i32x16, i32, reduce_xor_i32x16);
red_xor!(u32x16, u32, reduce_xor_u32x16);
red_xor!(i8x32, i8, reduce_xor_i8x32);
red_xor!(u8x32, u8, reduce_xor_u8x32);
red_xor!(i16x32, i16, reduce_xor_i16x32);
red_xor!(u16x32, u16, reduce_xor_u16x32);
red_xor!(i8x64, i8, reduce_xor_i8x64);
red_xor!(u8x64, u8, reduce_xor_u8x64);

red_xor!(b8x2, i8, reduce_xor_i8x2);
red_xor!(b8x4, i8, reduce_xor_i8x4);
red_xor!(b8x8, i8, reduce_xor_i8x8);
red_xor!(b8x16, i8, reduce_xor_i8x16);
red_xor!(b8x32, i8, reduce_xor_i8x32);
red_xor!(b8x64, i8, reduce_xor_i8x64);

#[cfg(test)]
mod tests {
    use super::ReduceXor;
    use coresimd::simd::*;

    // note: these are tested in the portable vector API tests

    #[test]
    fn reduce_xor_i32x4() {
        let v = i32x4::splat(1);
        assert_eq!(v.reduce_xor(), 0_i32);
        let v = i32x4::new(1, 0, 0, 0);
        assert_eq!(v.reduce_xor(), 1_i32);
    }
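
    // Illustrative sketch: the same reduction on an unsigned element type,
    // exercising the u8x8 lane count.
    #[test]
    fn reduce_xor_u8x8() {
        let v = u8x8::new(0b0001, 0b0010, 0b0100, 0b1000, 0, 0, 0, 0);
        // 0b0001 ^ 0b0010 ^ 0b0100 ^ 0b1000 == 0b1111
        assert_eq!(v.reduce_xor(), 0b1111_u8);
    }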
}