]>
Commit | Line | Data |
---|---|---|
a2a8927a | 1 | #![allow(unused_imports)] |
3c0e092e XL |
2 | use super::MaskElement; |
3 | use crate::simd::intrinsics; | |
4 | use crate::simd::{LaneCount, Simd, SupportedLaneCount}; | |
5 | use core::marker::PhantomData; | |
6 | ||
/// A mask where each lane is represented by a single bit.
///
/// Bits are packed LSB-first within each byte (see `test_unchecked`);
/// when `LANES` is not a multiple of 8, the unused high bits of the final
/// byte are kept zeroed by `splat` and `Not`, which is what makes the
/// byte-wise `Eq`/`Ord` impls below exact.
#[repr(transparent)]
pub struct Mask<T, const LANES: usize>(
    // Packed bitmask storage chosen by `SupportedLaneCount` for `LANES`.
    <LaneCount<LANES> as SupportedLaneCount>::BitMask,
    // Marker tying the mask to its element type without storing a `T`.
    PhantomData<T>,
)
where
    T: MaskElement,
    LaneCount<LANES>: SupportedLaneCount;
16 | ||
// `Mask` is plain data (the bitmask bytes plus `PhantomData`), so it can be
// `Copy`; the compiler verifies both fields are `Copy` for this empty impl.
impl<T, const LANES: usize> Copy for Mask<T, LANES>
where
    T: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
}
23 | ||
24 | impl<T, const LANES: usize> Clone for Mask<T, LANES> | |
25 | where | |
26 | T: MaskElement, | |
27 | LaneCount<LANES>: SupportedLaneCount, | |
28 | { | |
29 | fn clone(&self) -> Self { | |
30 | *self | |
31 | } | |
32 | } | |
33 | ||
34 | impl<T, const LANES: usize> PartialEq for Mask<T, LANES> | |
35 | where | |
36 | T: MaskElement, | |
37 | LaneCount<LANES>: SupportedLaneCount, | |
38 | { | |
39 | fn eq(&self, other: &Self) -> bool { | |
40 | self.0.as_ref() == other.0.as_ref() | |
41 | } | |
42 | } | |
43 | ||
44 | impl<T, const LANES: usize> PartialOrd for Mask<T, LANES> | |
45 | where | |
46 | T: MaskElement, | |
47 | LaneCount<LANES>: SupportedLaneCount, | |
48 | { | |
49 | fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> { | |
50 | self.0.as_ref().partial_cmp(other.0.as_ref()) | |
51 | } | |
52 | } | |
53 | ||
// `eq` compares the raw bitmask bytes, which is a full equivalence relation,
// so `Eq` holds and is asserted with an empty impl.
impl<T, const LANES: usize> Eq for Mask<T, LANES>
where
    T: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
}
60 | ||
61 | impl<T, const LANES: usize> Ord for Mask<T, LANES> | |
62 | where | |
63 | T: MaskElement, | |
64 | LaneCount<LANES>: SupportedLaneCount, | |
65 | { | |
66 | fn cmp(&self, other: &Self) -> core::cmp::Ordering { | |
67 | self.0.as_ref().cmp(other.0.as_ref()) | |
68 | } | |
69 | } | |
70 | ||
impl<T, const LANES: usize> Mask<T, LANES>
where
    T: MaskElement,
    LaneCount<LANES>: SupportedLaneCount,
{
    /// Builds a mask with every lane set to `value`.
    ///
    /// Every bitmask byte is filled with `0xFF` (true) or `0x00` (false);
    /// when `LANES` is not a multiple of 8, the unused high bits of the
    /// final byte are masked back to zero so padding never leaks into the
    /// byte-wise `Eq`/`Ord` comparisons.
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    pub fn splat(value: bool) -> Self {
        let mut mask = <LaneCount<LANES> as SupportedLaneCount>::BitMask::default();
        if value {
            mask.as_mut().fill(u8::MAX)
        } else {
            mask.as_mut().fill(u8::MIN)
        }
        if LANES % 8 > 0 {
            // Zero the padding bits above the last lane in the final byte.
            *mask.as_mut().last_mut().unwrap() &= u8::MAX >> (8 - LANES % 8);
        }
        Self(mask, PhantomData)
    }

    /// Reads the bit for `lane`; bits are stored LSB-first within each byte.
    ///
    /// # Safety
    /// The caller must ensure `lane < LANES`: an out-of-range lane would
    /// read a padding bit or panic on the `lane / 8` slice index.
    #[inline]
    #[must_use = "method returns a new bool and does not mutate the original value"]
    pub unsafe fn test_unchecked(&self, lane: usize) -> bool {
        (self.0.as_ref()[lane / 8] >> (lane % 8)) & 0x1 > 0
    }

    /// Writes `value` into the bit for `lane`.
    ///
    /// Uses an XOR update: `value ^ current` is 1 exactly when the stored
    /// bit differs from the requested one, so XOR-ing that (shifted into
    /// place) flips the bit only when it needs to change.
    ///
    /// # Safety
    /// The caller must ensure `lane < LANES` (same contract as
    /// `test_unchecked`).
    #[inline]
    pub unsafe fn set_unchecked(&mut self, lane: usize, value: bool) {
        unsafe {
            self.0.as_mut()[lane / 8] ^= ((value ^ self.test_unchecked(lane)) as u8) << (lane % 8)
        }
    }

    /// Expands the bitmask into a full integer vector: `T::TRUE` for set
    /// lanes, `T::FALSE` for clear lanes.
    #[inline]
    #[must_use = "method returns a new vector and does not mutate the original value"]
    pub fn to_int(self) -> Simd<T, LANES> {
        // SAFETY: `simd_select_bitmask` picks per lane between the two
        // splatted vectors according to the bitmask bits.
        unsafe {
            intrinsics::simd_select_bitmask(self.0, Simd::splat(T::TRUE), Simd::splat(T::FALSE))
        }
    }

    /// Compresses an integer vector into a bitmask, one bit per lane.
    ///
    /// # Safety
    /// Contract inherited from the `simd_bitmask` intrinsic — presumably
    /// every lane of `value` must be a valid mask representation
    /// (all-ones/all-zeros); TODO(review): confirm against the intrinsic's
    /// documentation and the safe callers of this method.
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    pub unsafe fn from_int_unchecked(value: Simd<T, LANES>) -> Self {
        unsafe { Self(intrinsics::simd_bitmask(value), PhantomData) }
    }

    /// Returns the raw bitmask bytes as a fixed-size array.
    #[cfg(feature = "generic_const_exprs")]
    #[inline]
    #[must_use = "method returns a new array and does not mutate the original value"]
    pub fn to_bitmask(self) -> [u8; LaneCount::<LANES>::BITMASK_LEN] {
        // Safety: these are the same type and we are laundering the generic
        // (`BitMask` and `[u8; BITMASK_LEN]` have identical layout).
        unsafe { core::mem::transmute_copy(&self.0) }
    }

    /// Builds a mask directly from raw bitmask bytes.
    #[cfg(feature = "generic_const_exprs")]
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    pub fn from_bitmask(bitmask: [u8; LaneCount::<LANES>::BITMASK_LEN]) -> Self {
        // Safety: these are the same type and we are laundering the generic
        Self(unsafe { core::mem::transmute_copy(&bitmask) }, PhantomData)
    }

    /// Reinterprets this mask as a mask of another element type `U`.
    ///
    /// The stored data is only the bitmask plus `PhantomData<T>`, so the
    /// bytes are identical for every element type and can be copied as-is.
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    pub fn convert<U>(self) -> Mask<U, LANES>
    where
        U: MaskElement,
    {
        unsafe { core::mem::transmute_copy(&self) }
    }

    /// Returns `true` if any lane is set, i.e. the mask differs from the
    /// all-false mask.
    #[inline]
    #[must_use = "method returns a new bool and does not mutate the original value"]
    pub fn any(self) -> bool {
        self != Self::splat(false)
    }

    /// Returns `true` if every lane is set, i.e. the mask equals the
    /// all-true mask.
    #[inline]
    #[must_use = "method returns a new bool and does not mutate the original value"]
    pub fn all(self) -> bool {
        self == Self::splat(true)
    }
}
155 | ||
156 | impl<T, const LANES: usize> core::ops::BitAnd for Mask<T, LANES> | |
157 | where | |
158 | T: MaskElement, | |
159 | LaneCount<LANES>: SupportedLaneCount, | |
160 | <LaneCount<LANES> as SupportedLaneCount>::BitMask: AsRef<[u8]> + AsMut<[u8]>, | |
161 | { | |
162 | type Output = Self; | |
163 | #[inline] | |
a2a8927a | 164 | #[must_use = "method returns a new mask and does not mutate the original value"] |
3c0e092e XL |
165 | fn bitand(mut self, rhs: Self) -> Self { |
166 | for (l, r) in self.0.as_mut().iter_mut().zip(rhs.0.as_ref().iter()) { | |
167 | *l &= r; | |
168 | } | |
169 | self | |
170 | } | |
171 | } | |
172 | ||
173 | impl<T, const LANES: usize> core::ops::BitOr for Mask<T, LANES> | |
174 | where | |
175 | T: MaskElement, | |
176 | LaneCount<LANES>: SupportedLaneCount, | |
177 | <LaneCount<LANES> as SupportedLaneCount>::BitMask: AsRef<[u8]> + AsMut<[u8]>, | |
178 | { | |
179 | type Output = Self; | |
180 | #[inline] | |
a2a8927a | 181 | #[must_use = "method returns a new mask and does not mutate the original value"] |
3c0e092e XL |
182 | fn bitor(mut self, rhs: Self) -> Self { |
183 | for (l, r) in self.0.as_mut().iter_mut().zip(rhs.0.as_ref().iter()) { | |
184 | *l |= r; | |
185 | } | |
186 | self | |
187 | } | |
188 | } | |
189 | ||
190 | impl<T, const LANES: usize> core::ops::BitXor for Mask<T, LANES> | |
191 | where | |
192 | T: MaskElement, | |
193 | LaneCount<LANES>: SupportedLaneCount, | |
194 | { | |
195 | type Output = Self; | |
196 | #[inline] | |
a2a8927a | 197 | #[must_use = "method returns a new mask and does not mutate the original value"] |
3c0e092e XL |
198 | fn bitxor(mut self, rhs: Self) -> Self::Output { |
199 | for (l, r) in self.0.as_mut().iter_mut().zip(rhs.0.as_ref().iter()) { | |
200 | *l ^= r; | |
201 | } | |
202 | self | |
203 | } | |
204 | } | |
205 | ||
206 | impl<T, const LANES: usize> core::ops::Not for Mask<T, LANES> | |
207 | where | |
208 | T: MaskElement, | |
209 | LaneCount<LANES>: SupportedLaneCount, | |
210 | { | |
211 | type Output = Self; | |
212 | #[inline] | |
a2a8927a | 213 | #[must_use = "method returns a new mask and does not mutate the original value"] |
3c0e092e XL |
214 | fn not(mut self) -> Self::Output { |
215 | for x in self.0.as_mut() { | |
216 | *x = !*x; | |
217 | } | |
218 | if LANES % 8 > 0 { | |
219 | *self.0.as_mut().last_mut().unwrap() &= u8::MAX >> (8 - LANES % 8); | |
220 | } | |
221 | self | |
222 | } | |
223 | } |