use crate::simd::{LaneCount, Simd, SimdElement, SupportedLaneCount};
use core::ops::{Add, Mul};
use core::ops::{BitAnd, BitOr, BitXor};
use core::ops::{Div, Rem, Sub};
use core::ops::{Shl, Shr};
impl<I, T, const LANES: usize> core::ops::Index<I> for Simd<T, LANES>
where
    T: SimdElement,
    LaneCount<LANES>: SupportedLaneCount,
    I: core::slice::SliceIndex<[T]>,
{
    type Output = I::Output;

    fn index(&self, index: I) -> &Self::Output {
        &self.as_array()[index]
    }
}
impl<I, T, const LANES: usize> core::ops::IndexMut<I> for Simd<T, LANES>
where
    T: SimdElement,
    LaneCount<LANES>: SupportedLaneCount,
    I: core::slice::SliceIndex<[T]>,
{
    fn index_mut(&mut self, index: I) -> &mut Self::Output {
        &mut self.as_mut_array()[index]
    }
}
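// Indexing defers to the underlying lane array, so any slice index works: for a vector
// `v: Simd<i32, 4>`, `v[0]` borrows a single lane as `&i32` and `v[1..3]` borrows a
// sub-slice as `&[i32]`, with the usual bounds checks of slice indexing.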
macro_rules! unsafe_base {
    ($lhs:ident, $rhs:ident, {$simd_call:ident}, $($_:tt)*) => {
        unsafe { $crate::simd::intrinsics::$simd_call($lhs, $rhs) }
    };
}
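// For example, inside a generated `Add::add` body, `unsafe_base!(self, rhs, { simd_add }, u32)`
// expands to roughly:
//
//     unsafe { $crate::simd::intrinsics::simd_add(self, rhs) }
//
// The trailing tokens (the scalar type forwarded by `for_base_types`) are matched by
// `$($_:tt)*` and ignored.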
/// SAFETY: This macro should not be used for anything except Shl or Shr, and passed the appropriate shift intrinsic.
/// It handles performing a bitand in addition to calling the shift operator, so that the result
/// is well-defined: LLVM can return a poison value if you shl, lshr, or ashr when rhs >= <Int>::BITS.
/// At worst, this will maybe add another instruction and cycle;
/// at best, it may open up more optimization opportunities,
/// or simply be elided entirely, especially for SIMD ISAs which default to this.
///
// FIXME: Consider implementing this in cg_llvm instead?
// cg_clif defaults to this, and scalar MIR shifts also default to wrapping
macro_rules! wrap_bitshift {
    ($lhs:ident, $rhs:ident, {$simd_call:ident}, $int:ident) => {
        unsafe {
            $crate::simd::intrinsics::$simd_call(
                $lhs,
                $rhs.bitand(Simd::splat(<$int>::BITS as $int - 1)),
            )
        }
    };
}
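// For example, with `u8` lanes the mask is `u8::BITS - 1 == 7`, so a per-lane shift amount
// of 8 behaves like a shift by 0 and 9 behaves like a shift by 1, i.e. the scalar analogue
// is `lhs << (rhs & 7)` rather than a potentially-poison full-width shift.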
// Division by zero is poison, according to LLVM.
// So is dividing the MIN value of a signed integer by -1,
// since that would return MAX + 1.
// FIXME: Rust allows <SInt>::MIN / -1,
// so we should probably figure out how to make that safe.
macro_rules! int_divrem_guard {
    (   $lhs:ident,
        $rhs:ident,
        {   const PANIC_ZERO: &'static str = $zero:literal;
            const PANIC_OVERFLOW: &'static str = $overflow:literal;
            $simd_call:ident
        },
        $int:ident ) => {
        if $rhs.lanes_eq(Simd::splat(0)).any() {
            panic!($zero);
        } else if <$int>::MIN != 0
            && ($lhs.lanes_eq(Simd::splat(<$int>::MIN))
            // type inference can break here, so cut an SInt to size
            & $rhs.lanes_eq(Simd::splat(-1i64 as _))).any()
        {
            panic!($overflow);
        } else {
            unsafe { $crate::simd::intrinsics::$simd_call($lhs, $rhs) }
        }
    };
}
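// For example, `Simd::<i32, 4>::splat(1) / Simd::splat(0)` takes the first branch and panics
// with the `PANIC_ZERO` message, while `Simd::splat(i32::MIN) / Simd::splat(-1)` takes the
// second branch and panics with the `PANIC_OVERFLOW` message; only when neither condition
// holds in any lane does the call reach the (then sound) intrinsic.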
macro_rules! for_base_types {
    (   T = ($($scalar:ident),*);
        type Lhs = Simd<T, N>;
        type Rhs = Simd<T, N>;
        type Output = $out:ty;

        impl $op:ident::$call:ident {
            $macro_impl:ident $inner:tt
        }) => {
            $(
                impl<const N: usize> $op<Self> for Simd<$scalar, N>
                where
                    $scalar: SimdElement,
                    LaneCount<N>: SupportedLaneCount,
                {
                    type Output = $out;

                    #[inline]
                    #[must_use = "operator returns a new vector without mutating the inputs"]
                    fn $call(self, rhs: Self) -> Self::Output {
                        $macro_impl!(self, rhs, $inner, $scalar)
                    }
                }
            )*
    };
}
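// For a single scalar, e.g. `T = (u32); ... impl Add::add { unsafe_base { simd_add } }`,
// this expands to roughly:
//
//     impl<const N: usize> Add<Self> for Simd<u32, N>
//     where
//         u32: SimdElement,
//         LaneCount<N>: SupportedLaneCount,
//     {
//         type Output = Self;
//
//         #[inline]
//         #[must_use = "operator returns a new vector without mutating the inputs"]
//         fn add(self, rhs: Self) -> Self::Output {
//             unsafe { $crate::simd::intrinsics::simd_add(self, rhs) }
//         }
//     }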
// A "TokenTree muncher": takes a set of scalar types `T = {};`
// type parameters for the ops it implements, `Op::fn` names,
// and a macro that expands into an expr, substituting in an intrinsic.
// It passes that to for_base_types, which expands an impl for the types,
// using the expanded expr in the function, and recurses with itself.
//
// tl;dr impls a set of ops::{Traits} for a set of types
macro_rules! for_base_ops {
    (
        T = $types:tt;
        type Lhs = Simd<T, N>;
        type Rhs = Simd<T, N>;
        type Output = $out:ident;
        impl $op:ident::$call:ident
            $inner:tt
        $($rest:tt)*
    ) => {
        for_base_types! {
            T = $types;
            type Lhs = Simd<T, N>;
            type Rhs = Simd<T, N>;
            type Output = $out;
            impl $op::$call
                $inner
        }
        for_base_ops! {
            T = $types;
            type Lhs = Simd<T, N>;
            type Rhs = Simd<T, N>;
            type Output = $out;
            $($rest)*
        }
    };
    ($($done:tt)*) => {
        // Done.
    };
}
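// Each recursion step peels one `impl $op::$call $inner` off the front, forwards it to
// `for_base_types!` (which emits the per-scalar trait impls), and then re-invokes
// `for_base_ops!` on `$($rest)*` until only the empty terminal arm matches.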
// Integers can always accept add, mul, sub, bitand, bitor, and bitxor.
// For all of these operations, simd_* intrinsics apply wrapping logic.
for_base_ops! {
    T = (i8, i16, i32, i64, isize, u8, u16, u32, u64, usize);
    type Lhs = Simd<T, N>;
    type Rhs = Simd<T, N>;
    type Output = Self;

    impl Add::add {
        unsafe_base { simd_add }
    }

    impl Mul::mul {
        unsafe_base { simd_mul }
    }

    impl Sub::sub {
        unsafe_base { simd_sub }
    }

    impl BitAnd::bitand {
        unsafe_base { simd_and }
    }

    impl BitOr::bitor {
        unsafe_base { simd_or }
    }

    impl BitXor::bitxor {
        unsafe_base { simd_xor }
    }

    impl Div::div {
        int_divrem_guard {
            const PANIC_ZERO: &'static str = "attempt to divide by zero";
            const PANIC_OVERFLOW: &'static str = "attempt to divide with overflow";
            simd_div
        }
    }

    impl Rem::rem {
        int_divrem_guard {
            const PANIC_ZERO: &'static str =
                "attempt to calculate the remainder with a divisor of zero";
            const PANIC_OVERFLOW: &'static str = "attempt to calculate the remainder with overflow";
            simd_rem
        }
    }

    // The only question is how to handle shifts >= <Int>::BITS?
    // Our current solution uses wrapping logic.
    impl Shl::shl {
        wrap_bitshift { simd_shl }
    }

    impl Shr::shr {
        wrap_bitshift {
            // This automatically monomorphizes to lshr or ashr, depending,
            // so it's fine to use it for both UInts and SInts.
            simd_shr
        }
    }
}
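// For example, after the wrapping mask is applied, `Simd::<i8, 4>::splat(-8) >> Simd::splat(1)`
// lowers to an arithmetic shift and yields -4 in every lane, while the same shift on `u8`
// lanes lowers to a logical shift; a shift amount of 8 on 8-bit lanes is masked down to 0.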
// We don't need any special precautions here:
// Floats always accept arithmetic ops, but may become NaN.
for_base_ops! {
    T = (f32, f64);
    type Lhs = Simd<T, N>;
    type Rhs = Simd<T, N>;
    type Output = Self;

    impl Add::add {
        unsafe_base { simd_add }
    }

    impl Mul::mul {
        unsafe_base { simd_mul }
    }

    impl Sub::sub {
        unsafe_base { simd_sub }
    }

    impl Div::div {
        unsafe_base { simd_div }
    }

    impl Rem::rem {
        unsafe_base { simd_rem }
    }
}
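// A minimal sanity-check sketch of the behavior documented above (wrapping shift amounts,
// panicking integer division by zero, and NaN-producing float ops); illustrative only, and
// assumes the public `Simd::splat`/`to_array` API.
#[cfg(test)]
mod op_behavior_sketch {
    use crate::simd::Simd;

    #[test]
    fn shift_amount_wraps() {
        // The shift amount is masked with `u8::BITS - 1`, so shifting by 8 acts like shifting by 0.
        let lhs = Simd::<u8, 4>::splat(1);
        assert_eq!((lhs << Simd::splat(8)).to_array(), [1; 4]);
    }

    #[test]
    #[should_panic(expected = "attempt to divide by zero")]
    fn integer_division_by_zero_panics() {
        let _ = Simd::<i32, 4>::splat(1) / Simd::splat(0);
    }

    #[test]
    fn float_division_by_zero_is_nan_not_panic() {
        let quotient = Simd::<f32, 4>::splat(0.0) / Simd::splat(0.0);
        assert!(quotient.to_array().iter().all(|x| x.is_nan()));
    }
}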