use super::bitmask::BitMask;
use super::EMPTY;
use core::{mem, ptr};

// Use the native word size as the group size. Using a 64-bit group size on
// a 32-bit architecture will just end up being more expensive because
// shifts and multiplies will need to be emulated.
#[cfg(any(
    target_pointer_width = "64",
    target_arch = "aarch64",
    target_arch = "x86_64",
))]
type GroupWord = u64;
#[cfg(all(
    target_pointer_width = "32",
    not(target_arch = "aarch64"),
    not(target_arch = "x86_64"),
))]
type GroupWord = u32;

pub type BitMaskWord = GroupWord;
pub const BITMASK_STRIDE: usize = 8;
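// Unlike the SSE2 implementation, where `_mm_movemask_epi8` packs one mask
// bit per byte (a stride of 1), the generic version leaves each byte's mask
// bit in place, so `BitMask` consumers step through the word 8 bits at a
// time.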
// We only care about the highest bit of each byte for the mask.
#[allow(clippy::cast_possible_truncation, clippy::unnecessary_cast)]
pub const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080_u64 as GroupWord;

/// Helper function to replicate a byte across a `GroupWord`.
#[inline]
fn repeat(byte: u8) -> GroupWord {
    let repeat = GroupWord::from(byte);
    let repeat = repeat | repeat.wrapping_shl(8);
    let repeat = repeat | repeat.wrapping_shl(16);
    // This last line is a no-op with a 32-bit GroupWord
    repeat | repeat.wrapping_shl(32)
}
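// A worked example (not in the upstream source): on a 64-bit `GroupWord`,
// `repeat(0x80)` doubles the populated span on each step:
//   0x0000_0000_0000_0080 | << 8  -> 0x0000_0000_0000_8080
//   0x0000_0000_0000_8080 | << 16 -> 0x0000_0000_8080_8080
//   0x0000_0000_8080_8080 | << 32 -> 0x8080_8080_8080_8080
// which is exactly `BITMASK_MASK` above.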

/// Abstraction over a group of control bytes which can be scanned in
/// parallel.
///
/// This implementation uses a word-sized integer.
#[derive(Copy, Clone)]
pub struct Group(GroupWord);

// We perform all operations in the native endianness, and convert to
// little-endian just before creating a BitMask. This can potentially
// enable the compiler to eliminate unnecessary byte swaps if we are
// only checking whether a BitMask is empty.
#[allow(clippy::use_self)]
impl Group {
    /// Number of bytes in the group.
    pub const WIDTH: usize = mem::size_of::<Self>();

    /// Returns a full group of empty bytes, suitable for use as the initial
    /// value for an empty hash table.
    ///
    /// This is guaranteed to be aligned to the group size.
    #[inline]
    pub fn static_empty() -> &'static [u8] {
        union AlignedBytes {
            _align: Group,
            bytes: [u8; Group::WIDTH],
        }
        const ALIGNED_BYTES: AlignedBytes = AlignedBytes {
            bytes: [EMPTY; Group::WIDTH],
        };
        unsafe { &ALIGNED_BYTES.bytes }
    }
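    // Usage note (an assumption about the caller; see raw/mod.rs): an empty
    // table can point its control-byte pointer at this static slice instead
    // of allocating, since every byte is `EMPTY`.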

    /// Loads a group of bytes starting at the given address.
    #[inline]
    #[allow(clippy::cast_ptr_alignment)] // unaligned load
    pub unsafe fn load(ptr: *const u8) -> Self {
        Group(ptr::read_unaligned(ptr as *const _))
    }

    /// Loads a group of bytes starting at the given address, which must be
    /// aligned to `mem::align_of::<Group>()`.
    #[inline]
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn load_aligned(ptr: *const u8) -> Self {
        // FIXME: use align_offset once it stabilizes
        debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
        Group(ptr::read(ptr as *const _))
    }

    /// Stores the group of bytes to the given address, which must be
    /// aligned to `mem::align_of::<Group>()`.
    #[inline]
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn store_aligned(self, ptr: *mut u8) {
        // FIXME: use align_offset once it stabilizes
        debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
        ptr::write(ptr as *mut _, self.0);
    }

    /// Returns a `BitMask` indicating all bytes in the group which *may*
    /// have the given value.
    ///
    /// This function may return a false positive in certain cases where
    /// the byte in the group differs from the searched value only in its
    /// lowest bit. This is fine because:
    /// - This never happens for `EMPTY` and `DELETED`, only full entries.
    /// - The check for key equality will catch these.
    /// - This only happens if there is at least 1 true match.
    /// - The chance of this happening is very low (< 1% chance per byte).
    #[inline]
    pub fn match_byte(self, byte: u8) -> BitMask {
        // This algorithm is derived from
        // http://graphics.stanford.edu/~seander/bithacks.html#ValueInWord
        let cmp = self.0 ^ repeat(byte);
        BitMask((cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80)).to_le())
    }
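    // A worked example of the trick above (added; assuming a 64-bit group):
    // searching for 0x23 in a group that holds 0x23 only at byte 1, with
    // every other byte EMPTY (0xFF):
    //   - `cmp` is zero exactly at byte 1 (0x23 ^ 0x23) and 0xDC elsewhere.
    //   - Subtracting `repeat(0x01)` borrows through the zero byte, turning
    //     it into 0xFF, so its high bit is now set.
    //   - `& !cmp` clears the bytes that were non-zero before the
    //     subtraction, leaving only the bytes that were zero.
    //   - `& repeat(0x80)` keeps just the per-byte high bits for the mask.
    // The false positive mentioned above occurs when the borrow propagates
    // into a neighbouring byte whose `cmp` value is exactly 0x01.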

    /// Returns a `BitMask` indicating all bytes in the group which are
    /// `EMPTY`.
    #[inline]
    pub fn match_empty(self) -> BitMask {
        // If the high bit is set, then the byte must be either:
        // 1111_1111 (EMPTY) or 1000_0000 (DELETED).
        // So we can just check if the top two bits are 1 by ANDing them.
        BitMask((self.0 & (self.0 << 1) & repeat(0x80)).to_le())
    }
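    // A note on the shift above (added): `self.0 << 1` moves each byte's
    // bit 6 into bit 7. The only bit that crosses a byte boundary lands in
    // bit 0 of the neighbouring byte, which `repeat(0x80)` masks away, so
    // per byte the test is "bit 7 AND bit 6", true only for EMPTY.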

    /// Returns a `BitMask` indicating all bytes in the group which are
    /// `EMPTY` or `DELETED`.
    #[inline]
    pub fn match_empty_or_deleted(self) -> BitMask {
        // A byte is EMPTY or DELETED iff the high bit is set
        BitMask((self.0 & repeat(0x80)).to_le())
    }

    /// Returns a `BitMask` indicating all bytes in the group which are full.
    #[inline]
    pub fn match_full(&self) -> BitMask {
        self.match_empty_or_deleted().invert()
    }

    /// Performs the following transformation on all bytes in the group:
    /// - `EMPTY => EMPTY`
    /// - `DELETED => EMPTY`
    /// - `FULL => DELETED`
    #[inline]
    pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
        // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
        // and high_bit = 0 (FULL) to 1000_0000
        //
        // Here's this logic expanded to concrete values:
        //   let full = 1000_0000 (true) or 0000_0000 (false)
        //   !1000_0000 + 1 = 0111_1111 + 1 = 1000_0000 (no carry)
        //   !0000_0000 + 0 = 1111_1111 + 0 = 1111_1111 (no carry)
        let full = !self.0 & repeat(0x80);
        Group(!full + (full >> 7))
    }
}
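
// A minimal test sketch, not part of the upstream crate. It assumes that
// `DELETED` (0b1000_0000) is reachable from the parent `raw` module like
// `EMPTY`, and that `BitMask` iterates over the indices of its set bits,
// as the lookup loops in raw/mod.rs rely on.
#[cfg(test)]
mod tests {
    use super::super::DELETED;
    use super::*;

    #[test]
    fn match_byte_finds_the_full_entry() {
        let mut bytes = [EMPTY; Group::WIDTH];
        bytes[1] = 0x23; // pretend h2 hash of a full entry
        let group = unsafe { Group::load(bytes.as_ptr()) };

        let mut matches = group.match_byte(0x23).into_iter();
        assert_eq!(matches.next(), Some(1));
        assert_eq!(matches.next(), None);
    }

    #[test]
    fn convert_maps_special_to_empty_and_full_to_deleted() {
        let mut bytes = [EMPTY; Group::WIDTH];
        bytes[0] = DELETED;
        bytes[1] = 0x23; // a full entry
        let group = unsafe { Group::load(bytes.as_ptr()) };

        let converted = group.convert_special_to_empty_and_full_to_deleted();
        // Every byte is now EMPTY or DELETED...
        assert_eq!(
            converted.match_empty_or_deleted().into_iter().count(),
            Group::WIDTH
        );
        // ...and only the former full entry at index 1 is not EMPTY.
        assert!(converted.match_empty().into_iter().all(|i| i != 1));
        assert_eq!(
            converted.match_empty().into_iter().count(),
            Group::WIDTH - 1
        );
    }
}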