]>
Commit | Line | Data |
---|---|---|
f20569fa XL |
1 | #[cfg(not(feature = "runtime-dispatch-simd"))] |
2 | use core::{mem, ptr, usize}; | |
3 | #[cfg(feature = "runtime-dispatch-simd")] | |
4 | use std::{mem, ptr, usize}; | |
5 | ||
/// Replicate `byte` into every byte lane of a `usize`.
///
/// `usize::MAX / 0xFF` is the pattern `0x0101…01`; multiplying it by the
/// byte value copies that value into each lane.
fn splat(byte: u8) -> usize {
    (usize::MAX / 0xFF) * byte as usize
}
10 | ||
/// Load one `usize`-sized chunk from `bytes` starting at `offset`, with no
/// bounds checking and no alignment requirement.
///
/// # Safety
/// The caller must guarantee `offset + size_of::<usize>() <= bytes.len()`.
unsafe fn usize_load_unchecked(bytes: &[u8], offset: usize) -> usize {
    // SAFETY: the caller's contract keeps the whole read inside `bytes`;
    // `read_unaligned` tolerates arbitrary alignment of the source pointer.
    ptr::read_unaligned(bytes.as_ptr().add(offset) as *const usize)
}
20 | ||
/// SWAR byte comparison: returns a `usize` with `0x01` in every byte lane
/// where `lhs` and `rhs` hold the same byte, and `0x00` where they differ.
fn bytewise_equal(lhs: usize, rhs: usize) -> usize {
    let ones = usize::MAX / 0xFF; // 0x01 in every byte
    let high = ones << 7;         // 0x80 in every byte

    // A byte of `diff` is zero exactly where the inputs agree.
    let diff = lhs ^ rhs;
    // `(diff & !high) + !high` carries into bit 7 of a lane iff any of that
    // lane's low seven bits is set; OR-ing `diff` folds the high bit back in.
    // Bit 7 of each lane is then set iff the lane differed, so after the
    // shift-and-invert, equal lanes hold 1 in bit 0.
    !((((diff & !high) + !high) | diff) >> 7) & ones
}
28 | ||
/// Horizontally sum the individual byte lanes of `values`.
///
/// Exact for any input: the largest possible byte-sum (255 per lane) fits
/// comfortably in the 16-bit accumulator lanes used below.
fn sum_usize(values: usize) -> usize {
    let lane_lo = usize::MAX / 0xFFFF; // 0x0001 in every 16-bit lane
    let lane_mask = lane_lo * 0xFF;    // 0x00FF in every 16-bit lane

    // Fold adjacent byte pairs into 16-bit lanes so no lane can overflow
    // during the multiply below.
    let pairs = (values & lane_mask) + ((values >> 8) & lane_mask);

    // Multiplying by 0x…0001_0001 accumulates all 16-bit lanes into the top
    // lane; shift it down to recover the total.
    pairs.wrapping_mul(lane_lo) >> ((mem::size_of::<usize>() - 2) * 8)
}
39 | ||
/// For every byte lane of `values`, yield 1 if that byte can start a UTF-8
/// sequence and 0 if it is a continuation byte.
fn is_leading_utf8_byte(values: usize) -> usize {
    let ones = usize::MAX / 0xFF; // 0x01 in every byte
    // A continuation byte is 0b10xxxxxx: bit 7 set AND bit 6 clear.  A byte
    // is therefore leading iff NOT(bit7 & !bit6); the mask keeps only bit 0
    // of each lane.
    !((values >> 7) & (!values >> 6)) & ones
}
44 | ||
45 | pub fn chunk_count(haystack: &[u8], needle: u8) -> usize { | |
46 | let chunksize = mem::size_of::<usize>(); | |
47 | assert!(haystack.len() >= chunksize); | |
48 | ||
49 | unsafe { | |
50 | let mut offset = 0; | |
51 | let mut count = 0; | |
52 | ||
53 | let needles = splat(needle); | |
54 | ||
55 | // 2040 | |
56 | while haystack.len() >= offset + chunksize * 255 { | |
57 | let mut counts = 0; | |
58 | for _ in 0..255 { | |
59 | counts += bytewise_equal(usize_load_unchecked(haystack, offset), needles); | |
60 | offset += chunksize; | |
61 | } | |
62 | count += sum_usize(counts); | |
63 | } | |
64 | ||
65 | // 8 | |
66 | let mut counts = 0; | |
67 | for i in 0..(haystack.len() - offset) / chunksize { | |
68 | counts += bytewise_equal(usize_load_unchecked(haystack, offset + i * chunksize), needles); | |
69 | } | |
70 | if haystack.len() % 8 != 0 { | |
71 | let mask = usize::from_le(!(!0 >> ((haystack.len() % chunksize) * 8))); | |
72 | counts += bytewise_equal(usize_load_unchecked(haystack, haystack.len() - chunksize), needles) & mask; | |
73 | } | |
74 | count += sum_usize(counts); | |
75 | ||
76 | count | |
77 | } | |
78 | } | |
79 | ||
80 | pub fn chunk_num_chars(utf8_chars: &[u8]) -> usize { | |
81 | let chunksize = mem::size_of::<usize>(); | |
82 | assert!(utf8_chars.len() >= chunksize); | |
83 | ||
84 | unsafe { | |
85 | let mut offset = 0; | |
86 | let mut count = 0; | |
87 | ||
88 | // 2040 | |
89 | while utf8_chars.len() >= offset + chunksize * 255 { | |
90 | let mut counts = 0; | |
91 | for _ in 0..255 { | |
92 | counts += is_leading_utf8_byte(usize_load_unchecked(utf8_chars, offset)); | |
93 | offset += chunksize; | |
94 | } | |
95 | count += sum_usize(counts); | |
96 | } | |
97 | ||
98 | // 8 | |
99 | let mut counts = 0; | |
100 | for i in 0..(utf8_chars.len() - offset) / chunksize { | |
101 | counts += is_leading_utf8_byte(usize_load_unchecked(utf8_chars, offset + i * chunksize)); | |
102 | } | |
103 | if utf8_chars.len() % 8 != 0 { | |
104 | let mask = usize::from_le(!(!0 >> ((utf8_chars.len() % chunksize) * 8))); | |
105 | counts += is_leading_utf8_byte(usize_load_unchecked(utf8_chars, utf8_chars.len() - chunksize)) & mask; | |
106 | } | |
107 | count += sum_usize(counts); | |
108 | ||
109 | count | |
110 | } | |
111 | } |