/*
 * Simple C functions to supplement the C library
 *
 * Copyright (c) 2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/cutils.h"
#include "qemu/bswap.h"

static bool
buffer_zero_int(const void *buf, size_t len)
{
    if (unlikely(len < 8)) {
        /* For a very small buffer, simply accumulate all the bytes.  */
        const unsigned char *p = buf;
        const unsigned char *e = buf + len;
        unsigned char t = 0;

        do {
            t |= *p++;
        } while (p < e);

        return t == 0;
    } else {
        /* Otherwise, use the unaligned memory access functions to
           handle the beginning and end of the buffer, with a couple
           of loops handling the middle aligned section.  */
        uint64_t t = ldq_he_p(buf);
        const uint64_t *p = (uint64_t *)(((uintptr_t)buf + 8) & -8);
        const uint64_t *e = (uint64_t *)(((uintptr_t)buf + len) & -8);

        for (; p + 8 <= e; p += 8) {
            __builtin_prefetch(p + 8);
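            /* Testing t here, one block late, lets the loads for the
               current block issue before the branch on the previous
               block's accumulated result.  */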
            if (t) {
                return false;
            }
            t = p[0] | p[1] | p[2] | p[3] | p[4] | p[5] | p[6] | p[7];
        }
        while (p < e) {
            t |= *p++;
        }
        t |= ldq_he_p(buf + len - 8);

        return t == 0;
    }
}
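/* Illustration of the alignment arithmetic above (example values):
 * with buf == 0x1003 and len == 32, the head load ldq_he_p(buf) covers
 * 0x1003..0x100a, p rounds up to 0x1008, e rounds down to 0x1020, the
 * aligned middle covers 0x1008..0x101f, and the tail load at
 * buf + len - 8 covers 0x101b..0x1022.  The overlapping reads are
 * harmless for an OR-accumulating zero test.  */
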
#if defined(CONFIG_AVX2_OPT) || defined(__SSE2__)
/* Do not use push_options pragmas unnecessarily, because clang
 * does not support them.
 */
#ifdef CONFIG_AVX2_OPT
#pragma GCC push_options
#pragma GCC target("sse2")
#endif
#include <emmintrin.h>

/* Note that each of these vectorized functions requires len >= 64.  */
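/* For the 16-byte variants below, len >= 64 guarantees that the
 * unaligned head load, the aligned tail reads at e[-3]..e[-1], and the
 * final unaligned load at buf + len - 16 stay within the buffer and
 * together cover every byte.  */
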
static bool
buffer_zero_sse2(const void *buf, size_t len)
{
    __m128i t = _mm_loadu_si128(buf);
    __m128i *p = (__m128i *)(((uintptr_t)buf + 5 * 16) & -16);
    __m128i *e = (__m128i *)(((uintptr_t)buf + len) & -16);
    __m128i zero = _mm_setzero_si128();

    /* Loop over 16-byte aligned blocks of 64.  */
    while (likely(p <= e)) {
        __builtin_prefetch(p);
        t = _mm_cmpeq_epi8(t, zero);
        if (unlikely(_mm_movemask_epi8(t) != 0xFFFF)) {
            return false;
        }
        t = p[-4] | p[-3] | p[-2] | p[-1];
        p += 4;
    }

    /* Finish the aligned tail.  */
    t |= e[-3];
    t |= e[-2];
    t |= e[-1];

    /* Finish the unaligned tail.  */
    t |= _mm_loadu_si128(buf + len - 16);

    return _mm_movemask_epi8(_mm_cmpeq_epi8(t, zero)) == 0xFFFF;
}
#ifdef CONFIG_AVX2_OPT
#pragma GCC pop_options
#endif

#ifdef CONFIG_AVX2_OPT
/* Note that due to restrictions/bugs wrt __builtin functions in gcc <= 4.8,
 * the includes have to be within the corresponding push_options region, and
 * therefore the regions themselves have to be ordered with increasing ISA.
 */
#pragma GCC push_options
#pragma GCC target("sse4")
#include <smmintrin.h>
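/* Same structure as buffer_zero_sse2 above, but SSE4.1's PTEST
 * instruction (_mm_testz_si128) tests a vector for all-zero directly,
 * replacing the compare-and-movemask sequence.  */
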
static bool
buffer_zero_sse4(const void *buf, size_t len)
{
    __m128i t = _mm_loadu_si128(buf);
    __m128i *p = (__m128i *)(((uintptr_t)buf + 5 * 16) & -16);
    __m128i *e = (__m128i *)(((uintptr_t)buf + len) & -16);

    /* Loop over 16-byte aligned blocks of 64.  */
    while (likely(p <= e)) {
        __builtin_prefetch(p);
        if (unlikely(!_mm_testz_si128(t, t))) {
            return false;
        }
        t = p[-4] | p[-3] | p[-2] | p[-1];
        p += 4;
    }

    /* Finish the aligned tail.  */
    t |= e[-3];
    t |= e[-2];
    t |= e[-1];

    /* Finish the unaligned tail.  */
    t |= _mm_loadu_si128(buf + len - 16);

    return _mm_testz_si128(t, t);
}

#pragma GCC pop_options
#pragma GCC push_options
#pragma GCC target("avx2")
#include <immintrin.h>

static bool
buffer_zero_avx2(const void *buf, size_t len)
{
    /* Begin with an unaligned head of 32 bytes.  */
    __m256i t = _mm256_loadu_si256(buf);
    __m256i *p = (__m256i *)(((uintptr_t)buf + 5 * 32) & -32);
    __m256i *e = (__m256i *)(((uintptr_t)buf + len) & -32);

    if (likely(p <= e)) {
        /* Loop over 32-byte aligned blocks of 128.  */
        do {
            __builtin_prefetch(p);
            if (unlikely(!_mm256_testz_si256(t, t))) {
                return false;
            }
            t = p[-4] | p[-3] | p[-2] | p[-1];
            p += 4;
        } while (p <= e);
    } else {
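        /* The buffer was too short for the main loop to run: cover
           bytes 32..64 explicitly.  For len <= 128, skip the two
           earliest tail loads below, which for len < 128 would read
           before the start of the buffer.  */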
        t |= _mm256_loadu_si256(buf + 32);
        if (len <= 128) {
            goto last2;
        }
    }

    /* Finish the last block of 128 unaligned.  */
    t |= _mm256_loadu_si256(buf + len - 4 * 32);
    t |= _mm256_loadu_si256(buf + len - 3 * 32);
 last2:
    t |= _mm256_loadu_si256(buf + len - 2 * 32);
    t |= _mm256_loadu_si256(buf + len - 1 * 32);

    return _mm256_testz_si256(t, t);
}
#pragma GCC pop_options
#endif /* CONFIG_AVX2_OPT */

/* Note that for test_buffer_is_zero_next_accel, the most preferred
 * ISA must have the least significant bit.
 */
#define CACHE_AVX2 1
#define CACHE_SSE4 2
#define CACHE_SSE2 4

/* Make sure that these variables are appropriately initialized when
 * SSE2 is enabled on the compiler command-line, but the compiler is
 * too old to support CONFIG_AVX2_OPT.
 */
#ifdef CONFIG_AVX2_OPT
# define INIT_CACHE 0
# define INIT_ACCEL buffer_zero_int
#else
# ifndef __SSE2__
#  error "ISA selection confusion"
# endif
# define INIT_CACHE CACHE_SSE2
# define INIT_ACCEL buffer_zero_sse2
#endif

static unsigned cpuid_cache = INIT_CACHE;
static bool (*buffer_accel)(const void *, size_t) = INIT_ACCEL;

static void init_accel(unsigned cache)
{
    bool (*fn)(const void *, size_t) = buffer_zero_int;
    if (cache & CACHE_SSE2) {
        fn = buffer_zero_sse2;
    }
#ifdef CONFIG_AVX2_OPT
    if (cache & CACHE_SSE4) {
        fn = buffer_zero_sse4;
    }
    if (cache & CACHE_AVX2) {
        fn = buffer_zero_avx2;
    }
#endif
    buffer_accel = fn;
}

#ifdef CONFIG_AVX2_OPT
#include "qemu/cpuid.h"

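/* Runs before main() via __attribute__((constructor)), so buffer_accel
   reflects the host CPU before the first call to buffer_is_zero.  */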
static void __attribute__((constructor)) init_cpuid_cache(void)
{
    int max = __get_cpuid_max(0, NULL);
    int a, b, c, d;
    unsigned cache = 0;

    if (max >= 1) {
        __cpuid(1, a, b, c, d);
        if (d & bit_SSE2) {
            cache |= CACHE_SSE2;
        }
        if (c & bit_SSE4_1) {
            cache |= CACHE_SSE4;
        }

        /* We must check that AVX is not just available, but usable.  */
        if ((c & bit_OSXSAVE) && (c & bit_AVX) && max >= 7) {
            int bv;
            __asm("xgetbv" : "=a"(bv), "=d"(d) : "c"(0));
            __cpuid_count(7, 0, a, b, c, d);
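            /* bv holds XCR0: bit 1 is XMM state, bit 2 is YMM state.
               Both must be enabled by the OS ((bv & 6) == 6) for AVX
               instructions to be usable.  */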
256 | if ((bv & 6) == 6 && (b & bit_AVX2)) { | |
257 | cache |= CACHE_AVX2; | |
5e33a872 | 258 | } |
88ca8e80 RH |
259 | } |
260 | } | |
5e33a872 | 261 | cpuid_cache = cache; |
d9911d14 | 262 | init_accel(cache); |
88ca8e80 | 263 | } |
d9911d14 | 264 | #endif /* CONFIG_AVX2_OPT */ |
88ca8e80 | 265 | |
bool test_buffer_is_zero_next_accel(void)
{
    /* If no bits set, we just tested buffer_zero_int, and there
       are no more acceleration options to test.  */
    if (cpuid_cache == 0) {
        return false;
    }
    /* Disable the accelerator we used before and select a new one.  */
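    /* x &= x - 1 clears the least significant set bit, which is why the
       most preferred ISA must occupy the lowest bit of the CACHE_*
       encoding: each call steps down to the next-best accelerator.  */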
    cpuid_cache &= cpuid_cache - 1;
    init_accel(cpuid_cache);
    return true;
}

static bool select_accel_fn(const void *buf, size_t len)
{
    if (likely(len >= 64)) {
        return buffer_accel(buf, len);
    }
    return buffer_zero_int(buf, len);
}

#else
#define select_accel_fn  buffer_zero_int
bool test_buffer_is_zero_next_accel(void)
{
    return false;
}
#endif

/*
 * Checks if a buffer is all zeroes
 */
bool buffer_is_zero(const void *buf, size_t len)
{
    if (unlikely(len == 0)) {
        return true;
    }

    /* Fetch the beginning of the buffer while we select the accelerator.  */
    __builtin_prefetch(buf);

    /* Use an optimized zero check if possible.  Note that this also
       includes a check for an unrolled loop over 64-bit integers.  */
    return select_accel_fn(buf, len);
}
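
/* Usage sketch (hypothetical caller, not part of this file): callers
 * such as live migration test whole guest pages so that all-zero pages
 * can be recorded compactly instead of being copied, e.g.
 *
 *     if (buffer_is_zero(page, page_size)) {
 *         ...mark the page as zero rather than sending its contents...
 *     }
 */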