/*
 * Utility compute operations used by translated code.
 *
 * Copyright (c) 2007 Thiemo Seufer
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef HOST_UTILS_H
#define HOST_UTILS_H

#include "qemu/bswap.h"

#ifdef CONFIG_INT128
static inline void mulu64(uint64_t *plow, uint64_t *phigh,
                          uint64_t a, uint64_t b)
{
    __uint128_t r = (__uint128_t)a * b;
    *plow = r;
    *phigh = r >> 64;
}

static inline void muls64(uint64_t *plow, uint64_t *phigh,
                          int64_t a, int64_t b)
{
    __int128_t r = (__int128_t)a * b;
    *plow = r;
    *phigh = r >> 64;
}
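
/*
 * For example, mulu64() and muls64() leave the full 128-bit product split
 * into two 64-bit halves:
 *
 *     uint64_t lo, hi;
 *     mulu64(&lo, &hi, UINT64_MAX, UINT64_MAX);
 *     // lo == 1, hi == UINT64_MAX - 1, since (2^64 - 1)^2 == 2^128 - 2^65 + 1
 */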

/* compute with 96 bit intermediate result: (a*b)/c */
static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    return (__int128_t)a * b / c;
}
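
/*
 * A typical (hypothetical) use of muldiv64() is rescaling a count between
 * two clock rates without losing the high bits of the intermediate product,
 * e.g. converting nanoseconds to ticks of a 125 MHz timer:
 *
 *     uint64_t ticks = muldiv64(ns, 125000000, 1000000000);
 *
 * The caller is responsible for ensuring the final result fits in 64 bits.
 */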

static inline int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
{
    if (divisor == 0) {
        return 1;
    } else {
        __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
        __uint128_t result = dividend / divisor;
        *plow = result;
        *phigh = dividend % divisor;
        return result > UINT64_MAX;
    }
}

static inline int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
{
    if (divisor == 0) {
        return 1;
    } else {
        __int128_t dividend = ((__int128_t)*phigh << 64) | *plow;
        __int128_t result = dividend / divisor;
        *plow = result;
        *phigh = dividend % divisor;
        return result != *plow;
    }
}
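
/*
 * For example, divu128() divides the 128-bit value (*phigh:*plow) by a
 * 64-bit divisor, leaving the quotient in *plow and the remainder in
 * *phigh, and returns non-zero on division by zero or if the quotient
 * does not fit in 64 bits:
 *
 *     uint64_t lo = 7, hi = 1;              // dividend = 2^64 + 7
 *     int overflow = divu128(&lo, &hi, 3);
 *     // lo == 0x5555555555555557, hi == 2, overflow == 0
 */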
#else
void muls64(uint64_t *phigh, uint64_t *plow, int64_t a, int64_t b);
void mulu64(uint64_t *phigh, uint64_t *plow, uint64_t a, uint64_t b);
int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
int divs128(int64_t *plow, int64_t *phigh, int64_t divisor);

static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    union {
        uint64_t ll;
        struct {
#ifdef HOST_WORDS_BIGENDIAN
            uint32_t high, low;
#else
            uint32_t low, high;
#endif
        } l;
    } u, res;
    uint64_t rl, rh;

    u.ll = a;
    rl = (uint64_t)u.l.low * (uint64_t)b;
    rh = (uint64_t)u.l.high * (uint64_t)b;
    rh += (rl >> 32);
    res.l.high = rh / c;
    res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c;
    return res.ll;
}
#endif
/**
 * clz32 - count leading zeros in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int clz32(uint32_t val)
{
#if QEMU_GNUC_PREREQ(3, 4)
    return val ? __builtin_clz(val) : 32;
#else
    /* Binary search for the leading one bit. */
    int cnt = 0;

    if (!(val & 0xFFFF0000U)) {
        cnt += 16;
        val <<= 16;
    }
    if (!(val & 0xFF000000U)) {
        cnt += 8;
        val <<= 8;
    }
    if (!(val & 0xF0000000U)) {
        cnt += 4;
        val <<= 4;
    }
    if (!(val & 0xC0000000U)) {
        cnt += 2;
        val <<= 2;
    }
    if (!(val & 0x80000000U)) {
        cnt++;
        val <<= 1;
    }
    if (!(val & 0x80000000U)) {
        cnt++;
    }
    return cnt;
#endif
}
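
/*
 * For example: clz32(0x80000000) == 0, clz32(1) == 31, clz32(0) == 32.
 */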

/**
 * clo32 - count leading ones in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is -1.
 */
static inline int clo32(uint32_t val)
{
    return clz32(~val);
}

/**
 * clz64 - count leading zeros in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int clz64(uint64_t val)
{
#if QEMU_GNUC_PREREQ(3, 4)
    return val ? __builtin_clzll(val) : 64;
#else
    int cnt = 0;

    if (!(val >> 32)) {
        cnt += 32;
    } else {
        val >>= 32;
    }

    return cnt + clz32(val);
#endif
}

/**
 * clo64 - count leading ones in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is -1.
 */
static inline int clo64(uint64_t val)
{
    return clz64(~val);
}

/**
 * ctz32 - count trailing zeros in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz32(uint32_t val)
{
#if QEMU_GNUC_PREREQ(3, 4)
    return val ? __builtin_ctz(val) : 32;
#else
    /* Binary search for the trailing one bit. */
    int cnt;

    cnt = 0;
    if (!(val & 0x0000FFFFUL)) {
        cnt += 16;
        val >>= 16;
    }
    if (!(val & 0x000000FFUL)) {
        cnt += 8;
        val >>= 8;
    }
    if (!(val & 0x0000000FUL)) {
        cnt += 4;
        val >>= 4;
    }
    if (!(val & 0x00000003UL)) {
        cnt += 2;
        val >>= 2;
    }
    if (!(val & 0x00000001UL)) {
        cnt++;
        val >>= 1;
    }
    if (!(val & 0x00000001UL)) {
        cnt++;
    }

    return cnt;
#endif
}
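
/*
 * For example: ctz32(0x80000000) == 31, ctz32(6) == 1, ctz32(0) == 32.
 */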

/**
 * cto32 - count trailing ones in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is -1.
 */
static inline int cto32(uint32_t val)
{
    return ctz32(~val);
}

/**
 * ctz64 - count trailing zeros in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz64(uint64_t val)
{
#if QEMU_GNUC_PREREQ(3, 4)
    return val ? __builtin_ctzll(val) : 64;
#else
    int cnt;

    cnt = 0;
    if (!((uint32_t)val)) {
        cnt += 32;
        val >>= 32;
    }

    return cnt + ctz32(val);
#endif
}

/**
 * cto64 - count trailing ones in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is -1.
 */
static inline int cto64(uint64_t val)
{
    return ctz64(~val);
}

/**
 * clrsb32 - count leading redundant sign bits in a 32-bit value.
 * @val: The value to search
 *
 * Returns the number of bits following the sign bit that are equal to it.
 * No special cases; output range is [0-31].
 */
static inline int clrsb32(uint32_t val)
{
#if QEMU_GNUC_PREREQ(4, 7)
    return __builtin_clrsb(val);
#else
    return clz32(val ^ ((int32_t)val >> 1)) - 1;
#endif
}

/**
 * clrsb64 - count leading redundant sign bits in a 64-bit value.
 * @val: The value to search
 *
 * Returns the number of bits following the sign bit that are equal to it.
 * No special cases; output range is [0-63].
 */
static inline int clrsb64(uint64_t val)
{
#if QEMU_GNUC_PREREQ(4, 7)
    return __builtin_clrsbll(val);
#else
    return clz64(val ^ ((int64_t)val >> 1)) - 1;
#endif
}
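
/*
 * For example: clrsb32(0) == 31, clrsb32(-1) == 31, clrsb32(1) == 30, and
 * clrsb32(0xffff8000) == 16 (the sign bit is followed by 16 copies of itself).
 */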

/**
 * ctpop8 - count the population of one bits in an 8-bit value.
 * @val: The value to search
 */
static inline int ctpop8(uint8_t val)
{
#if QEMU_GNUC_PREREQ(3, 4)
    return __builtin_popcount(val);
#else
    val = (val & 0x55) + ((val >> 1) & 0x55);
    val = (val & 0x33) + ((val >> 2) & 0x33);
    val = (val + (val >> 4)) & 0x0f;

    return val;
#endif
}

/**
 * ctpop16 - count the population of one bits in a 16-bit value.
 * @val: The value to search
 */
static inline int ctpop16(uint16_t val)
{
#if QEMU_GNUC_PREREQ(3, 4)
    return __builtin_popcount(val);
#else
    val = (val & 0x5555) + ((val >> 1) & 0x5555);
    val = (val & 0x3333) + ((val >> 2) & 0x3333);
    val = (val + (val >> 4)) & 0x0f0f;
    val = (val + (val >> 8)) & 0x00ff;

    return val;
#endif
}

/**
 * ctpop32 - count the population of one bits in a 32-bit value.
 * @val: The value to search
 */
static inline int ctpop32(uint32_t val)
{
#if QEMU_GNUC_PREREQ(3, 4)
    return __builtin_popcount(val);
#else
    val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val + (val >> 4)) & 0x0f0f0f0f;
    val = (val * 0x01010101) >> 24;

    return val;
#endif
}

/**
 * ctpop64 - count the population of one bits in a 64-bit value.
 * @val: The value to search
 */
static inline int ctpop64(uint64_t val)
{
#if QEMU_GNUC_PREREQ(3, 4)
    return __builtin_popcountll(val);
#else
    val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
    val = (val + (val >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
    val = (val * 0x0101010101010101ULL) >> 56;

    return val;
#endif
}
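
/*
 * For example: ctpop8(0xff) == 8, ctpop32(0xf0f0f0f0) == 16, ctpop64(0) == 0.
 */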

/**
 * revbit8 - reverse the bits in an 8-bit value.
 * @x: The value to modify.
 */
static inline uint8_t revbit8(uint8_t x)
{
    /* Assign the correct nibble position. */
    x = ((x & 0xf0) >> 4)
      | ((x & 0x0f) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x88) >> 3)
      | ((x & 0x44) >> 1)
      | ((x & 0x22) << 1)
      | ((x & 0x11) << 3);
    return x;
}

/**
 * revbit16 - reverse the bits in a 16-bit value.
 * @x: The value to modify.
 */
static inline uint16_t revbit16(uint16_t x)
{
    /* Assign the correct byte position. */
    x = bswap16(x);
    /* Assign the correct nibble position. */
    x = ((x & 0xf0f0) >> 4)
      | ((x & 0x0f0f) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x8888) >> 3)
      | ((x & 0x4444) >> 1)
      | ((x & 0x2222) << 1)
      | ((x & 0x1111) << 3);
    return x;
}

/**
 * revbit32 - reverse the bits in a 32-bit value.
 * @x: The value to modify.
 */
static inline uint32_t revbit32(uint32_t x)
{
    /* Assign the correct byte position. */
    x = bswap32(x);
    /* Assign the correct nibble position. */
    x = ((x & 0xf0f0f0f0u) >> 4)
      | ((x & 0x0f0f0f0fu) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x88888888u) >> 3)
      | ((x & 0x44444444u) >> 1)
      | ((x & 0x22222222u) << 1)
      | ((x & 0x11111111u) << 3);
    return x;
}

/**
 * revbit64 - reverse the bits in a 64-bit value.
 * @x: The value to modify.
 */
static inline uint64_t revbit64(uint64_t x)
{
    /* Assign the correct byte position. */
    x = bswap64(x);
    /* Assign the correct nibble position. */
    x = ((x & 0xf0f0f0f0f0f0f0f0ull) >> 4)
      | ((x & 0x0f0f0f0f0f0f0f0full) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x8888888888888888ull) >> 3)
      | ((x & 0x4444444444444444ull) >> 1)
      | ((x & 0x2222222222222222ull) << 1)
      | ((x & 0x1111111111111111ull) << 3);
    return x;
}
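
/*
 * For example: revbit8(0x01) == 0x80, revbit16(0x0001) == 0x8000,
 * revbit32(0x00000001) == 0x80000000.
 */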

/* Host type specific sizes of these routines. */

#if ULONG_MAX == UINT32_MAX
# define clzl clz32
# define ctzl ctz32
# define clol clo32
# define ctol cto32
# define ctpopl ctpop32
# define revbitl revbit32
#elif ULONG_MAX == UINT64_MAX
# define clzl clz64
# define ctzl ctz64
# define clol clo64
# define ctol cto64
# define ctpopl ctpop64
# define revbitl revbit64
#else
# error Unknown sizeof long
#endif

static inline bool is_power_of_2(uint64_t value)
{
    if (!value) {
        return false;
    }

    return !(value & (value - 1));
}

/* round down to the nearest power of 2 */
static inline int64_t pow2floor(int64_t value)
{
    if (!is_power_of_2(value)) {
        value = 0x8000000000000000ULL >> clz64(value);
    }
    return value;
}

/* round up to the nearest power of 2 (0 if overflow) */
static inline uint64_t pow2ceil(uint64_t value)
{
    uint8_t nlz = clz64(value);

    if (is_power_of_2(value)) {
        return value;
    }
    if (!nlz) {
        return 0;
    }
    return 1ULL << (64 - nlz);
}
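
/*
 * For example: pow2floor(1000) == 512 and pow2ceil(1000) == 1024; exact
 * powers of two are returned unchanged, and pow2ceil() returns 0 when the
 * result would not fit in 64 bits.
 */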

/**
 * urshift - 128-bit Unsigned Right Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 *
 * The result is zero-extended and stored in plow/phigh, which are
 * input/output variables. Shift values outside the range are taken
 * modulo 128. In other words, the caller is responsible for verifying
 * or asserting both the shift range and the plow/phigh pointers.
 */
void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift);

/**
 * ulshift - 128-bit Unsigned Left Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 * @overflow: out - true if any 1-bit was shifted out.
 *
 * The result is zero-extended and stored in plow/phigh, which are
 * input/output variables. Shift values outside the range are taken
 * modulo 128. In other words, the caller is responsible for verifying
 * or asserting both the shift range and the plow/phigh pointers.
 */
void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow);
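
/*
 * Illustrative use of the 128-bit shift helpers:
 *
 *     uint64_t lo = 0, hi = 1;        // value = 2^64
 *     urshift(&lo, &hi, 1);           // value = 2^63: lo == 1ULL << 63, hi == 0
 *
 *     bool ovf;
 *     ulshift(&lo, &hi, 2, &ovf);     // value = 2^65: lo == 0, hi == 2, ovf == false
 */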

#endif