qemu.git: include/qemu/bswap.h
bswap: Add host endian unaligned access functions
#ifndef BSWAP_H
#define BSWAP_H

#include "config-host.h"

#include <inttypes.h>
#include <string.h>   /* for the memcpy()-based accessors below */
#include "fpu/softfloat.h"

#ifdef CONFIG_MACHINE_BSWAP_H
# include <sys/endian.h>
# include <sys/types.h>
# include <machine/bswap.h>
#elif defined(CONFIG_BYTESWAP_H)
# include <byteswap.h>

static inline uint16_t bswap16(uint16_t x)
{
    return bswap_16(x);
}

static inline uint32_t bswap32(uint32_t x)
{
    return bswap_32(x);
}

static inline uint64_t bswap64(uint64_t x)
{
    return bswap_64(x);
}
#else
static inline uint16_t bswap16(uint16_t x)
{
    return (((x & 0x00ff) << 8) |
            ((x & 0xff00) >> 8));
}

static inline uint32_t bswap32(uint32_t x)
{
    return (((x & 0x000000ffU) << 24) |
            ((x & 0x0000ff00U) << 8) |
            ((x & 0x00ff0000U) >> 8) |
            ((x & 0xff000000U) >> 24));
}

static inline uint64_t bswap64(uint64_t x)
{
    return (((x & 0x00000000000000ffULL) << 56) |
            ((x & 0x000000000000ff00ULL) << 40) |
            ((x & 0x0000000000ff0000ULL) << 24) |
            ((x & 0x00000000ff000000ULL) << 8) |
            ((x & 0x000000ff00000000ULL) >> 8) |
            ((x & 0x0000ff0000000000ULL) >> 24) |
            ((x & 0x00ff000000000000ULL) >> 40) |
            ((x & 0xff00000000000000ULL) >> 56));
}
#endif /* ! CONFIG_MACHINE_BSWAP_H */
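
/*
 * Illustrative values for the byte-swap helpers above:
 *   bswap16(0x1234)                == 0x3412
 *   bswap32(0x12345678)            == 0x78563412
 *   bswap64(0x0102030405060708ULL) == 0x0807060504030201ULL
 */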

static inline void bswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void bswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void bswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#if defined(HOST_WORDS_BIGENDIAN)
#define be_bswap(v, size) (v)
#define le_bswap(v, size) bswap ## size(v)
#define be_bswaps(v, size)
#define le_bswaps(p, size) *p = bswap ## size(*p);
#else
#define le_bswap(v, size) (v)
#define be_bswap(v, size) bswap ## size(v)
#define le_bswaps(v, size)
#define be_bswaps(p, size) *p = bswap ## size(*p);
#endif
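
/*
 * On a little-endian host (HOST_WORDS_BIGENDIAN undefined) le_bswap(v, 32)
 * expands to (v) and be_bswap(v, 32) expands to bswap32(v); on a big-endian
 * host the roles are reversed.  The *_bswaps variants do the same swap in
 * place through a pointer, or expand to nothing when no swap is needed.
 */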

#define CPU_CONVERT(endian, size, type)\
static inline type endian ## size ## _to_cpu(type v)\
{\
    return endian ## _bswap(v, size);\
}\
\
static inline type cpu_to_ ## endian ## size(type v)\
{\
    return endian ## _bswap(v, size);\
}\
\
static inline void endian ## size ## _to_cpus(type *p)\
{\
    endian ## _bswaps(p, size)\
}\
\
static inline void cpu_to_ ## endian ## size ## s(type *p)\
{\
    endian ## _bswaps(p, size)\
}\
\
static inline type endian ## size ## _to_cpup(const type *p)\
{\
    return endian ## size ## _to_cpu(*p);\
}\
\
static inline void cpu_to_ ## endian ## size ## w(type *p, type v)\
{\
    *p = cpu_to_ ## endian ## size(v);\
}

CPU_CONVERT(be, 16, uint16_t)
CPU_CONVERT(be, 32, uint32_t)
CPU_CONVERT(be, 64, uint64_t)

CPU_CONVERT(le, 16, uint16_t)
CPU_CONVERT(le, 32, uint32_t)
CPU_CONVERT(le, 64, uint64_t)
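
/*
 * Each CPU_CONVERT(endian, size, type) invocation above generates six
 * helpers; for example CPU_CONVERT(le, 32, uint32_t) defines:
 *   le32_to_cpu(v), cpu_to_le32(v)    - value conversions
 *   le32_to_cpus(p), cpu_to_le32s(p)  - in-place conversions
 *   le32_to_cpup(p)                   - load through a pointer
 *   cpu_to_le32w(p, v)                - store through a pointer
 */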

/* unaligned versions (optimized for frequent unaligned accesses) */

#if defined(__i386__) || defined(_ARCH_PPC)

#define cpu_to_le16wu(p, v) cpu_to_le16w(p, v)
#define cpu_to_le32wu(p, v) cpu_to_le32w(p, v)
#define le16_to_cpupu(p) le16_to_cpup(p)
#define le32_to_cpupu(p) le32_to_cpup(p)
#define be32_to_cpupu(p) be32_to_cpup(p)

#define cpu_to_be16wu(p, v) cpu_to_be16w(p, v)
#define cpu_to_be32wu(p, v) cpu_to_be32w(p, v)
#define cpu_to_be64wu(p, v) cpu_to_be64w(p, v)

#else

static inline void cpu_to_le16wu(uint16_t *p, uint16_t v)
{
    uint8_t *p1 = (uint8_t *)p;

    p1[0] = v & 0xff;
    p1[1] = v >> 8;
}

static inline void cpu_to_le32wu(uint32_t *p, uint32_t v)
{
    uint8_t *p1 = (uint8_t *)p;

    p1[0] = v & 0xff;
    p1[1] = v >> 8;
    p1[2] = v >> 16;
    p1[3] = v >> 24;
}

static inline uint16_t le16_to_cpupu(const uint16_t *p)
{
    const uint8_t *p1 = (const uint8_t *)p;
    return p1[0] | (p1[1] << 8);
}

static inline uint32_t le32_to_cpupu(const uint32_t *p)
{
    const uint8_t *p1 = (const uint8_t *)p;
    return p1[0] | (p1[1] << 8) | (p1[2] << 16) | (p1[3] << 24);
}

static inline uint32_t be32_to_cpupu(const uint32_t *p)
{
    const uint8_t *p1 = (const uint8_t *)p;
    return p1[3] | (p1[2] << 8) | (p1[1] << 16) | (p1[0] << 24);
}

static inline void cpu_to_be16wu(uint16_t *p, uint16_t v)
{
    uint8_t *p1 = (uint8_t *)p;

    p1[0] = v >> 8;
    p1[1] = v & 0xff;
}

static inline void cpu_to_be32wu(uint32_t *p, uint32_t v)
{
    uint8_t *p1 = (uint8_t *)p;

    p1[0] = v >> 24;
    p1[1] = v >> 16;
    p1[2] = v >> 8;
    p1[3] = v & 0xff;
}

static inline void cpu_to_be64wu(uint64_t *p, uint64_t v)
{
    uint8_t *p1 = (uint8_t *)p;

    p1[0] = v >> 56;
    p1[1] = v >> 48;
    p1[2] = v >> 40;
    p1[3] = v >> 32;
    p1[4] = v >> 24;
    p1[5] = v >> 16;
    p1[6] = v >> 8;
    p1[7] = v & 0xff;
}

#endif
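
/*
 * The "wu"/"pu" helpers above have the same semantics as the aligned
 * cpu_to_*w and *_to_cpup functions but tolerate misaligned pointers; e.g.
 * cpu_to_be32wu(p, 0x12345678) always leaves the bytes 0x12 0x34 0x56 0x78
 * at p, and le16_to_cpupu(p) reads two bytes as a little-endian uint16_t,
 * whatever the host byte order.
 */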

#ifdef HOST_WORDS_BIGENDIAN
#define cpu_to_32wu cpu_to_be32wu
#define leul_to_cpu(v) glue(glue(le,HOST_LONG_BITS),_to_cpu)(v)
#else
#define cpu_to_32wu cpu_to_le32wu
#define leul_to_cpu(v) (v)
#endif

#undef le_bswap
#undef be_bswap
#undef le_bswaps
#undef be_bswaps

/* len must be one of 1, 2, 4 */
static inline uint32_t qemu_bswap_len(uint32_t value, int len)
{
    return bswap32(value) >> (32 - 8 * len);
}
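
/*
 * qemu_bswap_len() byte-swaps only the low "len" bytes of the value, e.g.
 *   qemu_bswap_len(0x00000012, 1) == 0x12
 *   qemu_bswap_len(0x00001234, 2) == 0x3412
 *   qemu_bswap_len(0x12345678, 4) == 0x78563412
 */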

/* Unions for reinterpreting between floats and integers. */

typedef union {
    float32 f;
    uint32_t l;
} CPU_FloatU;

typedef union {
    float64 d;
#if defined(HOST_WORDS_BIGENDIAN)
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;

typedef union {
    floatx80 d;
    struct {
        uint64_t lower;
        uint16_t upper;
    } l;
} CPU_LDoubleU;

typedef union {
    float128 q;
#if defined(HOST_WORDS_BIGENDIAN)
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
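
/*
 * The struct members are ordered so that field names are independent of the
 * host byte order: in CPU_DoubleU, u.l.upper is always the most significant
 * 32 bits of u.ll and u.l.lower the least significant, whether the host is
 * big- or little-endian.
 */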

/* unaligned/endian-independent pointer access */

/*
 * the generic syntax is:
 *
 * load: ld{type}{sign}{size}{endian}_p(ptr)
 *
 * store: st{type}{size}{endian}_p(ptr, val)
 *
 * Note there are small differences with the softmmu access API!
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): host endian
 *   be   : big endian
 *   le   : little endian
 */
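/*
 * Examples of how the names decode:
 *   lduw_le_p(p)   - load an unsigned 16-bit little-endian value
 *   ldsw_be_p(p)   - load a signed 16-bit big-endian value (sign-extended)
 *   stl_be_p(p, v) - store v as a 32-bit big-endian value
 *   ldfq_le_p(p)   - load a 64-bit little-endian float64
 *   stq_p(p, v)    - store a 64-bit value in host byte order
 */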
static inline int ldub_p(const void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(const void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* Any compiler worth its salt will turn these memcpy into native unaligned
   operations.  Thus we don't need to play games with packed attributes, or
   inline byte-by-byte stores.  */

static inline int lduw_p(const void *ptr)
{
    uint16_t r;
    memcpy(&r, ptr, sizeof(r));
    return r;
}

static inline int ldsw_p(const void *ptr)
{
    int16_t r;
    memcpy(&r, ptr, sizeof(r));
    return r;
}

static inline void stw_p(void *ptr, uint16_t v)
{
    memcpy(ptr, &v, sizeof(v));
}

static inline int ldl_p(const void *ptr)
{
    int32_t r;
    memcpy(&r, ptr, sizeof(r));
    return r;
}

static inline void stl_p(void *ptr, uint32_t v)
{
    memcpy(ptr, &v, sizeof(v));
}

static inline uint64_t ldq_p(const void *ptr)
{
    uint64_t r;
    memcpy(&r, ptr, sizeof(r));
    return r;
}

static inline void stq_p(void *ptr, uint64_t v)
{
    memcpy(ptr, &v, sizeof(v));
}
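
/*
 * These host-endian accessors read and write through possibly misaligned
 * pointers; e.g. if the four bytes at ptr are 0x78 0x56 0x34 0x12, ldl_p(ptr)
 * returns 0x12345678 on a little-endian host and 0x78563412 on a big-endian
 * one, while ldl_le_p(ptr) below returns 0x12345678 on either.
 */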

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system wide setting : bad */
#if defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    const uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    const uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}

#else

static inline int lduw_le_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif
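
/*
 * The #else branch above is taken on little-endian hosts that can do
 * unaligned accesses (neither HOST_WORDS_BIGENDIAN nor WORDS_ALIGNED is
 * defined), where the *_le_p accessors reduce to plain pointer dereferences.
 */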

#if !defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    const uint8_t *b = ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(const void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    uint32_t a, b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t *)ptr + 4);
    return (((uint64_t)a << 32) | b);
}

static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t *)ptr + 4, v);
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}

#else

static inline int lduw_be_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif
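
/*
 * Here the conservative *_be_p path is used when the host is little-endian
 * or only allows aligned accesses; on i386/x86_64 it byte-swaps with inline
 * assembly (xchgb/bswap) while loading or storing.  The #else branch, for
 * big-endian hosts with unaligned access, is again a plain dereference.
 */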

#endif /* BSWAP_H */