/*
 * QEMU byte-swapping helpers (include/qemu/bswap.h).
 * NOTE(review): the original include guard / license header is outside
 * this view — confirm against the upstream file.
 */
/*
 * Byte-swap a 16/32/64-bit value using the compiler builtins, which
 * lower to single instructions (e.g. x86 BSWAP) where available.
 */
#define bswap16(_x) __builtin_bswap16(_x)
#define bswap32(_x) __builtin_bswap32(_x)
#define bswap64(_x) __builtin_bswap64(_x)
/*
 * Byte-swap the low 24 bits of @x (there is no 24-bit compiler builtin).
 * The top byte of the result is always zero; callers are expected to
 * pass a value with the top byte clear.
 */
static inline uint32_t bswap24(uint32_t x)
{
    return (((x & 0x000000ffU) << 16) |
            ((x & 0x0000ff00U) <<  0) |
            ((x & 0x00ff0000U) >> 16));
}
/* In-place 16-bit byte swap of the value pointed to by @s. */
static inline void bswap16s(uint16_t *s)
{
    *s = __builtin_bswap16(*s);
}
/*
 * In-place 24-bit byte swap of the value pointed to by @s.
 * The top byte of *s is masked off before swapping, so the result's
 * top byte is always zero.
 */
static inline void bswap24s(uint32_t *s)
{
    *s = bswap24(*s & 0x00ffffffU);
}
/* In-place 32-bit byte swap of the value pointed to by @s. */
static inline void bswap32s(uint32_t *s)
{
    *s = __builtin_bswap32(*s);
}
/* In-place 64-bit byte swap of the value pointed to by @s. */
static inline void bswap64s(uint64_t *s)
{
    *s = __builtin_bswap64(*s);
}
/*
 * Host-endianness-dependent swap helpers: the matching-endianness forms
 * are no-ops, the opposite-endianness forms swap.  The *_bswaps variants
 * operate in place through a pointer.
 *
 * NOTE(review): the scraped source had lost the preprocessor conditional
 * around these two alternative definitions; reconstructed here as
 * HOST_BIG_ENDIAN per upstream convention — confirm against the build
 * configuration.
 */
#if HOST_BIG_ENDIAN
#define be_bswap(v, size) (v)
#define le_bswap(v, size) glue(__builtin_bswap, size)(v)
#define le_bswap24(v) bswap24(v)
#define be_bswaps(v, size)
#define le_bswaps(p, size) \
    do { *p = glue(__builtin_bswap, size)(*p); } while (0)
#else
#define le_bswap(v, size) (v)
#define le_bswap24(v) (v)
#define be_bswap(v, size) glue(__builtin_bswap, size)(v)
#define le_bswaps(v, size)
#define be_bswaps(p, size) \
    do { *p = glue(__builtin_bswap, size)(*p); } while (0)
#endif
/*
 * Endianness conversion functions between host cpu and specified endianness.
 * (We list the complete set of prototypes produced by the macros below
 * to assist people who search the headers to find their definitions.)
 *
 * uint16_t le16_to_cpu(uint16_t v);
 * uint32_t le32_to_cpu(uint32_t v);
 * uint64_t le64_to_cpu(uint64_t v);
 * uint16_t be16_to_cpu(uint16_t v);
 * uint32_t be32_to_cpu(uint32_t v);
 * uint64_t be64_to_cpu(uint64_t v);
 *
 * Convert the value @v from the specified format to the native
 * endianness of the host CPU by byteswapping if necessary, and
 * return the converted value.
 *
 * uint16_t cpu_to_le16(uint16_t v);
 * uint32_t cpu_to_le32(uint32_t v);
 * uint64_t cpu_to_le64(uint64_t v);
 * uint16_t cpu_to_be16(uint16_t v);
 * uint32_t cpu_to_be32(uint32_t v);
 * uint64_t cpu_to_be64(uint64_t v);
 *
 * Convert the value @v from the native endianness of the host CPU to
 * the specified format by byteswapping if necessary, and return
 * the converted value.
 *
 * void le16_to_cpus(uint16_t *v);
 * void le32_to_cpus(uint32_t *v);
 * void le64_to_cpus(uint64_t *v);
 * void be16_to_cpus(uint16_t *v);
 * void be32_to_cpus(uint32_t *v);
 * void be64_to_cpus(uint64_t *v);
 *
 * Do an in-place conversion of the value pointed to by @v from the
 * specified format to the native endianness of the host CPU.
 *
 * void cpu_to_le16s(uint16_t *v);
 * void cpu_to_le32s(uint32_t *v);
 * void cpu_to_le64s(uint64_t *v);
 * void cpu_to_be16s(uint16_t *v);
 * void cpu_to_be32s(uint32_t *v);
 * void cpu_to_be64s(uint64_t *v);
 *
 * Do an in-place conversion of the value pointed to by @v from the
 * native endianness of the host CPU to the specified format.
 *
 * Both X_to_cpu() and cpu_to_X() perform the same operation; you
 * should use whichever one is better documenting of the function your
 * code is performing.
 *
 * Do not use these functions for conversion of values which are in guest
 * memory, since the data may not be sufficiently aligned for the host CPU's
 * load and store instructions. Instead you should use the ld*_p() and
 * st*_p() functions, which perform loads and stores of data of any
 * required size and endianness and handle possible misalignment.
 */
/*
 * Generate the four conversion helpers for one (endianness, size) pair:
 * X_to_cpu(), cpu_to_X(), and the in-place X_to_cpus()/cpu_to_Xs().
 * All four expand to the same swap (or no-op) chosen by the
 * endian-dependent be_bswap/le_bswap macros above.
 */
#define CPU_CONVERT(endian, size, type)\
static inline type endian ## size ## _to_cpu(type v)\
{\
    return glue(endian, _bswap)(v, size);\
}\
\
static inline type cpu_to_ ## endian ## size(type v)\
{\
    return glue(endian, _bswap)(v, size);\
}\
\
static inline void endian ## size ## _to_cpus(type *p)\
{\
    glue(endian, _bswaps)(p, size);\
}\
\
static inline void cpu_to_ ## endian ## size ## s(type *p)\
{\
    glue(endian, _bswaps)(p, size);\
}
133 CPU_CONVERT(be
, 16, uint16_t)
134 CPU_CONVERT(be
, 32, uint32_t)
135 CPU_CONVERT(be
, 64, uint64_t)
137 CPU_CONVERT(le
, 16, uint16_t)
138 CPU_CONVERT(le
, 32, uint32_t)
139 CPU_CONVERT(le
, 64, uint64_t)
/*
 * Same as cpu_to_le{16,32,64}, except that gcc will figure the result is
 * a compile-time constant if you pass in a constant.  So this can be
 * used to initialize static variables.
 */
/*
 * Compile-time constant little-endian conversion.  Pure shift/mask
 * expressions so the compiler folds them when the argument is constant.
 *
 * NOTE(review): the scraped source had lost the preprocessor conditional
 * separating the big-endian (swapping) and little-endian (identity)
 * definitions; reconstructed as HOST_BIG_ENDIAN — confirm upstream.
 */
#if HOST_BIG_ENDIAN
# define const_le64(_x)                          \
    ((((_x) & 0x00000000000000ffULL) << 56) |    \
     (((_x) & 0x000000000000ff00ULL) << 40) |    \
     (((_x) & 0x0000000000ff0000ULL) << 24) |    \
     (((_x) & 0x00000000ff000000ULL) <<  8) |    \
     (((_x) & 0x000000ff00000000ULL) >>  8) |    \
     (((_x) & 0x0000ff0000000000ULL) >> 24) |    \
     (((_x) & 0x00ff000000000000ULL) >> 40) |    \
     (((_x) & 0xff00000000000000ULL) >> 56))
# define const_le32(_x)                          \
    ((((_x) & 0x000000ffU) << 24) |              \
     (((_x) & 0x0000ff00U) <<  8) |              \
     (((_x) & 0x00ff0000U) >>  8) |              \
     (((_x) & 0xff000000U) >> 24))
# define const_le16(_x)                          \
    ((((_x) & 0x00ff) << 8) |                    \
     (((_x) & 0xff00) >> 8))
#else
# define const_le64(_x) (_x)
# define const_le32(_x) (_x)
# define const_le16(_x) (_x)
#endif
/* unaligned/endian-independent pointer access */

/*
 * the generic syntax is:
 *
 * load:  ld{type}{sign}{size}_{endian}_p(ptr)
 *
 * store: st{type}{size}_{endian}_p(ptr, val)
 *
 * Note there are small differences with the softmmu access API!
 *
 * (empty): integer access
 * (empty): for 32 or 64 bit sizes (including floats and doubles)
 * (except for byte accesses, which have no endian infix).
 *
 * The target endian accessors are obviously only available to source
 * files which are built per-target; they are defined in cpu-all.h.
 *
 * In all cases these functions take a host pointer.
 * For accessors that take a guest address rather than a
 * host address, see the cpu_{ld,st}_* accessors defined elsewhere.
 *
 * For cases where the size to be used is not fixed at compile time,
 * there are
 *
 * stn_{endian}_p(ptr, sz, val)
 * which stores @val to @ptr as an @endian-order number @sz bytes in size
 *
 * ldn_{endian}_p(ptr, sz)
 * which loads @sz bytes from @ptr as an unsigned @endian-order number
 * and returns it in a uint64_t.
 */
/* Load an unsigned byte from @ptr (0..255). */
static inline int ldub_p(const void *ptr)
{
    return *(uint8_t *)ptr;
}
/* Load a signed byte from @ptr (-128..127). */
static inline int ldsb_p(const void *ptr)
{
    return *(int8_t *)ptr;
}
/* Store the byte @v to @ptr.  (Body lost in the scraped source.) */
static inline void stb_p(void *ptr, uint8_t v)
{
    *(uint8_t *)ptr = v;
}
/*
 * Any compiler worth its salt will turn these memcpy into native unaligned
 * operations.  Thus we don't need to play games with packed attributes, or
 * inline byte-by-byte stores.
 * Some compilation environments (eg some fortify-source implementations)
 * may intercept memcpy() in a way that defeats the compiler optimization,
 * though, so we use __builtin_memcpy() to give ourselves the best chance
 * of good performance.
 */
/* Load an unsigned 16-bit host-endian value from possibly-unaligned @ptr. */
static inline int lduw_he_p(const void *ptr)
{
    uint16_t r;
    __builtin_memcpy(&r, ptr, sizeof(r));
    return r;
}
/* Load a signed 16-bit host-endian value from possibly-unaligned @ptr. */
static inline int ldsw_he_p(const void *ptr)
{
    int16_t r;
    __builtin_memcpy(&r, ptr, sizeof(r));
    return r;
}
/* Store the 16-bit host-endian value @v to possibly-unaligned @ptr. */
static inline void stw_he_p(void *ptr, uint16_t v)
{
    __builtin_memcpy(ptr, &v, sizeof(v));
}
/*
 * Store the low 3 bytes of the host-endian value @v to possibly-unaligned
 * @ptr, i.e. the first 3 bytes of @v's in-memory representation.
 */
static inline void st24_he_p(void *ptr, uint32_t v)
{
    __builtin_memcpy(ptr, &v, 3);
}
/* Load a signed 32-bit host-endian value from possibly-unaligned @ptr. */
static inline int ldl_he_p(const void *ptr)
{
    int32_t r;
    __builtin_memcpy(&r, ptr, sizeof(r));
    return r;
}
/* Store the 32-bit host-endian value @v to possibly-unaligned @ptr. */
static inline void stl_he_p(void *ptr, uint32_t v)
{
    __builtin_memcpy(ptr, &v, sizeof(v));
}
/* Load an unsigned 64-bit host-endian value from possibly-unaligned @ptr. */
static inline uint64_t ldq_he_p(const void *ptr)
{
    uint64_t r;
    __builtin_memcpy(&r, ptr, sizeof(r));
    return r;
}
/* Store the 64-bit host-endian value @v to possibly-unaligned @ptr. */
static inline void stq_he_p(void *ptr, uint64_t v)
{
    __builtin_memcpy(ptr, &v, sizeof(v));
}
/*
 * Little-endian loads/stores: built on the host-endian accessors above,
 * with le_bswap converting between LE storage order and host order
 * (a no-op on LE hosts).
 */
static inline int lduw_le_p(const void *ptr)
{
    return (uint16_t)le_bswap(lduw_he_p(ptr), 16);
}

static inline int ldsw_le_p(const void *ptr)
{
    return (int16_t)le_bswap(lduw_he_p(ptr), 16);
}

static inline int ldl_le_p(const void *ptr)
{
    return le_bswap(ldl_he_p(ptr), 32);
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return le_bswap(ldq_he_p(ptr), 64);
}

static inline void stw_le_p(void *ptr, uint16_t v)
{
    stw_he_p(ptr, le_bswap(v, 16));
}

static inline void st24_le_p(void *ptr, uint32_t v)
{
    st24_he_p(ptr, le_bswap24(v));
}

static inline void stl_le_p(void *ptr, uint32_t v)
{
    stl_he_p(ptr, le_bswap(v, 32));
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    stq_he_p(ptr, le_bswap(v, 64));
}
/*
 * Big-endian loads/stores: built on the host-endian accessors above,
 * with be_bswap converting between BE storage order and host order
 * (a no-op on BE hosts).
 */
static inline int lduw_be_p(const void *ptr)
{
    return (uint16_t)be_bswap(lduw_he_p(ptr), 16);
}

static inline int ldsw_be_p(const void *ptr)
{
    return (int16_t)be_bswap(lduw_he_p(ptr), 16);
}

static inline int ldl_be_p(const void *ptr)
{
    return be_bswap(ldl_he_p(ptr), 32);
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return be_bswap(ldq_he_p(ptr), 64);
}

static inline void stw_be_p(void *ptr, uint16_t v)
{
    stw_he_p(ptr, be_bswap(v, 16));
}

static inline void stl_be_p(void *ptr, uint32_t v)
{
    stl_he_p(ptr, be_bswap(v, 32));
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stq_he_p(ptr, be_bswap(v, 64));
}
370 static inline unsigned long leul_to_cpu(unsigned long v
)
372 #if HOST_LONG_BITS == 32
373 return le_bswap(v
, 32);
374 #elif HOST_LONG_BITS == 64
375 return le_bswap(v
, 64);
377 # error Unknown sizeof long
/*
 * Generate the runtime-sized accessors stn_END_p()/ldn_END_p() for one
 * endianness suffix END.  stn stores @v to @ptr as an @sz-byte value;
 * ldn loads @sz bytes and zero-extends into a uint64_t.  Only sizes
 * 1/2/4/8 are valid; anything else aborts via g_assert_not_reached().
 *
 * NOTE(review): the switch skeletons were lost in the scraped source and
 * have been reconstructed — confirm against upstream.
 */
#define DO_STN_LDN_P(END)                                               \
    static inline void stn_## END ## _p(void *ptr, int sz, uint64_t v)  \
    {                                                                   \
        switch (sz) {                                                   \
        case 1:                                                         \
            stb_p(ptr, v);                                              \
            break;                                                      \
        case 2:                                                         \
            stw_ ## END ## _p(ptr, v);                                  \
            break;                                                      \
        case 4:                                                         \
            stl_ ## END ## _p(ptr, v);                                  \
            break;                                                      \
        case 8:                                                         \
            stq_ ## END ## _p(ptr, v);                                  \
            break;                                                      \
        default:                                                        \
            g_assert_not_reached();                                     \
        }                                                               \
    }                                                                   \
    static inline uint64_t ldn_## END ## _p(const void *ptr, int sz)    \
    {                                                                   \
        switch (sz) {                                                   \
        case 1:                                                         \
            return ldub_p(ptr);                                         \
        case 2:                                                         \
            return lduw_ ## END ## _p(ptr);                             \
        case 4:                                                         \
            return (uint32_t)ldl_ ## END ## _p(ptr);                    \
        case 8:                                                         \
            return ldq_ ## END ## _p(ptr);                              \
        default:                                                        \
            g_assert_not_reached();                                     \
        }                                                               \
    }