/* include/qemu/bswap.h */
#ifndef BSWAP_H
#define BSWAP_H

#ifdef CONFIG_MACHINE_BSWAP_H
# include <sys/endian.h>
# include <machine/bswap.h>
#elif defined(__FreeBSD__)
# include <sys/endian.h>
#elif defined(__HAIKU__)
# include <endian.h>
#elif defined(CONFIG_BYTESWAP_H)
# include <byteswap.h>
#define BSWAP_FROM_BYTESWAP
#else
#define BSWAP_FROM_FALLBACKS
#endif /* ! CONFIG_MACHINE_BSWAP_H */

#ifdef __cplusplus
extern "C" {
#endif

#ifdef BSWAP_FROM_BYTESWAP
static inline uint16_t bswap16(uint16_t x)
{
    return bswap_16(x);
}

static inline uint32_t bswap32(uint32_t x)
{
    return bswap_32(x);
}

static inline uint64_t bswap64(uint64_t x)
{
    return bswap_64(x);
}
#endif

#ifdef BSWAP_FROM_FALLBACKS
#undef bswap16
#define bswap16(_x) __builtin_bswap16(_x)
#undef bswap32
#define bswap32(_x) __builtin_bswap32(_x)
#undef bswap64
#define bswap64(_x) __builtin_bswap64(_x)
#endif

#undef BSWAP_FROM_BYTESWAP
#undef BSWAP_FROM_FALLBACKS

static inline void bswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void bswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void bswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#if HOST_BIG_ENDIAN
#define be_bswap(v, size) (v)
#define le_bswap(v, size) glue(bswap, size)(v)
#define be_bswaps(v, size)
#define le_bswaps(p, size) do { *p = glue(bswap, size)(*p); } while(0)
#else
#define le_bswap(v, size) (v)
#define be_bswap(v, size) glue(bswap, size)(v)
#define le_bswaps(v, size)
#define be_bswaps(p, size) do { *p = glue(bswap, size)(*p); } while(0)
#endif

/**
 * Endianness conversion functions between host CPU and specified endianness.
 * (We list the complete set of prototypes produced by the macros below
 * to assist people who search the headers to find their definitions.)
 *
 * uint16_t le16_to_cpu(uint16_t v);
 * uint32_t le32_to_cpu(uint32_t v);
 * uint64_t le64_to_cpu(uint64_t v);
 * uint16_t be16_to_cpu(uint16_t v);
 * uint32_t be32_to_cpu(uint32_t v);
 * uint64_t be64_to_cpu(uint64_t v);
 *
 * Convert the value @v from the specified format to the native
 * endianness of the host CPU by byteswapping if necessary, and
 * return the converted value.
 *
 * uint16_t cpu_to_le16(uint16_t v);
 * uint32_t cpu_to_le32(uint32_t v);
 * uint64_t cpu_to_le64(uint64_t v);
 * uint16_t cpu_to_be16(uint16_t v);
 * uint32_t cpu_to_be32(uint32_t v);
 * uint64_t cpu_to_be64(uint64_t v);
 *
 * Convert the value @v from the native endianness of the host CPU to
 * the specified format by byteswapping if necessary, and return
 * the converted value.
 *
 * void le16_to_cpus(uint16_t *v);
 * void le32_to_cpus(uint32_t *v);
 * void le64_to_cpus(uint64_t *v);
 * void be16_to_cpus(uint16_t *v);
 * void be32_to_cpus(uint32_t *v);
 * void be64_to_cpus(uint64_t *v);
 *
 * Do an in-place conversion of the value pointed to by @v from the
 * specified format to the native endianness of the host CPU.
 *
 * void cpu_to_le16s(uint16_t *v);
 * void cpu_to_le32s(uint32_t *v);
 * void cpu_to_le64s(uint64_t *v);
 * void cpu_to_be16s(uint16_t *v);
 * void cpu_to_be32s(uint32_t *v);
 * void cpu_to_be64s(uint64_t *v);
 *
 * Do an in-place conversion of the value pointed to by @v from the
 * native endianness of the host CPU to the specified format.
 *
 * Both X_to_cpu() and cpu_to_X() perform the same operation; you
 * should use whichever one better documents what your code is doing.
 *
 * Do not use these functions for conversion of values which are in guest
 * memory, since the data may not be sufficiently aligned for the host CPU's
 * load and store instructions. Instead you should use the ld*_p() and
 * st*_p() functions, which perform loads and stores of data of any
 * required size and endianness and handle possible misalignment.
 */

#define CPU_CONVERT(endian, size, type)\
static inline type endian ## size ## _to_cpu(type v)\
{\
    return glue(endian, _bswap)(v, size);\
}\
\
static inline type cpu_to_ ## endian ## size(type v)\
{\
    return glue(endian, _bswap)(v, size);\
}\
\
static inline void endian ## size ## _to_cpus(type *p)\
{\
    glue(endian, _bswaps)(p, size);\
}\
\
static inline void cpu_to_ ## endian ## size ## s(type *p)\
{\
    glue(endian, _bswaps)(p, size);\
}

CPU_CONVERT(be, 16, uint16_t)
CPU_CONVERT(be, 32, uint32_t)
CPU_CONVERT(be, 64, uint64_t)

CPU_CONVERT(le, 16, uint16_t)
CPU_CONVERT(le, 32, uint32_t)
CPU_CONVERT(le, 64, uint64_t)

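/*
 * Illustrative sketch only; the structure and field names below are
 * hypothetical and not part of QEMU.  Typical use of the converters above
 * on naturally aligned values, here a header kept little-endian in memory:
 *
 *     struct disk_header {
 *         uint32_t magic;      // stored little-endian
 *         uint16_t version;    // stored little-endian
 *     };
 *
 *     static void fill_header(struct disk_header *h, uint32_t magic)
 *     {
 *         h->magic = cpu_to_le32(magic);
 *         h->version = cpu_to_le16(3);
 *     }
 *
 *     static uint32_t header_magic(const struct disk_header *h)
 *     {
 *         return le32_to_cpu(h->magic);
 *     }
 *
 * The in-place forms do the same conversion through a pointer, e.g.
 * le16_to_cpus(&h->version) converts h->version to host endianness.
 */
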
/*
 * Same as cpu_to_le{16,32}, except that gcc will figure the result is
 * a compile-time constant if you pass in a constant. So this can be
 * used to initialize static variables.
 */
#if HOST_BIG_ENDIAN
# define const_le32(_x)                       \
    ((((_x) & 0x000000ffU) << 24) |           \
     (((_x) & 0x0000ff00U) << 8) |            \
     (((_x) & 0x00ff0000U) >> 8) |            \
     (((_x) & 0xff000000U) >> 24))
# define const_le16(_x)                       \
    ((((_x) & 0x00ff) << 8) |                 \
     (((_x) & 0xff00) >> 8))
#else
# define const_le32(_x) (_x)
# define const_le16(_x) (_x)
#endif

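/*
 * Illustrative sketch; the variable names are hypothetical.  Because
 * const_le32()/const_le16() expand to constant expressions, they may be
 * used in static initializers, where a call to cpu_to_le32() would not
 * be accepted:
 *
 *     static const uint32_t wire_magic = const_le32(0x12345678);
 *     static const uint16_t wire_flags = const_le16(0x0001);
 *
 * At run time the same values could equally be produced with
 * cpu_to_le32() and cpu_to_le16().
 */
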
/* unaligned/endian-independent pointer access */

/*
 * the generic syntax is:
 *
 * load: ld{type}{sign}{size}_{endian}_p(ptr)
 *
 * store: st{type}{size}_{endian}_p(ptr, val)
 *
 * Note there are small differences with the softmmu access API!
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for 32 or 64 bit sizes (including floats and doubles)
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 *   he : host endian
 *   be : big endian
 *   le : little endian
 *   te : target endian
 * (except for byte accesses, which have no endian infix).
 *
 * The target endian accessors are obviously only available to source
 * files which are built per-target; they are defined in cpu-all.h.
 *
 * In all cases these functions take a host pointer.
 * For accessors that take a guest address rather than a
 * host address, see the cpu_{ld,st}_* accessors defined in
 * cpu_ldst.h.
 *
 * For cases where the size to be used is not fixed at compile time,
 * there are
 *   stn_{endian}_p(ptr, sz, val)
 * which stores @val to @ptr as an @endian-order number @sz bytes in size
 * and
 *   ldn_{endian}_p(ptr, sz)
 * which loads @sz bytes from @ptr as an unsigned @endian-order number
 * and returns it in a uint64_t.
 */

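/*
 * A few decoded examples of the naming scheme above, for orientation:
 *
 *   lduw_le_p(p)     load unsigned 16-bit ("w") little-endian value from p
 *   ldsw_be_p(p)     load signed 16-bit big-endian value from p
 *   ldl_he_p(p)      load 32-bit ("l") value in host endianness from p
 *   stq_be_p(p, v)   store v to p as a 64-bit ("q") big-endian value
 *   ldub_p(p)        load unsigned byte; byte accesses have no endian infix
 */
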
static inline int ldub_p(const void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(const void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, uint8_t v)
{
    *(uint8_t *)ptr = v;
}

/*
 * Any compiler worth its salt will turn these memcpy calls into native
 * unaligned operations.  Thus we don't need to play games with packed
 * attributes, or inline byte-by-byte stores.
 * Some compilation environments (e.g. some fortify-source implementations)
 * may intercept memcpy() in a way that defeats the compiler optimization,
 * though, so we use __builtin_memcpy() to give ourselves the best chance
 * of good performance.
 */

static inline int lduw_he_p(const void *ptr)
{
    uint16_t r;
    __builtin_memcpy(&r, ptr, sizeof(r));
    return r;
}

static inline int ldsw_he_p(const void *ptr)
{
    int16_t r;
    __builtin_memcpy(&r, ptr, sizeof(r));
    return r;
}

static inline void stw_he_p(void *ptr, uint16_t v)
{
    __builtin_memcpy(ptr, &v, sizeof(v));
}

static inline int ldl_he_p(const void *ptr)
{
    int32_t r;
    __builtin_memcpy(&r, ptr, sizeof(r));
    return r;
}

static inline void stl_he_p(void *ptr, uint32_t v)
{
    __builtin_memcpy(ptr, &v, sizeof(v));
}

static inline uint64_t ldq_he_p(const void *ptr)
{
    uint64_t r;
    __builtin_memcpy(&r, ptr, sizeof(r));
    return r;
}

static inline void stq_he_p(void *ptr, uint64_t v)
{
    __builtin_memcpy(ptr, &v, sizeof(v));
}

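/*
 * Illustrative sketch; the buffer and function names are hypothetical.
 * Because the accessors above go through memcpy, the pointer may have any
 * alignment, e.g. a 32-bit field at an odd offset inside a byte buffer:
 *
 *     static uint32_t read_u32_at(const uint8_t *buf, size_t offset)
 *     {
 *         return (uint32_t)ldl_he_p(buf + offset);   // no alignment needed
 *     }
 *
 * A direct *(const uint32_t *)(buf + offset) could fault or be undefined
 * behaviour on hosts with strict alignment requirements.
 */
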
static inline int lduw_le_p(const void *ptr)
{
    return (uint16_t)le_bswap(lduw_he_p(ptr), 16);
}

static inline int ldsw_le_p(const void *ptr)
{
    return (int16_t)le_bswap(lduw_he_p(ptr), 16);
}

static inline int ldl_le_p(const void *ptr)
{
    return le_bswap(ldl_he_p(ptr), 32);
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return le_bswap(ldq_he_p(ptr), 64);
}

static inline void stw_le_p(void *ptr, uint16_t v)
{
    stw_he_p(ptr, le_bswap(v, 16));
}

static inline void stl_le_p(void *ptr, uint32_t v)
{
    stl_he_p(ptr, le_bswap(v, 32));
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    stq_he_p(ptr, le_bswap(v, 64));
}

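/*
 * Illustrative sketch; the record layout and names are hypothetical.
 * Parsing a little-endian on-disk structure directly from a byte buffer,
 * independent of host endianness and alignment:
 *
 *     // layout: bytes 0-3 size (LE), bytes 4-5 flags (LE)
 *     static void parse_record(const uint8_t *buf,
 *                              uint32_t *size, uint16_t *flags)
 *     {
 *         *size = ldl_le_p(buf);
 *         *flags = lduw_le_p(buf + 4);
 *     }
 *
 * The matching stl_le_p()/stw_le_p() calls would serialise the record back
 * into the buffer.
 */
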
static inline int lduw_be_p(const void *ptr)
{
    return (uint16_t)be_bswap(lduw_he_p(ptr), 16);
}

static inline int ldsw_be_p(const void *ptr)
{
    return (int16_t)be_bswap(lduw_he_p(ptr), 16);
}

static inline int ldl_be_p(const void *ptr)
{
    return be_bswap(ldl_he_p(ptr), 32);
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return be_bswap(ldq_he_p(ptr), 64);
}

static inline void stw_be_p(void *ptr, uint16_t v)
{
    stw_he_p(ptr, be_bswap(v, 16));
}

static inline void stl_be_p(void *ptr, uint32_t v)
{
    stl_he_p(ptr, be_bswap(v, 32));
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stq_he_p(ptr, be_bswap(v, 64));
}

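/*
 * Illustrative sketch; the packet layout and names are hypothetical.
 * Network byte order is big-endian, so the be accessors are the natural
 * choice when building or decoding such data in a byte buffer:
 *
 *     static void put_port_and_len(uint8_t *pkt, uint16_t port, uint32_t len)
 *     {
 *         stw_be_p(pkt, port);      // bytes 0-1, big-endian
 *         stl_be_p(pkt + 2, len);   // bytes 2-5, big-endian
 *     }
 *
 *     static uint16_t get_port(const uint8_t *pkt)
 *     {
 *         return lduw_be_p(pkt);
 *     }
 */
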
static inline unsigned long leul_to_cpu(unsigned long v)
{
#if HOST_LONG_BITS == 32
    return le_bswap(v, 32);
#elif HOST_LONG_BITS == 64
    return le_bswap(v, 64);
#else
# error Unknown sizeof long
#endif
}

/* Store v to p, or load from p, as a sz byte value of the given endianness */
#define DO_STN_LDN_P(END) \
static inline void stn_## END ## _p(void *ptr, int sz, uint64_t v) \
{ \
    switch (sz) { \
    case 1: \
        stb_p(ptr, v); \
        break; \
    case 2: \
        stw_ ## END ## _p(ptr, v); \
        break; \
    case 4: \
        stl_ ## END ## _p(ptr, v); \
        break; \
    case 8: \
        stq_ ## END ## _p(ptr, v); \
        break; \
    default: \
        g_assert_not_reached(); \
    } \
} \
static inline uint64_t ldn_## END ## _p(const void *ptr, int sz) \
{ \
    switch (sz) { \
    case 1: \
        return ldub_p(ptr); \
    case 2: \
        return lduw_ ## END ## _p(ptr); \
    case 4: \
        return (uint32_t)ldl_ ## END ## _p(ptr); \
    case 8: \
        return ldq_ ## END ## _p(ptr); \
    default: \
        g_assert_not_reached(); \
    } \
}

DO_STN_LDN_P(he)
DO_STN_LDN_P(le)
DO_STN_LDN_P(be)

#undef DO_STN_LDN_P

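/*
 * Illustrative sketch; the register-bank model and names are hypothetical.
 * The variable-size forms are convenient when the access width is only
 * known at run time, e.g. a device register bank that may be accessed with
 * 1, 2, 4 or 8 byte operations:
 *
 *     static uint64_t reg_read(const uint8_t *regs, unsigned offset, int size)
 *     {
 *         return ldn_le_p(regs + offset, size);
 *     }
 *
 *     static void reg_write(uint8_t *regs, unsigned offset, int size,
 *                           uint64_t val)
 *     {
 *         stn_le_p(regs + offset, size, val);
 *     }
 *
 * size must be 1, 2, 4 or 8; any other value hits g_assert_not_reached().
 */
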
#undef le_bswap
#undef be_bswap
#undef le_bswaps
#undef be_bswaps

#ifdef __cplusplus
}
#endif

#endif /* BSWAP_H */