/* QEMU byte-swap and endian-aware memory access helpers (include/qemu/bswap.h). */
#ifndef BSWAP_H
#define BSWAP_H

/*
 * Unconditional byte-swap macros, mapped onto the compiler builtins
 * so they can expand to single byte-reverse instructions where the
 * target supports them.  The #undefs drop any definitions a system
 * header may already have made for these names.
 */
#undef bswap16
#define bswap16(_x) __builtin_bswap16(_x)
#undef bswap32
#define bswap32(_x) __builtin_bswap32(_x)
#undef bswap64
#define bswap64(_x) __builtin_bswap64(_x)
10
/*
 * Reverse the order of the three low-order bytes of @x and return
 * the result in the low 24 bits; bits 24-31 of @x are ignored and
 * the returned high byte is always zero.
 */
static inline uint32_t bswap24(uint32_t x)
{
    uint32_t lo  = x & 0xffu;          /* bits  7..0  -> bits 23..16 */
    uint32_t mid = x & 0xff00u;        /* bits 15..8  stay in place  */
    uint32_t hi  = (x >> 16) & 0xffu;  /* bits 23..16 -> bits  7..0  */

    return (lo << 16) | mid | hi;
}
17
/* Byte-swap the 16-bit value pointed to by @s in place. */
static inline void bswap16s(uint16_t *s)
{
    uint16_t swapped = __builtin_bswap16(*s);
    *s = swapped;
}
22
/*
 * Byte-swap the low 24 bits of the value pointed to by @s in place;
 * the high byte of the result is always zero, regardless of the
 * high byte of the input.
 */
static inline void bswap24s(uint32_t *s)
{
    uint32_t x = *s;

    *s = ((x & 0xffu) << 16) |
         (x & 0xff00u) |
         ((x >> 16) & 0xffu);
}
27
/* Byte-swap the 32-bit value pointed to by @s in place. */
static inline void bswap32s(uint32_t *s)
{
    uint32_t swapped = __builtin_bswap32(*s);
    *s = swapped;
}
32
/* Byte-swap the 64-bit value pointed to by @s in place. */
static inline void bswap64s(uint64_t *s)
{
    uint64_t swapped = __builtin_bswap64(*s);
    *s = swapped;
}
37
/*
 * Private helpers for CPU_CONVERT below: {le,be}_bswap(v, size)
 * converts a value between host endianness and the named endianness,
 * and {le,be}_bswaps(p, size) does the same conversion in place
 * through a pointer.  On a big-endian host the "be" forms are no-ops
 * and the "le" forms byte-swap; vice versa on a little-endian host.
 * glue() token-pastes to select __builtin_bswap{16,32,64} — it is
 * provided by another QEMU header, not defined here.
 * All of these are #undef'd at the end of this header.
 */
#if HOST_BIG_ENDIAN
#define be_bswap(v, size) (v)
#define le_bswap(v, size) glue(__builtin_bswap, size)(v)
#define le_bswap24(v) bswap24(v)
#define be_bswaps(v, size)
#define le_bswaps(p, size) \
    do { *p = glue(__builtin_bswap, size)(*p); } while (0)
#else
#define le_bswap(v, size) (v)
#define le_bswap24(v) (v)
#define be_bswap(v, size) glue(__builtin_bswap, size)(v)
#define le_bswaps(v, size)
#define be_bswaps(p, size) \
    do { *p = glue(__builtin_bswap, size)(*p); } while (0)
#endif
53
54 /**
55 * Endianness conversion functions between host cpu and specified endianness.
56 * (We list the complete set of prototypes produced by the macros below
57 * to assist people who search the headers to find their definitions.)
58 *
59 * uint16_t le16_to_cpu(uint16_t v);
60 * uint32_t le32_to_cpu(uint32_t v);
61 * uint64_t le64_to_cpu(uint64_t v);
62 * uint16_t be16_to_cpu(uint16_t v);
63 * uint32_t be32_to_cpu(uint32_t v);
64 * uint64_t be64_to_cpu(uint64_t v);
65 *
66 * Convert the value @v from the specified format to the native
67 * endianness of the host CPU by byteswapping if necessary, and
68 * return the converted value.
69 *
70 * uint16_t cpu_to_le16(uint16_t v);
71 * uint32_t cpu_to_le32(uint32_t v);
72 * uint64_t cpu_to_le64(uint64_t v);
73 * uint16_t cpu_to_be16(uint16_t v);
74 * uint32_t cpu_to_be32(uint32_t v);
75 * uint64_t cpu_to_be64(uint64_t v);
76 *
77 * Convert the value @v from the native endianness of the host CPU to
78 * the specified format by byteswapping if necessary, and return
79 * the converted value.
80 *
81 * void le16_to_cpus(uint16_t *v);
82 * void le32_to_cpus(uint32_t *v);
83 * void le64_to_cpus(uint64_t *v);
84 * void be16_to_cpus(uint16_t *v);
85 * void be32_to_cpus(uint32_t *v);
86 * void be64_to_cpus(uint64_t *v);
87 *
88 * Do an in-place conversion of the value pointed to by @v from the
89 * specified format to the native endianness of the host CPU.
90 *
91 * void cpu_to_le16s(uint16_t *v);
92 * void cpu_to_le32s(uint32_t *v);
93 * void cpu_to_le64s(uint64_t *v);
94 * void cpu_to_be16s(uint16_t *v);
95 * void cpu_to_be32s(uint32_t *v);
96 * void cpu_to_be64s(uint64_t *v);
97 *
98 * Do an in-place conversion of the value pointed to by @v from the
99 * native endianness of the host CPU to the specified format.
100 *
101 * Both X_to_cpu() and cpu_to_X() perform the same operation; you
102 * should use whichever one is better documenting of the function your
103 * code is performing.
104 *
105 * Do not use these functions for conversion of values which are in guest
106 * memory, since the data may not be sufficiently aligned for the host CPU's
107 * load and store instructions. Instead you should use the ld*_p() and
108 * st*_p() functions, which perform loads and stores of data of any
109 * required size and endianness and handle possible misalignment.
110 */
111
/*
 * CPU_CONVERT(endian, size, type) expands to the four conversion
 * functions listed in the comment above for one endianness/width
 * pair: {endian}{size}_to_cpu(), cpu_to_{endian}{size}(), and their
 * in-place "s" variants.  All four are built on the {le,be}_bswap(s)
 * helper macros, so each compiles to either a no-op or a single
 * byte swap depending on the host endianness.
 */
#define CPU_CONVERT(endian, size, type)\
static inline type endian ## size ## _to_cpu(type v)\
{\
    return glue(endian, _bswap)(v, size);\
}\
\
static inline type cpu_to_ ## endian ## size(type v)\
{\
    return glue(endian, _bswap)(v, size);\
}\
\
static inline void endian ## size ## _to_cpus(type *p)\
{\
    glue(endian, _bswaps)(p, size);\
}\
\
static inline void cpu_to_ ## endian ## size ## s(type *p)\
{\
    glue(endian, _bswaps)(p, size);\
}

/* Instantiate the full set of prototypes documented above. */
CPU_CONVERT(be, 16, uint16_t)
CPU_CONVERT(be, 32, uint32_t)
CPU_CONVERT(be, 64, uint64_t)

CPU_CONVERT(le, 16, uint16_t)
CPU_CONVERT(le, 32, uint32_t)
CPU_CONVERT(le, 64, uint64_t)
140
/*
 * Same as cpu_to_le{16,32,64}, except that gcc will figure the result is
 * a compile-time constant if you pass in a constant.  So this can be
 * used to initialize static variables.
 * (The swap is open-coded with shifts and masks rather than the
 * __builtin_bswap builtins so that the expansion is an integer
 * constant expression.)
 */
#if HOST_BIG_ENDIAN
# define const_le64(_x)                          \
    ((((_x) & 0x00000000000000ffULL) << 56) |    \
     (((_x) & 0x000000000000ff00ULL) << 40) |    \
     (((_x) & 0x0000000000ff0000ULL) << 24) |    \
     (((_x) & 0x00000000ff000000ULL) <<  8) |    \
     (((_x) & 0x000000ff00000000ULL) >>  8) |    \
     (((_x) & 0x0000ff0000000000ULL) >> 24) |    \
     (((_x) & 0x00ff000000000000ULL) >> 40) |    \
     (((_x) & 0xff00000000000000ULL) >> 56))
# define const_le32(_x)                          \
    ((((_x) & 0x000000ffU) << 24) |              \
     (((_x) & 0x0000ff00U) <<  8) |              \
     (((_x) & 0x00ff0000U) >>  8) |              \
     (((_x) & 0xff000000U) >> 24))
# define const_le16(_x)                          \
    ((((_x) & 0x00ff) << 8) |                    \
     (((_x) & 0xff00) >> 8))
#else
# define const_le64(_x) (_x)
# define const_le32(_x) (_x)
# define const_le16(_x) (_x)
#endif
169
170 /* unaligned/endian-independent pointer access */
171
172 /*
173 * the generic syntax is:
174 *
175 * load: ld{type}{sign}{size}_{endian}_p(ptr)
176 *
177 * store: st{type}{size}_{endian}_p(ptr, val)
178 *
179 * Note there are small differences with the softmmu access API!
180 *
181 * type is:
182 * (empty): integer access
183 * f : float access
184 *
185 * sign is:
186 * (empty): for 32 or 64 bit sizes (including floats and doubles)
187 * u : unsigned
188 * s : signed
189 *
190 * size is:
191 * b: 8 bits
192 * w: 16 bits
193 * 24: 24 bits
194 * l: 32 bits
195 * q: 64 bits
196 *
197 * endian is:
198 * he : host endian
199 * be : big endian
200 * le : little endian
201 * te : target endian
202 * (except for byte accesses, which have no endian infix).
203 *
204 * The target endian accessors are obviously only available to source
205 * files which are built per-target; they are defined in cpu-all.h.
206 *
207 * In all cases these functions take a host pointer.
208 * For accessors that take a guest address rather than a
209 * host address, see the cpu_{ld,st}_* accessors defined in
210 * cpu_ldst.h.
211 *
212 * For cases where the size to be used is not fixed at compile time,
213 * there are
214 * stn_{endian}_p(ptr, sz, val)
215 * which stores @val to @ptr as an @endian-order number @sz bytes in size
216 * and
217 * ldn_{endian}_p(ptr, sz)
218 * which loads @sz bytes from @ptr as an unsigned @endian-order number
219 * and returns it in a uint64_t.
220 */
221
/* Load one unsigned byte from @ptr; bytes have no alignment or endian issues. */
static inline int ldub_p(const void *ptr)
{
    const uint8_t *byte = ptr;
    return *byte;
}
226
/* Load one byte from @ptr, sign-extended to int. */
static inline int ldsb_p(const void *ptr)
{
    const int8_t *byte = ptr;
    return *byte;
}
231
/* Store one byte @v to @ptr. */
static inline void stb_p(void *ptr, uint8_t v)
{
    uint8_t *byte = ptr;
    *byte = v;
}
236
237 /*
238 * Any compiler worth its salt will turn these memcpy into native unaligned
239 * operations. Thus we don't need to play games with packed attributes, or
240 * inline byte-by-byte stores.
241 * Some compilation environments (eg some fortify-source implementations)
242 * may intercept memcpy() in a way that defeats the compiler optimization,
243 * though, so we use __builtin_memcpy() to give ourselves the best chance
244 * of good performance.
245 */
246
/*
 * Load a 16-bit value in host byte order from @ptr, which may be
 * misaligned.  The copy goes through __builtin_memcpy so the compiler
 * can lower it to a single (unaligned-safe) load.
 */
static inline int lduw_he_p(const void *ptr)
{
    uint16_t val;
    __builtin_memcpy(&val, ptr, sizeof(val));
    return val;
}
253
/*
 * Load a 16-bit value in host byte order from (possibly misaligned)
 * @ptr and sign-extend it to int.
 */
static inline int ldsw_he_p(const void *ptr)
{
    int16_t val;
    __builtin_memcpy(&val, ptr, sizeof(val));
    return val;
}
260
/* Store 16-bit @v in host byte order to (possibly misaligned) @ptr. */
static inline void stw_he_p(void *ptr, uint16_t v)
{
    __builtin_memcpy(ptr, &v, sizeof(v));
}
265
/*
 * Store a 24-bit value to (possibly misaligned) @ptr by copying the
 * first three bytes of @v's in-memory representation.  On a
 * little-endian host these are the three least-significant bytes.
 * NOTE(review): on a big-endian host the first three bytes of a
 * uint32_t are the *most* significant ones — confirm that the
 * le_bswap24() applied by st24_le_p() compensates for that.
 */
static inline void st24_he_p(void *ptr, uint32_t v)
{
    __builtin_memcpy(ptr, &v, 3);
}
270
/*
 * Load a 32-bit value in host byte order from (possibly misaligned)
 * @ptr; the value is read as signed, matching the int return type.
 */
static inline int ldl_he_p(const void *ptr)
{
    int32_t val;
    __builtin_memcpy(&val, ptr, sizeof(val));
    return val;
}
277
/* Store 32-bit @v in host byte order to (possibly misaligned) @ptr. */
static inline void stl_he_p(void *ptr, uint32_t v)
{
    __builtin_memcpy(ptr, &v, sizeof(v));
}
282
/* Load a 64-bit value in host byte order from (possibly misaligned) @ptr. */
static inline uint64_t ldq_he_p(const void *ptr)
{
    uint64_t val;
    __builtin_memcpy(&val, ptr, sizeof(val));
    return val;
}
289
/* Store 64-bit @v in host byte order to (possibly misaligned) @ptr. */
static inline void stq_he_p(void *ptr, uint64_t v)
{
    __builtin_memcpy(ptr, &v, sizeof(v));
}
294
/*
 * Little-endian accessors: load a little-endian value of the given
 * width from (possibly misaligned) @ptr and return it in host byte
 * order, or convert host-order @v to little-endian and store it.
 * Built on the *_he_p helpers plus the le_bswap macros, so on a
 * little-endian host these are plain unaligned loads/stores.
 */

/* Load an unsigned 16-bit little-endian value. */
static inline int lduw_le_p(const void *ptr)
{
    return (uint16_t)le_bswap(lduw_he_p(ptr), 16);
}

/* Load a 16-bit little-endian value, sign-extended to int. */
static inline int ldsw_le_p(const void *ptr)
{
    return (int16_t)le_bswap(lduw_he_p(ptr), 16);
}

/* Load a 32-bit little-endian value (returned as signed int). */
static inline int ldl_le_p(const void *ptr)
{
    return le_bswap(ldl_he_p(ptr), 32);
}

/* Load a 64-bit little-endian value. */
static inline uint64_t ldq_le_p(const void *ptr)
{
    return le_bswap(ldq_he_p(ptr), 64);
}

/* Store a 16-bit value as little-endian. */
static inline void stw_le_p(void *ptr, uint16_t v)
{
    stw_he_p(ptr, le_bswap(v, 16));
}

/*
 * Store the low 24 bits of @v as a 3-byte little-endian value.
 * NOTE(review): correctness on big-endian hosts depends on the
 * interaction of le_bswap24() with st24_he_p()'s 3-byte memcpy —
 * verify on such a host.
 */
static inline void st24_le_p(void *ptr, uint32_t v)
{
    st24_he_p(ptr, le_bswap24(v));
}

/* Store a 32-bit value as little-endian. */
static inline void stl_le_p(void *ptr, uint32_t v)
{
    stl_he_p(ptr, le_bswap(v, 32));
}

/* Store a 64-bit value as little-endian. */
static inline void stq_le_p(void *ptr, uint64_t v)
{
    stq_he_p(ptr, le_bswap(v, 64));
}
334
/*
 * Big-endian accessors: load a big-endian value of the given width
 * from (possibly misaligned) @ptr and return it in host byte order,
 * or convert host-order @v to big-endian and store it.  Built on the
 * *_he_p helpers plus the be_bswap macros, so on a big-endian host
 * these are plain unaligned loads/stores.
 */

/* Load an unsigned 16-bit big-endian value. */
static inline int lduw_be_p(const void *ptr)
{
    return (uint16_t)be_bswap(lduw_he_p(ptr), 16);
}

/* Load a 16-bit big-endian value, sign-extended to int. */
static inline int ldsw_be_p(const void *ptr)
{
    return (int16_t)be_bswap(lduw_he_p(ptr), 16);
}

/* Load a 32-bit big-endian value (returned as signed int). */
static inline int ldl_be_p(const void *ptr)
{
    return be_bswap(ldl_he_p(ptr), 32);
}

/* Load a 64-bit big-endian value. */
static inline uint64_t ldq_be_p(const void *ptr)
{
    return be_bswap(ldq_he_p(ptr), 64);
}

/* Store a 16-bit value as big-endian. */
static inline void stw_be_p(void *ptr, uint16_t v)
{
    stw_he_p(ptr, be_bswap(v, 16));
}

/* Store a 32-bit value as big-endian. */
static inline void stl_be_p(void *ptr, uint32_t v)
{
    stl_he_p(ptr, be_bswap(v, 32));
}

/* Store a 64-bit value as big-endian. */
static inline void stq_be_p(void *ptr, uint64_t v)
{
    stq_he_p(ptr, be_bswap(v, 64));
}
369
/*
 * Convert a little-endian 'unsigned long' to host byte order,
 * picking the 32- or 64-bit swap to match the host's long width
 * (HOST_LONG_BITS is provided by QEMU's build configuration).
 */
static inline unsigned long leul_to_cpu(unsigned long v)
{
#if HOST_LONG_BITS == 32
    return le_bswap(v, 32);
#elif HOST_LONG_BITS == 64
    return le_bswap(v, 64);
#else
# error Unknown sizeof long
#endif
}
380
/*
 * DO_STN_LDN_P(END) expands to the variable-size accessors
 * stn_{END}_p(ptr, sz, v) and ldn_{END}_p(ptr, sz) documented in the
 * comment near the top of this header: they store/load an
 * END-endian value of @sz bytes (1, 2, 4 or 8; anything else
 * asserts) by dispatching to the fixed-size helpers above.
 */
#define DO_STN_LDN_P(END) \
    static inline void stn_## END ## _p(void *ptr, int sz, uint64_t v)  \
    {                                                                   \
        switch (sz) {                                                   \
        case 1:                                                         \
            stb_p(ptr, v);                                              \
            break;                                                      \
        case 2:                                                         \
            stw_ ## END ## _p(ptr, v);                                  \
            break;                                                      \
        case 4:                                                         \
            stl_ ## END ## _p(ptr, v);                                  \
            break;                                                      \
        case 8:                                                         \
            stq_ ## END ## _p(ptr, v);                                  \
            break;                                                      \
        default:                                                        \
            g_assert_not_reached();                                     \
        }                                                               \
    }                                                                   \
    static inline uint64_t ldn_## END ## _p(const void *ptr, int sz)    \
    {                                                                   \
        switch (sz) {                                                   \
        case 1:                                                         \
            return ldub_p(ptr);                                         \
        case 2:                                                         \
            return lduw_ ## END ## _p(ptr);                             \
        case 4:                                                         \
            return (uint32_t)ldl_ ## END ## _p(ptr);                    \
        case 8:                                                         \
            return ldq_ ## END ## _p(ptr);                              \
        default:                                                        \
            g_assert_not_reached();                                     \
        }                                                               \
    }

/* Instantiate for host-endian, little-endian and big-endian. */
DO_STN_LDN_P(he)
DO_STN_LDN_P(le)
DO_STN_LDN_P(be)

#undef DO_STN_LDN_P
423
/*
 * The {le,be}_bswap(s) helpers are implementation details of this
 * header; remove them so they do not leak into including files.
 */
#undef le_bswap
#undef be_bswap
#undef le_bswaps
#undef be_bswaps

#endif /* BSWAP_H */