#ifndef SG_UNALIGNED_H
#define SG_UNALIGNED_H

/*
 * Copyright (c) 2014-2018 Douglas Gilbert.
 * Use of this source code is governed by a BSD-style
 * license that can be found in the BSD_LICENSE file.
 */
12 #include <stdint.h> /* for uint8_t and friends */
13 #include <string.h> /* for memcpy */
19 /* These inline functions convert integers (always unsigned) to byte streams
20 * and vice versa. They have two goals:
21 * - change the byte ordering of integers between host order and big
22 * endian ("_be") or little endian ("_le")
23 * - copy the big or little endian byte stream so it complies with any
24 * alignment that host integers require
26 * Host integer to given endian byte stream is a "_put_" function taking
27 * two arguments (integer and pointer to byte stream) returning void.
28 * Given endian byte stream to host integer is a "_get_" function that takes
29 * one argument and returns an integer of appropriate size (uint32_t for 24
30 * bit operations, uint64_t for 48 bit operations).
32 * Big endian byte format "on the wire" is the default used by SCSI
33 * standards (www.t10.org). Big endian is also the network byte order.
34 * Little endian is used by ATA, PCI and NVMe.
37 /* The generic form of these routines was borrowed from the Linux kernel,
38 * via mhvtl. There is a specialised version of the main functions for
39 * little endian or big endian provided that not-quite-standard defines for
40 * endianness are available from the compiler and the <byteswap.h> header
41 * (a GNU extension) has been detected by ./configure . To force the
42 * generic version, use './configure --disable-fast-lebe ' . */
44 /* Note: Assumes that the source and destination locations do not overlap.
45 * An example of overlapping source and destination:
46 * sg_put_unaligned_le64(j, ((uint8_t *)&j) + 1);
47 * Best not to do things like that.
52 #include "config.h" /* need this to see if HAVE_BYTESWAP_H */
55 #undef GOT_UNALIGNED_SPECIALS /* just in case */
57 #if defined(__BYTE_ORDER__) && defined(HAVE_BYTESWAP_H) && \
58 ! defined(IGNORE_FAST_LEBE)
60 #if defined(__LITTLE_ENDIAN__) || (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
62 #define GOT_UNALIGNED_SPECIALS 1
64 #include <byteswap.h> /* for bswap_16(), bswap_32() and bswap_64() */
66 // #warning ">>>>>> Doing Little endian special unaligneds"
68 static inline uint16_t sg_get_unaligned_be16(const void *p
)
76 static inline uint32_t sg_get_unaligned_be32(const void *p
)
84 static inline uint64_t sg_get_unaligned_be64(const void *p
)
92 static inline void sg_put_unaligned_be16(uint16_t val
, void *p
)
94 uint16_t u
= bswap_16(val
);
99 static inline void sg_put_unaligned_be32(uint32_t val
, void *p
)
101 uint32_t u
= bswap_32(val
);
106 static inline void sg_put_unaligned_be64(uint64_t val
, void *p
)
108 uint64_t u
= bswap_64(val
);
113 static inline uint16_t sg_get_unaligned_le16(const void *p
)
121 static inline uint32_t sg_get_unaligned_le32(const void *p
)
129 static inline uint64_t sg_get_unaligned_le64(const void *p
)
137 static inline void sg_put_unaligned_le16(uint16_t val
, void *p
)
142 static inline void sg_put_unaligned_le32(uint32_t val
, void *p
)
147 static inline void sg_put_unaligned_le64(uint64_t val
, void *p
)
152 #elif defined(__BIG_ENDIAN__) || (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
154 #define GOT_UNALIGNED_SPECIALS 1
156 #include <byteswap.h>
158 // #warning ">>>>>> Doing BIG endian special unaligneds"
160 static inline uint16_t sg_get_unaligned_le16(const void *p
)
168 static inline uint32_t sg_get_unaligned_le32(const void *p
)
176 static inline uint64_t sg_get_unaligned_le64(const void *p
)
184 static inline void sg_put_unaligned_le16(uint16_t val
, void *p
)
186 uint16_t u
= bswap_16(val
);
191 static inline void sg_put_unaligned_le32(uint32_t val
, void *p
)
193 uint32_t u
= bswap_32(val
);
198 static inline void sg_put_unaligned_le64(uint64_t val
, void *p
)
200 uint64_t u
= bswap_64(val
);
205 static inline uint16_t sg_get_unaligned_be16(const void *p
)
213 static inline uint32_t sg_get_unaligned_be32(const void *p
)
221 static inline uint64_t sg_get_unaligned_be64(const void *p
)
229 static inline void sg_put_unaligned_be16(uint16_t val
, void *p
)
234 static inline void sg_put_unaligned_be32(uint32_t val
, void *p
)
239 static inline void sg_put_unaligned_be64(uint64_t val
, void *p
)
244 #endif /* __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ */
245 #endif /* #if defined __BYTE_ORDER__ && defined <byteswap.h> &&
246 * ! defined IGNORE_FAST_LEBE */
#ifndef GOT_UNALIGNED_SPECIALS

/* Now we have no tricks left, so use the only way this can be done
 * correctly in C safely: lots of shifts. */

// #warning ">>>>>> Doing GENERIC unaligneds"

/* Fetch a 16 bit big endian value from a possibly unaligned address. */
static inline uint16_t sg_get_unaligned_be16(const void *p)
{
    return (uint16_t)(((const uint8_t *)p)[0] << 8 | ((const uint8_t *)p)[1]);
}

/* Fetch a 32 bit big endian value from a possibly unaligned address.
 * The first two bytes are widened to uint32_t BEFORE shifting: a plain
 * 'byte << 24' shifts an int-promoted value and is signed-overflow UB
 * when the byte is >= 0x80. */
static inline uint32_t sg_get_unaligned_be32(const void *p)
{
    return (uint32_t)((const uint8_t *)p)[0] << 24 |
           (uint32_t)((const uint8_t *)p)[1] << 16 |
           ((const uint8_t *)p)[2] << 8 | ((const uint8_t *)p)[3];
}

/* Fetch a 64 bit big endian value from a possibly unaligned address. */
static inline uint64_t sg_get_unaligned_be64(const void *p)
{
    return (uint64_t)sg_get_unaligned_be32(p) << 32 |
           sg_get_unaligned_be32((const uint8_t *)p + 4);
}

/* Place 'val' at address p (2 bytes) in big endian byte order. */
static inline void sg_put_unaligned_be16(uint16_t val, void *p)
{
    ((uint8_t *)p)[0] = (uint8_t)(val >> 8);
    ((uint8_t *)p)[1] = (uint8_t)val;
}

/* Place 'val' at address p (4 bytes) in big endian byte order. */
static inline void sg_put_unaligned_be32(uint32_t val, void *p)
{
    sg_put_unaligned_be16((uint16_t)(val >> 16), p);
    sg_put_unaligned_be16((uint16_t)val, (uint8_t *)p + 2);
}

/* Place 'val' at address p (8 bytes) in big endian byte order. */
static inline void sg_put_unaligned_be64(uint64_t val, void *p)
{
    sg_put_unaligned_be32((uint32_t)(val >> 32), p);
    sg_put_unaligned_be32((uint32_t)val, (uint8_t *)p + 4);
}

/* Fetch a 16 bit little endian value from a possibly unaligned address. */
static inline uint16_t sg_get_unaligned_le16(const void *p)
{
    return (uint16_t)(((const uint8_t *)p)[1] << 8 | ((const uint8_t *)p)[0]);
}

/* Fetch a 32 bit little endian value from a possibly unaligned address.
 * See sg_get_unaligned_be32() for why the high bytes are cast. */
static inline uint32_t sg_get_unaligned_le32(const void *p)
{
    return (uint32_t)((const uint8_t *)p)[3] << 24 |
           (uint32_t)((const uint8_t *)p)[2] << 16 |
           ((const uint8_t *)p)[1] << 8 | ((const uint8_t *)p)[0];
}

/* Fetch a 64 bit little endian value from a possibly unaligned address. */
static inline uint64_t sg_get_unaligned_le64(const void *p)
{
    return (uint64_t)sg_get_unaligned_le32((const uint8_t *)p + 4) << 32 |
           sg_get_unaligned_le32(p);
}

/* Place 'val' at address p (2 bytes) in little endian byte order. */
static inline void sg_put_unaligned_le16(uint16_t val, void *p)
{
    ((uint8_t *)p)[0] = val & 0xff;
    ((uint8_t *)p)[1] = (uint8_t)(val >> 8);
}

/* Place 'val' at address p (4 bytes) in little endian byte order. */
static inline void sg_put_unaligned_le32(uint32_t val, void *p)
{
    sg_put_unaligned_le16((uint16_t)(val >> 16), (uint8_t *)p + 2);
    sg_put_unaligned_le16((uint16_t)val, p);
}

/* Place 'val' at address p (8 bytes) in little endian byte order. */
static inline void sg_put_unaligned_le64(uint64_t val, void *p)
{
    sg_put_unaligned_le32((uint32_t)(val >> 32), (uint8_t *)p + 4);
    sg_put_unaligned_le32((uint32_t)val, p);
}

#endif /* #ifndef GOT_UNALIGNED_SPECIALS */
329 /* Following are lesser used conversions that don't have specializations
330 * for endianness; big endian first. In summary these are the 24, 48 bit and
331 * given-length conversions plus the "nz" conditional put conversions. */
333 /* Now big endian, get 24+48 then put 24+48 */
/* Fetch a 24 bit big endian value, returned widened into a uint32_t. */
static inline uint32_t sg_get_unaligned_be24(const void *p)
{
    /* max byte contribution is 0xff << 16, comfortably inside int: no UB */
    return ((const uint8_t *)p)[0] << 16 | ((const uint8_t *)p)[1] << 8 |
           ((const uint8_t *)p)[2];
}
/* Assume 48 bit value placed in uint64_t */
static inline uint64_t sg_get_unaligned_be48(const void *p)
{
    /* top 16 bits first, then the remaining 32 bits at offset 2 */
    return (uint64_t)sg_get_unaligned_be16(p) << 32 |
           sg_get_unaligned_be32((const uint8_t *)p + 2);
}
/* Returns 0 if 'num_bytes' is less than or equal to 0 or greater than
 * 8 (i.e. sizeof(uint64_t)). Else returns result in uint64_t which is
 * an 8 byte unsigned integer. */
static inline uint64_t sg_get_unaligned_be(int num_bytes, const void *p)
{
    if ((num_bytes <= 0) || (num_bytes > (int)sizeof(uint64_t)))
        return 0;
    else {
        const uint8_t *xp = (const uint8_t *)p;
        uint64_t res = *xp;     /* big endian: most significant byte first */

        for (++xp; num_bytes > 1; ++xp, --num_bytes)
            res = (res << 8) | *xp;
        return res;
    }
}
/* Place low 24 bits of 'val' at address p in big endian order. */
static inline void sg_put_unaligned_be24(uint32_t val, void *p)
{
    ((uint8_t *)p)[0] = (val >> 16) & 0xff;
    ((uint8_t *)p)[1] = (val >> 8) & 0xff;
    ((uint8_t *)p)[2] = val & 0xff;
}
/* Assume 48 bit value placed in uint64_t */
static inline void sg_put_unaligned_be48(uint64_t val, void *p)
{
    /* top 16 bits first, then the remaining 32 bits at offset 2 */
    sg_put_unaligned_be16((uint16_t)(val >> 32), p);
    sg_put_unaligned_be32((uint32_t)val, (uint8_t *)p + 2);
}
/* Now little endian, get 24+48 then put 24+48 */

/* Fetch a 24 bit little endian value, returned widened into a uint32_t. */
static inline uint32_t sg_get_unaligned_le24(const void *p)
{
    return (uint32_t)sg_get_unaligned_le16(p) |
           ((const uint8_t *)p)[2] << 16;
}
/* Assume 48 bit value placed in uint64_t */
static inline uint64_t sg_get_unaligned_le48(const void *p)
{
    /* bytes 4..5 hold the most significant 16 bits, bytes 0..3 the rest */
    return (uint64_t)sg_get_unaligned_le16((const uint8_t *)p + 4) << 32 |
           sg_get_unaligned_le32(p);
}
/* Place low 24 bits of 'val' at address p in little endian order. */
static inline void sg_put_unaligned_le24(uint32_t val, void *p)
{
    ((uint8_t *)p)[2] = (val >> 16) & 0xff;
    ((uint8_t *)p)[1] = (val >> 8) & 0xff;
    ((uint8_t *)p)[0] = val & 0xff;
}
/* Assume 48 bit value placed in uint64_t */
static inline void sg_put_unaligned_le48(uint64_t val, void *p)
{
    ((uint8_t *)p)[5] = (val >> 40) & 0xff;
    ((uint8_t *)p)[4] = (val >> 32) & 0xff;
    ((uint8_t *)p)[3] = (val >> 24) & 0xff;
    ((uint8_t *)p)[2] = (val >> 16) & 0xff;
    ((uint8_t *)p)[1] = (val >> 8) & 0xff;
    ((uint8_t *)p)[0] = val & 0xff;
}
/* Returns 0 if 'num_bytes' is less than or equal to 0 or greater than
 * 8 (i.e. sizeof(uint64_t)). Else returns result in uint64_t which is
 * an 8 byte unsigned integer. */
static inline uint64_t sg_get_unaligned_le(int num_bytes, const void *p)
{
    if ((num_bytes <= 0) || (num_bytes > (int)sizeof(uint64_t)))
        return 0;
    else {
        /* little endian: most significant byte is at the highest offset */
        const uint8_t *xp = (const uint8_t *)p + (num_bytes - 1);
        uint64_t res = *xp;

        for (--xp; num_bytes > 1; --xp, --num_bytes)
            res = (res << 8) | *xp;
        return res;
    }
}
427 /* Since cdb and parameter blocks are often memset to zero before these
428 * unaligned function partially fill them, then check for a val of zero
429 * and ignore if it is with these variants. First big endian, then little */
/* Write 'val' big endian at p only if it is non-zero ("nz"); destination
 * blocks are typically memset to zero beforehand, so zero needs no store. */
static inline void sg_nz_put_unaligned_be16(uint16_t val, void *p)
{
    if (0 != val)
        sg_put_unaligned_be16(val, p);
}
/* Write low 24 bits of 'val' big endian at p only if 'val' is non-zero. */
static inline void sg_nz_put_unaligned_be24(uint32_t val, void *p)
{
    if (0 != val) {
        ((uint8_t *)p)[0] = (val >> 16) & 0xff;
        ((uint8_t *)p)[1] = (val >> 8) & 0xff;
        ((uint8_t *)p)[2] = val & 0xff;
    }
}
/* Write 'val' big endian at p only if it is non-zero. */
static inline void sg_nz_put_unaligned_be32(uint32_t val, void *p)
{
    if (0 != val)
        sg_put_unaligned_be32(val, p);
}
/* Write 'val' big endian at p only if it is non-zero. */
static inline void sg_nz_put_unaligned_be64(uint64_t val, void *p)
{
    if (0 != val)
        sg_put_unaligned_be64(val, p);
}
/* Write 'val' little endian at p only if it is non-zero. */
static inline void sg_nz_put_unaligned_le16(uint16_t val, void *p)
{
    if (0 != val)
        sg_put_unaligned_le16(val, p);
}
/* Write low 24 bits of 'val' little endian at p only if 'val' is non-zero. */
static inline void sg_nz_put_unaligned_le24(uint32_t val, void *p)
{
    if (0 != val) {
        ((uint8_t *)p)[2] = (val >> 16) & 0xff;
        ((uint8_t *)p)[1] = (val >> 8) & 0xff;
        ((uint8_t *)p)[0] = val & 0xff;
    }
}
/* Write 'val' little endian at p only if it is non-zero. */
static inline void sg_nz_put_unaligned_le32(uint32_t val, void *p)
{
    if (0 != val)
        sg_put_unaligned_le32(val, p);
}
/* Write 'val' little endian at p only if it is non-zero. */
static inline void sg_nz_put_unaligned_le64(uint64_t val, void *p)
{
    if (0 != val)
        sg_put_unaligned_le64(val, p);
}
489 #endif /* SG_UNALIGNED_H */