]> git.proxmox.com Git - mirror_smartmontools-debian.git/blob - sg_unaligned.h
import smartmontools 7.0
[mirror_smartmontools-debian.git] / sg_unaligned.h
1 #ifndef SG_UNALIGNED_H
2 #define SG_UNALIGNED_H
3
4 /*
5 * Copyright (c) 2014-2018 Douglas Gilbert.
6 * All rights reserved.
7 * Use of this source code is governed by a BSD-style
8 * license that can be found in the BSD_LICENSE file.
9 */
10
11 #include <stdbool.h>
12 #include <stdint.h> /* for uint8_t and friends */
13 #include <string.h> /* for memcpy */
14
15 #ifdef __cplusplus
16 extern "C" {
17 #endif
18
19 /* These inline functions convert integers (always unsigned) to byte streams
20 * and vice versa. They have two goals:
21 * - change the byte ordering of integers between host order and big
22 * endian ("_be") or little endian ("_le")
23 * - copy the big or little endian byte stream so it complies with any
24 * alignment that host integers require
25 *
26 * Host integer to given endian byte stream is a "_put_" function taking
27 * two arguments (integer and pointer to byte stream) returning void.
28 * Given endian byte stream to host integer is a "_get_" function that takes
29 * one argument and returns an integer of appropriate size (uint32_t for 24
30 * bit operations, uint64_t for 48 bit operations).
31 *
32 * Big endian byte format "on the wire" is the default used by SCSI
33 * standards (www.t10.org). Big endian is also the network byte order.
34 * Little endian is used by ATA, PCI and NVMe.
35 */
36
37 /* The generic form of these routines was borrowed from the Linux kernel,
38 * via mhvtl. There is a specialised version of the main functions for
39 * little endian or big endian provided that not-quite-standard defines for
40 * endianness are available from the compiler and the <byteswap.h> header
41 * (a GNU extension) has been detected by ./configure . To force the
42 * generic version, use './configure --disable-fast-lebe ' . */
43
44 /* Note: Assumes that the source and destination locations do not overlap.
45 * An example of overlapping source and destination:
46 * sg_put_unaligned_le64(j, ((uint8_t *)&j) + 1);
47 * Best not to do things like that.
48 */
49
50
51 #ifdef HAVE_CONFIG_H
52 #include "config.h" /* need this to see if HAVE_BYTESWAP_H */
53 #endif
54
55 #undef GOT_UNALIGNED_SPECIALS /* just in case */
56
57 #if defined(__BYTE_ORDER__) && defined(HAVE_BYTESWAP_H) && \
58 ! defined(IGNORE_FAST_LEBE)
59
60 #if defined(__LITTLE_ENDIAN__) || (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
61
62 #define GOT_UNALIGNED_SPECIALS 1
63
64 #include <byteswap.h> /* for bswap_16(), bswap_32() and bswap_64() */
65
66 // #warning ">>>>>> Doing Little endian special unaligneds"
67
68 static inline uint16_t sg_get_unaligned_be16(const void *p)
69 {
70 uint16_t u;
71
72 memcpy(&u, p, 2);
73 return bswap_16(u);
74 }
75
76 static inline uint32_t sg_get_unaligned_be32(const void *p)
77 {
78 uint32_t u;
79
80 memcpy(&u, p, 4);
81 return bswap_32(u);
82 }
83
84 static inline uint64_t sg_get_unaligned_be64(const void *p)
85 {
86 uint64_t u;
87
88 memcpy(&u, p, 8);
89 return bswap_64(u);
90 }
91
92 static inline void sg_put_unaligned_be16(uint16_t val, void *p)
93 {
94 uint16_t u = bswap_16(val);
95
96 memcpy(p, &u, 2);
97 }
98
99 static inline void sg_put_unaligned_be32(uint32_t val, void *p)
100 {
101 uint32_t u = bswap_32(val);
102
103 memcpy(p, &u, 4);
104 }
105
106 static inline void sg_put_unaligned_be64(uint64_t val, void *p)
107 {
108 uint64_t u = bswap_64(val);
109
110 memcpy(p, &u, 8);
111 }
112
113 static inline uint16_t sg_get_unaligned_le16(const void *p)
114 {
115 uint16_t u;
116
117 memcpy(&u, p, 2);
118 return u;
119 }
120
121 static inline uint32_t sg_get_unaligned_le32(const void *p)
122 {
123 uint32_t u;
124
125 memcpy(&u, p, 4);
126 return u;
127 }
128
129 static inline uint64_t sg_get_unaligned_le64(const void *p)
130 {
131 uint64_t u;
132
133 memcpy(&u, p, 8);
134 return u;
135 }
136
137 static inline void sg_put_unaligned_le16(uint16_t val, void *p)
138 {
139 memcpy(p, &val, 2);
140 }
141
142 static inline void sg_put_unaligned_le32(uint32_t val, void *p)
143 {
144 memcpy(p, &val, 4);
145 }
146
147 static inline void sg_put_unaligned_le64(uint64_t val, void *p)
148 {
149 memcpy(p, &val, 8);
150 }
151
152 #elif defined(__BIG_ENDIAN__) || (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
153
154 #define GOT_UNALIGNED_SPECIALS 1
155
156 #include <byteswap.h>
157
158 // #warning ">>>>>> Doing BIG endian special unaligneds"
159
160 static inline uint16_t sg_get_unaligned_le16(const void *p)
161 {
162 uint16_t u;
163
164 memcpy(&u, p, 2);
165 return bswap_16(u);
166 }
167
168 static inline uint32_t sg_get_unaligned_le32(const void *p)
169 {
170 uint32_t u;
171
172 memcpy(&u, p, 4);
173 return bswap_32(u);
174 }
175
176 static inline uint64_t sg_get_unaligned_le64(const void *p)
177 {
178 uint64_t u;
179
180 memcpy(&u, p, 8);
181 return bswap_64(u);
182 }
183
184 static inline void sg_put_unaligned_le16(uint16_t val, void *p)
185 {
186 uint16_t u = bswap_16(val);
187
188 memcpy(p, &u, 2);
189 }
190
191 static inline void sg_put_unaligned_le32(uint32_t val, void *p)
192 {
193 uint32_t u = bswap_32(val);
194
195 memcpy(p, &u, 4);
196 }
197
198 static inline void sg_put_unaligned_le64(uint64_t val, void *p)
199 {
200 uint64_t u = bswap_64(val);
201
202 memcpy(p, &u, 8);
203 }
204
205 static inline uint16_t sg_get_unaligned_be16(const void *p)
206 {
207 uint16_t u;
208
209 memcpy(&u, p, 2);
210 return u;
211 }
212
213 static inline uint32_t sg_get_unaligned_be32(const void *p)
214 {
215 uint32_t u;
216
217 memcpy(&u, p, 4);
218 return u;
219 }
220
221 static inline uint64_t sg_get_unaligned_be64(const void *p)
222 {
223 uint64_t u;
224
225 memcpy(&u, p, 8);
226 return u;
227 }
228
229 static inline void sg_put_unaligned_be16(uint16_t val, void *p)
230 {
231 memcpy(p, &val, 2);
232 }
233
234 static inline void sg_put_unaligned_be32(uint32_t val, void *p)
235 {
236 memcpy(p, &val, 4);
237 }
238
239 static inline void sg_put_unaligned_be64(uint64_t val, void *p)
240 {
241 memcpy(p, &val, 8);
242 }
243
244 #endif /* __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ */
245 #endif /* #if defined __BYTE_ORDER__ && defined <byteswap.h> &&
246 * ! defined IGNORE_FAST_LEBE */
247
248
249 #ifndef GOT_UNALIGNED_SPECIALS
250
251 /* Now we have no tricks left, so use the only way this can be done
252 * correctly in C safely: lots of shifts. */
253
254 // #warning ">>>>>> Doing GENERIC unaligneds"
255
256 static inline uint16_t sg_get_unaligned_be16(const void *p)
257 {
258 return ((const uint8_t *)p)[0] << 8 | ((const uint8_t *)p)[1];
259 }
260
261 static inline uint32_t sg_get_unaligned_be32(const void *p)
262 {
263 return ((const uint8_t *)p)[0] << 24 | ((const uint8_t *)p)[1] << 16 |
264 ((const uint8_t *)p)[2] << 8 | ((const uint8_t *)p)[3];
265 }
266
267 static inline uint64_t sg_get_unaligned_be64(const void *p)
268 {
269 return (uint64_t)sg_get_unaligned_be32(p) << 32 |
270 sg_get_unaligned_be32((const uint8_t *)p + 4);
271 }
272
273 static inline void sg_put_unaligned_be16(uint16_t val, void *p)
274 {
275 ((uint8_t *)p)[0] = (uint8_t)(val >> 8);
276 ((uint8_t *)p)[1] = (uint8_t)val;
277 }
278
279 static inline void sg_put_unaligned_be32(uint32_t val, void *p)
280 {
281 sg_put_unaligned_be16(val >> 16, p);
282 sg_put_unaligned_be16(val, (uint8_t *)p + 2);
283 }
284
285 static inline void sg_put_unaligned_be64(uint64_t val, void *p)
286 {
287 sg_put_unaligned_be32(val >> 32, p);
288 sg_put_unaligned_be32(val, (uint8_t *)p + 4);
289 }
290
291
292 static inline uint16_t sg_get_unaligned_le16(const void *p)
293 {
294 return ((const uint8_t *)p)[1] << 8 | ((const uint8_t *)p)[0];
295 }
296
297 static inline uint32_t sg_get_unaligned_le32(const void *p)
298 {
299 return ((const uint8_t *)p)[3] << 24 | ((const uint8_t *)p)[2] << 16 |
300 ((const uint8_t *)p)[1] << 8 | ((const uint8_t *)p)[0];
301 }
302
303 static inline uint64_t sg_get_unaligned_le64(const void *p)
304 {
305 return (uint64_t)sg_get_unaligned_le32((const uint8_t *)p + 4) << 32 |
306 sg_get_unaligned_le32(p);
307 }
308
309 static inline void sg_put_unaligned_le16(uint16_t val, void *p)
310 {
311 ((uint8_t *)p)[0] = val & 0xff;
312 ((uint8_t *)p)[1] = val >> 8;
313 }
314
315 static inline void sg_put_unaligned_le32(uint32_t val, void *p)
316 {
317 sg_put_unaligned_le16(val >> 16, (uint8_t *)p + 2);
318 sg_put_unaligned_le16(val, p);
319 }
320
321 static inline void sg_put_unaligned_le64(uint64_t val, void *p)
322 {
323 sg_put_unaligned_le32(val >> 32, (uint8_t *)p + 4);
324 sg_put_unaligned_le32(val, p);
325 }
326
327 #endif /* #ifndef GOT_UNALIGNED_SPECIALS */
328
329 /* Following are lesser used conversions that don't have specializations
330 * for endianness; big endian first. In summary these are the 24, 48 bit and
331 * given-length conversions plus the "nz" conditional put conversions. */
332
333 /* Now big endian, get 24+48 then put 24+48 */
334 static inline uint32_t sg_get_unaligned_be24(const void *p)
335 {
336 return ((const uint8_t *)p)[0] << 16 | ((const uint8_t *)p)[1] << 8 |
337 ((const uint8_t *)p)[2];
338 }
339
340 /* Assume 48 bit value placed in uint64_t */
341 static inline uint64_t sg_get_unaligned_be48(const void *p)
342 {
343 return (uint64_t)sg_get_unaligned_be16(p) << 32 |
344 sg_get_unaligned_be32((const uint8_t *)p + 2);
345 }
346
347 /* Returns 0 if 'num_bytes' is less than or equal to 0 or greater than
348 * 8 (i.e. sizeof(uint64_t)). Else returns result in uint64_t which is
349 * an 8 byte unsigned integer. */
350 static inline uint64_t sg_get_unaligned_be(int num_bytes, const void *p)
351 {
352 if ((num_bytes <= 0) || (num_bytes > (int)sizeof(uint64_t)))
353 return 0;
354 else {
355 const uint8_t * xp = (const uint8_t *)p;
356 uint64_t res = *xp;
357
358 for (++xp; num_bytes > 1; ++xp, --num_bytes)
359 res = (res << 8) | *xp;
360 return res;
361 }
362 }
363
364 static inline void sg_put_unaligned_be24(uint32_t val, void *p)
365 {
366 ((uint8_t *)p)[0] = (val >> 16) & 0xff;
367 ((uint8_t *)p)[1] = (val >> 8) & 0xff;
368 ((uint8_t *)p)[2] = val & 0xff;
369 }
370
371 /* Assume 48 bit value placed in uint64_t */
372 static inline void sg_put_unaligned_be48(uint64_t val, void *p)
373 {
374 sg_put_unaligned_be16(val >> 32, p);
375 sg_put_unaligned_be32(val, (uint8_t *)p + 2);
376 }
377
378 /* Now little endian, get 24+48 then put 24+48 */
379 static inline uint32_t sg_get_unaligned_le24(const void *p)
380 {
381 return (uint32_t)sg_get_unaligned_le16(p) |
382 ((const uint8_t *)p)[2] << 16;
383 }
384
385 /* Assume 48 bit value placed in uint64_t */
386 static inline uint64_t sg_get_unaligned_le48(const void *p)
387 {
388 return (uint64_t)sg_get_unaligned_le16((const uint8_t *)p + 4) << 32 |
389 sg_get_unaligned_le32(p);
390 }
391
392 static inline void sg_put_unaligned_le24(uint32_t val, void *p)
393 {
394 ((uint8_t *)p)[2] = (val >> 16) & 0xff;
395 ((uint8_t *)p)[1] = (val >> 8) & 0xff;
396 ((uint8_t *)p)[0] = val & 0xff;
397 }
398
399 /* Assume 48 bit value placed in uint64_t */
400 static inline void sg_put_unaligned_le48(uint64_t val, void *p)
401 {
402 ((uint8_t *)p)[5] = (val >> 40) & 0xff;
403 ((uint8_t *)p)[4] = (val >> 32) & 0xff;
404 ((uint8_t *)p)[3] = (val >> 24) & 0xff;
405 ((uint8_t *)p)[2] = (val >> 16) & 0xff;
406 ((uint8_t *)p)[1] = (val >> 8) & 0xff;
407 ((uint8_t *)p)[0] = val & 0xff;
408 }
409
410 /* Returns 0 if 'num_bytes' is less than or equal to 0 or greater than
411 * 8 (i.e. sizeof(uint64_t)). Else returns result in uint64_t which is
412 * an 8 byte unsigned integer. */
413 static inline uint64_t sg_get_unaligned_le(int num_bytes, const void *p)
414 {
415 if ((num_bytes <= 0) || (num_bytes > (int)sizeof(uint64_t)))
416 return 0;
417 else {
418 const uint8_t * xp = (const uint8_t *)p + (num_bytes - 1);
419 uint64_t res = *xp;
420
421 for (--xp; num_bytes > 1; --xp, --num_bytes)
422 res = (res << 8) | *xp;
423 return res;
424 }
425 }
426
/* Since cdbs and parameter blocks are often memset to zero before these
 * unaligned functions partially fill them, the "nz" variants below check
 * for a val of zero and do nothing when they see one, leaving the
 * destination bytes untouched. First big endian, then little endian. */
430 static inline void sg_nz_put_unaligned_be16(uint16_t val, void *p)
431 {
432 if (val)
433 sg_put_unaligned_be16(val, p);
434 }
435
436 static inline void sg_nz_put_unaligned_be24(uint32_t val, void *p)
437 {
438 if (val) {
439 ((uint8_t *)p)[0] = (val >> 16) & 0xff;
440 ((uint8_t *)p)[1] = (val >> 8) & 0xff;
441 ((uint8_t *)p)[2] = val & 0xff;
442 }
443 }
444
445 static inline void sg_nz_put_unaligned_be32(uint32_t val, void *p)
446 {
447 if (val)
448 sg_put_unaligned_be32(val, p);
449 }
450
451 static inline void sg_nz_put_unaligned_be64(uint64_t val, void *p)
452 {
453 if (val)
454 sg_put_unaligned_be64(val, p);
455 }
456
457 static inline void sg_nz_put_unaligned_le16(uint16_t val, void *p)
458 {
459 if (val)
460 sg_put_unaligned_le16(val, p);
461 }
462
463 static inline void sg_nz_put_unaligned_le24(uint32_t val, void *p)
464 {
465 if (val) {
466 ((uint8_t *)p)[2] = (val >> 16) & 0xff;
467 ((uint8_t *)p)[1] = (val >> 8) & 0xff;
468 ((uint8_t *)p)[0] = val & 0xff;
469 }
470 }
471
472 static inline void sg_nz_put_unaligned_le32(uint32_t val, void *p)
473 {
474 if (val)
475 sg_put_unaligned_le32(val, p);
476 }
477
478 static inline void sg_nz_put_unaligned_le64(uint64_t val, void *p)
479 {
480 if (val)
481 sg_put_unaligned_le64(val, p);
482 }
483
484
485 #ifdef __cplusplus
486 }
487 #endif
488
489 #endif /* SG_UNALIGNED_H */