BaseTools/Source/C/BrotliCompress/enc/port.h
/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Macros for endianness, branch prediction and unaligned loads and stores. */

#ifndef BROTLI_ENC_PORT_H_
#define BROTLI_ENC_PORT_H_

#include <assert.h>
#include <string.h>  /* memcpy */

#include "../common/port.h"
#include "../common/types.h"

#if defined OS_LINUX || defined OS_CYGWIN
#include <endian.h>
#elif defined OS_FREEBSD
#include <machine/endian.h>
#elif defined OS_MACOSX
#include <machine/endian.h>
/* Let's try and follow the Linux convention */
#define __BYTE_ORDER BYTE_ORDER
#define __LITTLE_ENDIAN LITTLE_ENDIAN
#endif

/* define the macro IS_LITTLE_ENDIAN
   using the above endian definitions from endian.h if
   endian.h was included */
#ifdef __BYTE_ORDER
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define IS_LITTLE_ENDIAN
#endif

#else

#if defined(__LITTLE_ENDIAN__)
#define IS_LITTLE_ENDIAN
#endif
#endif /* __BYTE_ORDER */

#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#define IS_LITTLE_ENDIAN
#endif

/* Enable little-endian optimization for x64 architecture on Windows. */
#if (defined(_WIN32) || defined(_WIN64)) && defined(_M_X64)
#define IS_LITTLE_ENDIAN
#endif

/* Portable handling of unaligned loads, stores, and copies.
   On some platforms, like ARM, the copy functions can be more efficient
   than a load and a store. */

#if defined(ARCH_PIII) || \
    defined(ARCH_ATHLON) || defined(ARCH_K8) || defined(_ARCH_PPC)

/* x86 and x86-64 can perform unaligned loads/stores directly;
   modern PowerPC hardware can also do unaligned integer loads and stores;
   but note: the FPU still sends unaligned loads and stores to a trap handler!
*/

#define BROTLI_UNALIGNED_LOAD32(_p) (*(const uint32_t *)(_p))
#define BROTLI_UNALIGNED_LOAD64(_p) (*(const uint64_t *)(_p))

#define BROTLI_UNALIGNED_STORE32(_p, _val) \
  (*(uint32_t *)(_p) = (_val))
#define BROTLI_UNALIGNED_STORE64(_p, _val) \
  (*(uint64_t *)(_p) = (_val))

#elif defined(__arm__) && \
    !defined(__ARM_ARCH_5__) && \
    !defined(__ARM_ARCH_5T__) && \
    !defined(__ARM_ARCH_5TE__) && \
    !defined(__ARM_ARCH_5TEJ__) && \
    !defined(__ARM_ARCH_6__) && \
    !defined(__ARM_ARCH_6J__) && \
    !defined(__ARM_ARCH_6K__) && \
    !defined(__ARM_ARCH_6Z__) && \
    !defined(__ARM_ARCH_6ZK__) && \
    !defined(__ARM_ARCH_6T2__)

/* ARMv7 and newer support native unaligned accesses, but only of 16-bit
   and 32-bit values (not 64-bit); older versions either raise a fatal signal,
   do an unaligned read and rotate the words around a bit, or do the reads very
   slowly (trip through kernel mode). */

#define BROTLI_UNALIGNED_LOAD32(_p) (*(const uint32_t *)(_p))
#define BROTLI_UNALIGNED_STORE32(_p, _val) \
  (*(uint32_t *)(_p) = (_val))

static BROTLI_INLINE uint64_t BROTLI_UNALIGNED_LOAD64(const void *p) {
  uint64_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

static BROTLI_INLINE void BROTLI_UNALIGNED_STORE64(void *p, uint64_t v) {
  memcpy(p, &v, sizeof v);
}

#else

/* These functions are provided for architectures that don't support */
/* unaligned loads and stores. */

static BROTLI_INLINE uint32_t BROTLI_UNALIGNED_LOAD32(const void *p) {
  uint32_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

static BROTLI_INLINE uint64_t BROTLI_UNALIGNED_LOAD64(const void *p) {
  uint64_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

static BROTLI_INLINE void BROTLI_UNALIGNED_STORE32(void *p, uint32_t v) {
  memcpy(p, &v, sizeof v);
}

static BROTLI_INLINE void BROTLI_UNALIGNED_STORE64(void *p, uint64_t v) {
  memcpy(p, &v, sizeof v);
}

#endif

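/* Example (illustrative sketch): whichever branch above was selected, callers
   access potentially unaligned positions through these macros/functions
   instead of dereferencing cast pointers directly. The helper name below is
   hypothetical. */
#if 0
static BROTLI_INLINE void ExampleCopyLow32(uint8_t* dst, const uint8_t* src) {
  uint64_t x = BROTLI_UNALIGNED_LOAD64(src);   /* src may be unaligned */
  BROTLI_UNALIGNED_STORE32(dst, (uint32_t)x);  /* dst may be unaligned */
}
#endif
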
#if !defined(__cplusplus) && !defined(c_plusplus) && __STDC_VERSION__ >= 199901L
#define BROTLI_RESTRICT restrict
#elif BROTLI_GCC_VERSION > 295 || defined(__llvm__)
#define BROTLI_RESTRICT __restrict
#else
#define BROTLI_RESTRICT
#endif

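/* Example (illustrative sketch): BROTLI_RESTRICT expands to the strongest
   "no aliasing" qualifier available (C99 restrict, GCC/LLVM __restrict, or
   nothing), so parameters can declare non-overlapping buffers portably.
   The function name below is hypothetical. */
#if 0
static BROTLI_INLINE void ExampleXorBytes(uint8_t* BROTLI_RESTRICT dst,
                                          const uint8_t* BROTLI_RESTRICT src,
                                          size_t n) {
  size_t i;
  for (i = 0; i < n; ++i) dst[i] ^= src[i];  /* dst and src must not overlap */
}
#endif
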
#define _TEMPLATE(T) \
  static BROTLI_INLINE T brotli_min_ ## T (T a, T b) { return a < b ? a : b; } \
  static BROTLI_INLINE T brotli_max_ ## T (T a, T b) { return a > b ? a : b; }
_TEMPLATE(double) _TEMPLATE(float) _TEMPLATE(int)
_TEMPLATE(size_t) _TEMPLATE(uint32_t) _TEMPLATE(uint8_t)
#undef _TEMPLATE
#define BROTLI_MIN(T, A, B) (brotli_min_ ## T((A), (B)))
#define BROTLI_MAX(T, A, B) (brotli_max_ ## T((A), (B)))

#define BROTLI_SWAP(T, A, I, J) { \
  T __brotli_swap_tmp = (A)[(I)]; \
  (A)[(I)] = (A)[(J)]; \
  (A)[(J)] = __brotli_swap_tmp; \
}

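/* Example (illustrative sketch): BROTLI_MIN/BROTLI_MAX take the type name as
   the first argument and dispatch to the matching brotli_min_<T> and
   brotli_max_<T> helpers generated above; BROTLI_SWAP exchanges two elements
   of an array of type T. The function name below is hypothetical. */
#if 0
static BROTLI_INLINE void ExampleSortPair(uint32_t values[2]) {
  /* Expands to brotli_max_uint32_t(values[0], values[1]). */
  if (BROTLI_MAX(uint32_t, values[0], values[1]) != values[1]) {
    BROTLI_SWAP(uint32_t, values, 0, 1);  /* put the larger value last */
  }
}
#endif
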
#define BROTLI_ENSURE_CAPACITY(M, T, A, C, R) { \
  if (C < (R)) { \
    size_t _new_size = (C == 0) ? (R) : C; \
    T* new_array; \
    while (_new_size < (R)) _new_size *= 2; \
    new_array = BROTLI_ALLOC((M), T, _new_size); \
    if (!BROTLI_IS_OOM(M) && C != 0) \
      memcpy(new_array, A, C * sizeof(T)); \
    BROTLI_FREE((M), A); \
    A = new_array; \
    C = _new_size; \
  } \
}

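/* Example (illustrative sketch, hypothetical call site): the macro grows a
   BROTLI_ALLOC'ed array geometrically until it can hold at least R elements,
   and leaves it untouched when the capacity already suffices. MemoryManager,
   BROTLI_ALLOC, BROTLI_FREE and BROTLI_IS_OOM are assumed to come from the
   encoder's memory manager, not from this header. */
#if 0
static void ExampleAppendU32(MemoryManager* m, uint32_t** data,
                             size_t* capacity, size_t* size, uint32_t value) {
  BROTLI_ENSURE_CAPACITY(m, uint32_t, *data, *capacity, *size + 1);
  if (BROTLI_IS_OOM(m)) return;  /* give up if the allocation failed */
  (*data)[(*size)++] = value;
}
#endif
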
#endif /* BROTLI_ENC_PORT_H_ */