/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * All rights reserved.
 * Copyright 2019 NXP
 *
 */

#ifndef __COMPAT_H
#define __COMPAT_H

/* _GNU_SOURCE must be defined before the first libc header is included
 * for the feature-test macro to take effect.
 */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <sched.h>
#include <stdint.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <linux/types.h>
#include <stdbool.h>
#include <ctype.h>
#include <malloc.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <limits.h>
#include <assert.h>
#include <dirent.h>
#include <inttypes.h>
#include <error.h>
#include <rte_byteorder.h>
#include <rte_atomic.h>
#include <rte_spinlock.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_malloc.h>

/* The following definitions are primarily to allow the single-source driver
 * interfaces to be included by arbitrary program code. I.e., for interfaces
 * that are also available in kernel space, these definitions provide
 * compatibility with certain attributes and types used in those interfaces.
 */

/* Required compiler attributes */
#ifndef __maybe_unused
#define __maybe_unused __rte_unused
#endif
#ifndef __always_unused
#define __always_unused __rte_unused
#endif
#ifndef __packed
#define __packed __rte_packed
#endif
#ifndef noinline
#define noinline __rte_noinline
#endif
#define L1_CACHE_BYTES 64
#define ____cacheline_aligned __rte_aligned(L1_CACHE_BYTES)
#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)
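/* Two macro levels are needed so that an argument is macro-expanded before
 * being stringified: given "#define VER 2", __stringify(VER) yields "2",
 * whereas a single-level #VER would yield "VER".
 */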

#ifdef ARRAY_SIZE
#undef ARRAY_SIZE
#endif
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
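/* Note: like the kernel original, this only works on true array objects;
 * passing a pointer silently yields a wrong element count.
 */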

/* Debugging */
#define prflush(fmt, args...) \
	do { \
		printf(fmt, ##args); \
		fflush(stdout); \
	} while (0)
#ifndef pr_crit
#define pr_crit(fmt, args...) prflush("CRIT:" fmt, ##args)
#endif
#ifndef pr_err
#define pr_err(fmt, args...) prflush("ERR:" fmt, ##args)
#endif
#ifndef pr_warn
#define pr_warn(fmt, args...) prflush("WARN:" fmt, ##args)
#endif
#ifndef pr_info
#define pr_info(fmt, args...) prflush(fmt, ##args)
#endif
#ifndef pr_debug
#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
#define pr_debug(fmt, args...) printf(fmt, ##args)
#else
/* Expand to a safe no-op statement rather than a bare "{}" so the macro
 * behaves correctly in unbraced if/else bodies.
 */
#define pr_debug(fmt, args...) do { } while (0)
#endif
#endif

#define DPAA_BUG_ON(x) RTE_ASSERT(x)

/* Required types */
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef uint64_t dma_addr_t;
typedef cpu_set_t cpumask_t;
typedef uint32_t phandle;
typedef uint32_t gfp_t;
typedef uint32_t irqreturn_t;

#define ETHER_ADDR_LEN 6

#define IRQ_HANDLED 0
#define request_irq qbman_request_irq
#define free_irq qbman_free_irq

#define __iomem
#define GFP_KERNEL 0
#define __raw_readb(p) (*(const volatile unsigned char *)(p))
#define __raw_readl(p) (*(const volatile unsigned int *)(p))
#define __raw_writel(v, p) {*(volatile unsigned int *)(p) = (v); }
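/* These raw accessors are native-endian and carry no memory barriers; the
 * volatile cast only prevents the compiler from caching or eliding the
 * MMIO access.
 */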

/* to be used as an upper-limit only */
#define NR_CPUS 64

/* Waitqueue stuff */
typedef struct { } wait_queue_head_t;
#define DECLARE_WAIT_QUEUE_HEAD(x) int dummy_##x __always_unused
#define wake_up(x) do { } while (0)
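/* The wait-queue API is stubbed out for user space: the head type is empty,
 * DECLARE_WAIT_QUEUE_HEAD() emits only an unused dummy variable so existing
 * declarations still compile, and wake_up() is a no-op.
 */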

/* I/O operations */
static inline u32 in_be32(volatile void *__p)
{
	volatile u32 *p = __p;
	return rte_be_to_cpu_32(*p);
}

static inline void out_be32(volatile void *__p, u32 val)
{
	volatile u32 *p = __p;
	*p = rte_cpu_to_be_32(val);
}

#define hwsync() rte_rmb()
#define lwsync() rte_wmb()

#define dcbt_ro(p) __builtin_prefetch(p, 0)
#define dcbt_rw(p) __builtin_prefetch(p, 1)
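/* The second __builtin_prefetch() argument is the rw hint: 0 prefetches for
 * a read, 1 for a write, mirroring the PowerPC dcbt/dcbtst "touch"
 * instructions these names come from.
 */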

#if defined(RTE_ARCH_ARM64)
#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
#define dcbz_64(p) dcbz(p)
#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
#define dcbf_64(p) dcbf(p)
#define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); }

#define dcbit_ro(p) \
	do { \
		dccivac(p); \
		asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); \
	} while (0)
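/* On ARMv8: "dc zva" zeroes one cache line (64 bytes here), "dc cvac"
 * cleans a line to the point of coherency, and "dc civac" cleans and
 * invalidates it; dcbit_ro() additionally prefetches the following line.
 */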

#elif defined(RTE_ARCH_ARM)
#define dcbz(p) memset((p), 0, 32)
#define dcbz_64(p) memset((p), 0, 64)
#define dcbf(p) RTE_SET_USED(p)
#define dcbf_64(p) dcbf(p)
#define dccivac(p) RTE_SET_USED(p)
#define dcbit_ro(p) RTE_SET_USED(p)

#else
#define dcbz(p) RTE_SET_USED(p)
#define dcbz_64(p) dcbz(p)
#define dcbf(p) RTE_SET_USED(p)
#define dcbf_64(p) dcbf(p)
#define dccivac(p) RTE_SET_USED(p)
#define dcbit_ro(p) RTE_SET_USED(p)
#endif

#define barrier() { asm volatile ("" : : : "memory"); }
#define cpu_relax barrier

#if defined(RTE_ARCH_ARM64)
static inline uint64_t mfatb(void)
{
	uint64_t ret, ret_new, timeout = 200;

	/* Read the virtual counter twice and retry until two consecutive
	 * reads agree, so a torn or stale value is never returned.
	 */
	asm volatile ("mrs %0, cntvct_el0" : "=r" (ret));
	asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
	while (ret != ret_new && timeout--) {
		ret = ret_new;
		asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
	}
	DPAA_BUG_ON(!timeout && (ret != ret_new));
	/* Scale the raw counter to the platform timebase */
	return ret * 64;
}
#else

#define mfatb rte_rdtsc

#endif

/* Spin for a few cycles without bothering the bus */
static inline void cpu_spin(int cycles)
{
	uint64_t now = mfatb();

	while (mfatb() < (now + cycles))
		;
}
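/* Note the delay is measured in mfatb() timebase ticks, not necessarily core
 * clock cycles, so the real-time length of cpu_spin(n) is platform dependent.
 */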

/* Qman/Bman API inlines and macros */
#ifdef lower_32_bits
#undef lower_32_bits
#endif
#define lower_32_bits(x) ((u32)(x))

#ifdef upper_32_bits
#undef upper_32_bits
#endif
#define upper_32_bits(x) ((u32)(((x) >> 16) >> 16))
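/* The double 16-bit shift keeps the expression well defined when x is only
 * 32 bits wide: a single ">> 32" on a 32-bit operand would be undefined
 * behaviour in C.
 */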

/*
 * Swap bytes of a 48-bit value.
 */
static inline uint64_t
__bswap_48(uint64_t x)
{
	return  ((x & 0x0000000000ffULL) << 40) |
		((x & 0x00000000ff00ULL) << 24) |
		((x & 0x000000ff0000ULL) <<  8) |
		((x & 0x0000ff000000ULL) >>  8) |
		((x & 0x00ff00000000ULL) >> 24) |
		((x & 0xff0000000000ULL) >> 40);
}
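/* For example, __bswap_48(0x112233445566ULL) == 0x665544332211ULL; any bits
 * above the low 48 are discarded by the masks. The 40- and 24-bit variants
 * below follow the same pattern.
 */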

/*
 * Swap bytes of a 40-bit value.
 */
static inline uint64_t
__bswap_40(uint64_t x)
{
	return  ((x & 0x00000000ffULL) << 32) |
		((x & 0x000000ff00ULL) << 16) |
		((x & 0x0000ff0000ULL)) |
		((x & 0x00ff000000ULL) >> 16) |
		((x & 0xff00000000ULL) >> 32);
}

/*
 * Swap bytes of a 24-bit value.
 */
static inline uint32_t
__bswap_24(uint32_t x)
{
	return  ((x & 0x0000ffULL) << 16) |
		((x & 0x00ff00ULL)) |
		((x & 0xff0000ULL) >> 16);
}

#define be64_to_cpu(x) rte_be_to_cpu_64(x)
#define be32_to_cpu(x) rte_be_to_cpu_32(x)
#define be16_to_cpu(x) rte_be_to_cpu_16(x)

#define cpu_to_be64(x) rte_cpu_to_be_64(x)
#if !defined(cpu_to_be32)
#define cpu_to_be32(x) rte_cpu_to_be_32(x)
#endif
#define cpu_to_be16(x) rte_cpu_to_be_16(x)

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

#define cpu_to_be48(x) __bswap_48(x)
#define be48_to_cpu(x) __bswap_48(x)

#define cpu_to_be40(x) __bswap_40(x)
#define be40_to_cpu(x) __bswap_40(x)

#define cpu_to_be24(x) __bswap_24(x)
#define be24_to_cpu(x) __bswap_24(x)

#else /* RTE_BIG_ENDIAN */

#define cpu_to_be48(x) (x)
#define be48_to_cpu(x) (x)

#define cpu_to_be40(x) (x)
#define be40_to_cpu(x) (x)

#define cpu_to_be24(x) (x)
#define be24_to_cpu(x) (x)

#endif /* RTE_BIG_ENDIAN */

/* memcpy() helpers - when the alignment is known in advance, try to avoid
 * memcpy() by copying aligned words or shorts directly.
 */
#define CONFIG_TRY_BETTER_MEMCPY

#ifdef CONFIG_TRY_BETTER_MEMCPY
static inline void copy_words(void *dest, const void *src, size_t sz)
{
	u32 *__dest = dest;
	const u32 *__src = src;
	size_t __sz = sz >> 2;

	DPAA_BUG_ON((unsigned long)dest & 0x3);
	DPAA_BUG_ON((unsigned long)src & 0x3);
	DPAA_BUG_ON(sz & 0x3);
	while (__sz--)
		*(__dest++) = *(__src++);
}

static inline void copy_shorts(void *dest, const void *src, size_t sz)
{
	u16 *__dest = dest;
	const u16 *__src = src;
	size_t __sz = sz >> 1;

	DPAA_BUG_ON((unsigned long)dest & 0x1);
	DPAA_BUG_ON((unsigned long)src & 0x1);
	DPAA_BUG_ON(sz & 0x1);
	while (__sz--)
		*(__dest++) = *(__src++);
}

static inline void copy_bytes(void *dest, const void *src, size_t sz)
{
	u8 *__dest = dest;
	const u8 *__src = src;

	while (sz--)
		*(__dest++) = *(__src++);
}
#else
#define copy_words memcpy
#define copy_shorts memcpy
#define copy_bytes memcpy
#endif
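/* Illustrative use: both pointers and the size must satisfy the alignment
 * that the DPAA_BUG_ON() checks assert, e.g.
 *
 *	u32 dst[4], src[4];
 *	copy_words(dst, src, sizeof(src));	// 16 bytes, 4-byte aligned
 */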

/* Allocator stuff */
#define kmalloc(sz, t) rte_malloc(NULL, sz, 0)
#define vmalloc(sz) rte_malloc(NULL, sz, 0)
#define kfree(p) { if (p) rte_free(p); }
static inline void *kzalloc(size_t sz, gfp_t __foo __rte_unused)
{
	void *ptr = rte_malloc(NULL, sz, 0);

	if (ptr)
		memset(ptr, 0, sz);
	return ptr;
}

static inline unsigned long get_zeroed_page(gfp_t __foo __rte_unused)
{
	void *p;

	if (posix_memalign(&p, 4096, 4096))
		return 0;
	memset(p, 0, 4096);
	return (unsigned long)p;
}
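/* Pages returned here come from posix_memalign(), so they must be released
 * with free() rather than kfree()/rte_free().
 */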

/* Spinlock stuff */
#define spinlock_t rte_spinlock_t
#define __SPIN_LOCK_UNLOCKED(x) RTE_SPINLOCK_INITIALIZER
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
#define spin_lock_init(x) rte_spinlock_init(x)
#define spin_lock_destroy(x)
#define spin_lock(x) rte_spinlock_lock(x)
#define spin_unlock(x) rte_spinlock_unlock(x)
#define spin_lock_irq(x) spin_lock(x)
#define spin_unlock_irq(x) spin_unlock(x)
#define spin_lock_irqsave(x, f) spin_lock_irq(x)
#define spin_unlock_irqrestore(x, f) spin_unlock_irq(x)
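/* The _irq/_irqsave variants are plain locks in user space; there are no
 * interrupts to mask, so the flags argument is ignored, e.g.
 *
 *	DEFINE_SPINLOCK(lock);
 *	spin_lock_irqsave(&lock, flags);	// same as spin_lock(&lock)
 *	spin_unlock_irqrestore(&lock, flags);
 */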

#define atomic_t rte_atomic32_t
#define atomic_read(v) rte_atomic32_read(v)
#define atomic_set(v, i) rte_atomic32_set(v, i)

#define atomic_inc(v) rte_atomic32_add(v, 1)
#define atomic_dec(v) rte_atomic32_sub(v, 1)

#define atomic_inc_and_test(v) rte_atomic32_inc_and_test(v)
#define atomic_dec_and_test(v) rte_atomic32_dec_and_test(v)

#define atomic_inc_return(v) rte_atomic32_add_return(v, 1)
#define atomic_dec_return(v) rte_atomic32_sub_return(v, 1)
#define atomic_sub_and_test(i, v) (rte_atomic32_sub_return(v, i) == 0)
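/* Typical reference-count usage of the remapped atomics:
 *
 *	atomic_t refs;
 *	atomic_set(&refs, 1);
 *	atomic_inc(&refs);
 *	if (atomic_dec_and_test(&refs))
 *		pr_info("last reference dropped\n");
 */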

#endif /* __COMPAT_H */