/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#ifndef _SFC_COMMON_EFSYS_H
#define _SFC_COMMON_EFSYS_H

#include <stdbool.h>

#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_memzone.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_io.h>

#include "sfc_debug.h"
#include "sfc_log.h"

#ifdef __cplusplus
extern "C" {
#endif

#define EFSYS_HAS_UINT64 1
#define EFSYS_USE_UINT64 1
#define EFSYS_HAS_SSE2_M128 1

#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 1
#define EFSYS_IS_LITTLE_ENDIAN 0
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 0
#define EFSYS_IS_LITTLE_ENDIAN 1
#else
#error "Cannot determine system endianness"
#endif
#include "efx_types.h"


typedef bool boolean_t;

#ifndef B_FALSE
#define B_FALSE false
#endif
#ifndef B_TRUE
#define B_TRUE true
#endif

/*
 * RTE_MAX() and RTE_MIN() cannot be used here since they expand to a
 * braced group, which is allowed only inside a function, whereas MAX()
 * is also used as the number of elements in an array (see the sketch
 * below).
 */
#ifndef MAX
#define MAX(v1, v2) ((v1) > (v2) ? (v1) : (v2))
#endif
#ifndef MIN
#define MIN(v1, v2) ((v1) < (v2) ? (v1) : (v2))
#endif
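
/*
 * Usage sketch: MAX() must remain a plain constant expression so that it
 * can size an array at file scope, where RTE_MAX()'s ({ ... }) statement
 * expression is rejected by the compiler. The names below are hypothetical.
 *
 *	#define EXAMPLE_NUM_TXQ 4
 *	#define EXAMPLE_NUM_RXQ 6
 *
 *	static unsigned int example_queue_stats[MAX(EXAMPLE_NUM_TXQ,
 *						    EXAMPLE_NUM_RXQ)];
 */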

/* DPDK provides its own alignment macros, but re-using them would require
 * establishing a careful correspondence with the semantics expected here,
 * so the classic definitions are kept (a worked example follows below).
 */
#ifndef IS_P2ALIGNED
#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
#endif

#ifndef P2ROUNDUP
#define P2ROUNDUP(x, align) (-(-(x) & -(align)))
#endif

#ifndef P2ALIGN
#define P2ALIGN(_x, _a) ((_x) & -(_a))
#endif

#ifndef ISP2
#define ISP2(x) rte_is_power_of_2(x)
#endif
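
/*
 * Worked example for the power-of-2 helpers above, assuming a 64-byte
 * alignment (the values are illustrative only):
 *
 *	IS_P2ALIGNED(128, 64)	-> true  (128 & 63 == 0)
 *	IS_P2ALIGNED(100, 64)	-> false (100 & 63 == 36)
 *	P2ROUNDUP(100, 64)	-> 128   (round up to the next boundary)
 *	P2ALIGN(100, 64)	-> 64    (round down to the previous boundary)
 *	ISP2(64)		-> true, ISP2(100) -> false
 *
 * All of them assume that the alignment itself is a power of two.
 */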

#define ENOTACTIVE ENOTCONN

static inline void
prefetch_read_many(const volatile void *addr)
{
	rte_prefetch0(addr);
}

static inline void
prefetch_read_once(const volatile void *addr)
{
	rte_prefetch_non_temporal(addr);
}

/* Code inclusion options */


#define EFSYS_OPT_NAMES 1

/* Disable SFN5xxx/SFN6xxx since they require specific support in the PMD */
#define EFSYS_OPT_SIENA 0
/* Enable SFN7xxx support */
#define EFSYS_OPT_HUNTINGTON 1
/* Enable SFN8xxx support */
#define EFSYS_OPT_MEDFORD 1
/* Enable X2xxx (Medford2) support */
#define EFSYS_OPT_MEDFORD2 1
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_OPT_CHECK_REG 1
#else
#define EFSYS_OPT_CHECK_REG 0
#endif

/* MCDI is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_MCDI 1
#define EFSYS_OPT_MCDI_LOGGING 1
#define EFSYS_OPT_MCDI_PROXY_AUTH 1

#define EFSYS_OPT_MAC_STATS 1

#define EFSYS_OPT_LOOPBACK 1

#define EFSYS_OPT_MON_MCDI 0
#define EFSYS_OPT_MON_STATS 0

#define EFSYS_OPT_PHY_STATS 0
#define EFSYS_OPT_BIST 0
#define EFSYS_OPT_PHY_LED_CONTROL 0
#define EFSYS_OPT_PHY_FLAGS 0

#define EFSYS_OPT_VPD 0
#define EFSYS_OPT_NVRAM 0
#define EFSYS_OPT_BOOTCFG 0
#define EFSYS_OPT_IMAGE_LAYOUT 0

#define EFSYS_OPT_DIAG 0
#define EFSYS_OPT_RX_SCALE 1
#define EFSYS_OPT_QSTATS 0
/* Filter support is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_FILTER 1
#define EFSYS_OPT_RX_SCATTER 0

#define EFSYS_OPT_EV_PREFETCH 0

#define EFSYS_OPT_DECODE_INTR_FATAL 0

#define EFSYS_OPT_LICENSING 0

#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0

#define EFSYS_OPT_RX_PACKED_STREAM 0

#define EFSYS_OPT_RX_ES_SUPER_BUFFER 1

#define EFSYS_OPT_TUNNEL 1

#define EFSYS_OPT_FW_SUBVARIANT_AWARE 1

/* ID */

typedef struct __efsys_identifier_s efsys_identifier_t;


#define EFSYS_PROBE(_name) \
	do { } while (0)

#define EFSYS_PROBE1(_name, _type1, _arg1) \
	do { } while (0)

#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
	do { } while (0)

#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3) \
	do { } while (0)

#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4) \
	do { } while (0)

#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5) \
	do { } while (0)

#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5, \
		     _type6, _arg6) \
	do { } while (0)

#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5, \
		     _type6, _arg6, _type7, _arg7) \
	do { } while (0)


/* DMA */

typedef rte_iova_t efsys_dma_addr_t;

typedef struct efsys_mem_s {
	const struct rte_memzone *esm_mz;
	/*
	 * Ideally this field would carry the volatile qualifier to denote
	 * that the memory may be updated by someone else. However, that
	 * triggers qualifier-discard warnings when the pointer or its
	 * derivative is passed to memset() or rte_mov16().
	 * So, skip the qualifier here, but make sure that it is added
	 * below in the access macros.
	 */
	void *esm_base;
	efsys_dma_addr_t esm_addr;
} efsys_mem_t;
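
/*
 * A minimal sketch (an assumption for illustration, not the PMD's actual
 * allocation path) of how an efsys_mem_t is typically populated: a DPDK
 * memzone provides both the process-virtual base for CPU access and the
 * IOVA used by the NIC for DMA. example_dma_alloc() and its parameters
 * are hypothetical names.
 *
 *	static int
 *	example_dma_alloc(const char *name, size_t len, int socket_id,
 *			  efsys_mem_t *esmp)
 *	{
 *		const struct rte_memzone *mz;
 *
 *		mz = rte_memzone_reserve_aligned(name, len, socket_id,
 *						 0, RTE_CACHE_LINE_SIZE);
 *		if (mz == NULL)
 *			return ENOMEM;
 *
 *		esmp->esm_mz = mz;
 *		esmp->esm_base = mz->addr;	// CPU-visible mapping
 *		esmp->esm_addr = mz->iova;	// DMA address for the NIC
 *		return 0;
 *	}
 */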


#define EFSYS_MEM_ZERO(_esmp, _size) \
	do { \
		(void)memset((void *)(_esmp)->esm_base, 0, (_size)); \
 \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_MEM_READD(_esmp, _offset, _edp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint32_t *_addr; \
 \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
 \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		(_edp)->ed_u32[0] = _addr[0]; \
 \
		EFSYS_PROBE2(mem_readl, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
 \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint64_t *_addr; \
 \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
 \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		(_eqp)->eq_u64[0] = _addr[0]; \
 \
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
 \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_MEM_READO(_esmp, _offset, _eop) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile __m128i *_addr; \
 \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
 \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		(_eop)->eo_u128[0] = _addr[0]; \
 \
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
 \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)


#define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint32_t *_addr; \
 \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
 \
		EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
 \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		_addr[0] = (_edp)->ed_u32[0]; \
 \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint64_t *_addr; \
 \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
 \
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
 \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		_addr[0] = (_eqp)->eq_u64[0]; \
 \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile __m128i *_addr; \
 \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
 \
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
 \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		_addr[0] = (_eop)->eo_u128[0]; \
 \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)


#define EFSYS_MEM_SIZE(_esmp) \
	((_esmp)->esm_mz->len)

#define EFSYS_MEM_ADDR(_esmp) \
	((_esmp)->esm_addr)

#define EFSYS_MEM_IS_NULL(_esmp) \
	((_esmp)->esm_base == NULL)

#define EFSYS_MEM_PREFETCH(_esmp, _offset) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
 \
		rte_prefetch0(_base + (_offset)); \
	} while (0)


/* BAR */

typedef struct efsys_bar_s {
	rte_spinlock_t esb_lock;
	int esb_rid;
	struct rte_pci_device *esb_dev;
	/*
	 * Ideally this field would carry the volatile qualifier to denote
	 * that the memory may be updated by someone else. However, that
	 * triggers qualifier-discard warnings when the pointer or its
	 * derivative is passed to memset() or rte_mov16().
	 * So, skip the qualifier here, but make sure that it is added
	 * below in the access macros.
	 */
	void *esb_base;
} efsys_bar_t;
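
/*
 * A minimal sketch (an assumption for illustration, not the PMD's actual
 * attach code) of how an efsys_bar_t is typically initialised from a PCI
 * BAR that the bus driver has already mapped: rte_bus_pci.h exposes the
 * mappings via pci_dev->mem_resource[]. example_bar_init() and "membar"
 * are hypothetical names.
 *
 *	static void
 *	example_bar_init(efsys_bar_t *esbp, struct rte_pci_device *pci_dev,
 *			 int membar)
 *	{
 *		rte_spinlock_init(&esbp->esb_lock);
 *		esbp->esb_rid = membar;
 *		esbp->esb_dev = pci_dev;
 *		esbp->esb_base = pci_dev->mem_resource[membar].addr;
 *	}
 */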

#define SFC_BAR_LOCK_INIT(_esbp, _ifname) \
	do { \
		rte_spinlock_init(&(_esbp)->esb_lock); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#define SFC_BAR_LOCK_DESTROY(_esbp) ((void)0)
#define SFC_BAR_LOCK(_esbp) rte_spinlock_lock(&(_esbp)->esb_lock)
#define SFC_BAR_UNLOCK(_esbp) rte_spinlock_unlock(&(_esbp)->esb_lock)

#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint32_t *_addr; \
 \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
 \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		rte_rmb(); \
		(_edp)->ed_u32[0] = rte_read32_relaxed(_addr); \
 \
		EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
 \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint64_t *_addr; \
 \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
 \
		SFC_BAR_LOCK(_esbp); \
 \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		rte_rmb(); \
		(_eqp)->eq_u64[0] = rte_read64_relaxed(_addr); \
 \
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
 \
		SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile __m128i *_addr; \
 \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
 \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
 \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		rte_rmb(); \
		/* There is no rte_read128_relaxed() yet */ \
		(_eop)->eo_u128[0] = _addr[0]; \
 \
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
 \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)


#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint32_t *_addr; \
 \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
 \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
 \
		EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
 \
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		rte_write32_relaxed((_edp)->ed_u32[0], _addr); \
		rte_wmb(); \
 \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint64_t *_addr; \
 \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
 \
		SFC_BAR_LOCK(_esbp); \
 \
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
 \
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		rte_write64_relaxed((_eqp)->eq_u64[0], _addr); \
		rte_wmb(); \
 \
		SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/*
 * Guarantees 64-bit aligned 64-bit writes to a write-combined BAR mapping
 * (required by PIO hardware).
 *
 * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) supports write-combined
 * memory mapped to user-land, so just abort if used.
 */
#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \
	do { \
		rte_panic("Write-combined BAR access not supported"); \
	} while (B_FALSE)

#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile __m128i *_addr; \
 \
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
 \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
 \
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
 \
		_addr = (volatile __m128i *)(_base + (_offset)); \
		/* There is no rte_write128_relaxed() yet */ \
		_addr[0] = (_eop)->eo_u128[0]; \
		rte_wmb(); \
 \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Use the standard octo-word write for doorbell writes */
#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \
	do { \
		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* SPIN */

#define EFSYS_SPIN(_us) \
	do { \
		rte_delay_us(_us); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_SLEEP EFSYS_SPIN

/* BARRIERS */

#define EFSYS_MEM_READ_BARRIER() rte_rmb()
#define EFSYS_PIO_WRITE_BARRIER() rte_io_wmb()

/* DMA SYNC */

/*
 * DPDK does not provide any DMA syncing API, and no PMD shows any trace
 * of explicit DMA syncing.
 * DMA mappings are assumed to be coherent.
 */

#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)

/* Just avoid store and (implicit) compiler reordering */
#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()

/* TIMESTAMP */

typedef uint64_t efsys_timestamp_t;

#define EFSYS_TIMESTAMP(_usp) \
	do { \
		*(_usp) = rte_get_timer_cycles() * 1000000 / \
			  rte_get_timer_hz(); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
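
/*
 * Worked example with hypothetical numbers: if rte_get_timer_hz() reports
 * 2500000000 (a 2.5 GHz timer) and rte_get_timer_cycles() returns
 * 5000000000, EFSYS_TIMESTAMP() stores 5000000000 * 1000000 / 2500000000,
 * i.e. 2000000 microseconds (two seconds) of timer uptime.
 */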

/* KMEM */

#define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
	do { \
		(_esip) = (_esip); \
		(_p) = rte_zmalloc("sfc", (_size), 0); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_KMEM_FREE(_esip, _size, _p) \
	do { \
		(void)(_esip); \
		(void)(_size); \
		rte_free((_p)); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* LOCK */

typedef rte_spinlock_t efsys_lock_t;

#define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
	rte_spinlock_init((_eslp))
#define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0)
#define SFC_EFSYS_LOCK(_eslp) \
	rte_spinlock_lock((_eslp))
#define SFC_EFSYS_UNLOCK(_eslp) \
	rte_spinlock_unlock((_eslp))
#define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
	SFC_ASSERT(rte_spinlock_is_locked((_eslp)))

typedef int efsys_lock_state_t;

#define EFSYS_LOCK_MAGIC 0x000010c4

#define EFSYS_LOCK(_lockp, _state) \
	do { \
		SFC_EFSYS_LOCK(_lockp); \
		(_state) = EFSYS_LOCK_MAGIC; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_UNLOCK(_lockp, _state) \
	do { \
		SFC_ASSERT((_state) == EFSYS_LOCK_MAGIC); \
		SFC_EFSYS_UNLOCK(_lockp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
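
/*
 * A minimal usage sketch (illustrative only): the _state value merely
 * carries EFSYS_LOCK_MAGIC so that EFSYS_UNLOCK() can assert that the lock
 * was taken through EFSYS_LOCK(). example_locked_op() is a hypothetical
 * name.
 *
 *	static void
 *	example_locked_op(efsys_lock_t *lockp)
 *	{
 *		efsys_lock_state_t state;
 *
 *		EFSYS_LOCK(lockp, state);
 *		// ... critical section ...
 *		EFSYS_UNLOCK(lockp, state);
 *	}
 */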

/* STAT */

typedef uint64_t efsys_stat_t;

#define EFSYS_STAT_INCR(_knp, _delta) \
	do { \
		*(_knp) += (_delta); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_DECR(_knp, _delta) \
	do { \
		*(_knp) -= (_delta); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SET(_knp, _val) \
	do { \
		*(_knp) = (_val); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SET_QWORD(_knp, _valp) \
	do { \
		*(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SET_DWORD(_knp, _valp) \
	do { \
		*(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
	do { \
		*(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
	do { \
		*(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* ERR */

#if EFSYS_OPT_DECODE_INTR_FATAL
#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
	do { \
		(void)(_esip); \
		SFC_GENERIC_LOG(ERR, "FATAL ERROR #%u (0x%08x%08x)", \
				(_code), (_dword0), (_dword1)); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#endif

/* ASSERT */

/*
 * RTE_VERIFY() from DPDK treats expressions containing the % operator
 * incorrectly, so it is re-implemented here (see the sketch below).
 */
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_ASSERT(_exp) \
	do { \
		if (unlikely(!(_exp))) \
			rte_panic("line %d\tassert \"%s\" failed\n", \
				  __LINE__, (#_exp)); \
	} while (0)
#else
#define EFSYS_ASSERT(_exp) (void)(_exp)
#endif
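
/*
 * Illustrative sketch of why EFSYS_ASSERT() is not defined via RTE_VERIFY():
 * some DPDK versions paste the stringified expression directly into
 * rte_panic()'s format string, so an assertion such as
 *
 *	EFSYS_ASSERT((len % sizeof(efx_qword_t)) == 0);
 *
 * would have its '%' interpreted as a conversion specifier on failure.
 * Passing the stringified expression through "%s", as above, keeps the
 * message literal. "len" is a hypothetical variable.
 */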

#define EFSYS_ASSERT3(_x, _op, _y, _t) EFSYS_ASSERT((_t)(_x) _op (_t)(_y))

#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)

/* ROTATE */

#define EFSYS_HAS_ROTL_DWORD 0

#ifdef __cplusplus
}
#endif

#endif /* _SFC_COMMON_EFSYS_H */