]> git.proxmox.com Git - ceph.git/blame - ceph/src/spdk/dpdk/drivers/net/sfc/efsys.h
update source to Ceph Pacific 16.2.2
[ceph.git] / ceph / src / spdk / dpdk / drivers / net / sfc / efsys.h
CommitLineData
11fdf7f2
TL
1/* SPDX-License-Identifier: BSD-3-Clause
2 *
f67539c2
TL
3 * Copyright(c) 2019-2020 Xilinx, Inc.
4 * Copyright(c) 2016-2019 Solarflare Communications Inc.
11fdf7f2
TL
5 *
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
8 */
9
10#ifndef _SFC_COMMON_EFSYS_H
11#define _SFC_COMMON_EFSYS_H
12
13#include <stdbool.h>
14
15#include <rte_spinlock.h>
16#include <rte_byteorder.h>
17#include <rte_debug.h>
18#include <rte_memzone.h>
19#include <rte_memory.h>
20#include <rte_memcpy.h>
21#include <rte_cycles.h>
22#include <rte_prefetch.h>
23#include <rte_common.h>
24#include <rte_malloc.h>
25#include <rte_log.h>
26#include <rte_io.h>
27
28#include "sfc_debug.h"
29#include "sfc_log.h"
30
31#ifdef __cplusplus
32extern "C" {
33#endif
34
/*
 * Common code relies on native 64-bit arithmetic; EFSYS_HAS_SSE2_M128
 * advertises the SSE2 __m128i type used by the oword access macros below.
 */
#define EFSYS_HAS_UINT64 1
#define EFSYS_USE_UINT64 1
#define EFSYS_HAS_SSE2_M128 1

/* Derive libefx endianness flags from the DPDK build-time byte order */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 1
#define EFSYS_IS_LITTLE_ENDIAN 0
#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 0
#define EFSYS_IS_LITTLE_ENDIAN 1
#else
#error "Cannot determine system endianness"
#endif
#include "efx_types.h"
50
/* libefx uses Solaris-style boolean_t/B_FALSE/B_TRUE; map to C99 bool */
typedef bool boolean_t;

#ifndef B_FALSE
#define B_FALSE false
#endif
#ifndef B_TRUE
#define B_TRUE true
#endif
59
/*
 * RTE_MAX() and RTE_MIN() cannot be used since braced-group within
 * expression allowed only inside a function, but MAX() is used as
 * a number of elements in array.
 */
#ifndef MAX
#define MAX(v1, v2) ((v1) < (v2) ? (v2) : (v1))
#endif
#ifndef MIN
#define MIN(v1, v2) ((v1) > (v2) ? (v2) : (v1))
#endif
71
/* Solaris-style "is power of two" predicate expected by common code */
#ifndef ISP2
#define ISP2(x) rte_is_power_of_2(x)
#endif

/* ENOTACTIVE (Solaris errno) is unavailable here; map to closest errno */
#define ENOTACTIVE ENOTCONN
77
/* Prefetch data expected to be read more than once (rte_prefetch0) */
static inline void
prefetch_read_many(const volatile void *addr)
{
	rte_prefetch0(addr);
}
83
/* Prefetch data expected to be read only once (non-temporal hint) */
static inline void
prefetch_read_once(const volatile void *addr)
{
	rte_prefetch_non_temporal(addr);
}
89
/* Code inclusion options */


#define EFSYS_OPT_NAMES 1

/* Disable SFN5xxx/SFN6xxx since it requires specific support in the PMD */
#define EFSYS_OPT_SIENA 0
/* Enable SFN7xxx support */
#define EFSYS_OPT_HUNTINGTON 1
/* Enable SFN8xxx support */
#define EFSYS_OPT_MEDFORD 1
/* Enable SFN X2xxx support */
#define EFSYS_OPT_MEDFORD2 1
/* Extra register checks are debug-only since they are costly */
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_OPT_CHECK_REG 1
#else
#define EFSYS_OPT_CHECK_REG 0
#endif

/* MCDI is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_MCDI 1
#define EFSYS_OPT_MCDI_LOGGING 1
#define EFSYS_OPT_MCDI_PROXY_AUTH 1

#define EFSYS_OPT_MAC_STATS 1

#define EFSYS_OPT_LOOPBACK 1

#define EFSYS_OPT_MON_MCDI 0
#define EFSYS_OPT_MON_STATS 0

#define EFSYS_OPT_PHY_STATS 0
#define EFSYS_OPT_BIST 0
#define EFSYS_OPT_PHY_LED_CONTROL 0
#define EFSYS_OPT_PHY_FLAGS 0

#define EFSYS_OPT_VPD 0
#define EFSYS_OPT_NVRAM 0
#define EFSYS_OPT_BOOTCFG 0
#define EFSYS_OPT_IMAGE_LAYOUT 0

#define EFSYS_OPT_DIAG 0
#define EFSYS_OPT_RX_SCALE 1
#define EFSYS_OPT_QSTATS 0
/* Filters support is required for SFN7xxx and SFN8xxx */
#define EFSYS_OPT_FILTER 1
#define EFSYS_OPT_RX_SCATTER 0

#define EFSYS_OPT_EV_PREFETCH 0

#define EFSYS_OPT_DECODE_INTR_FATAL 0

#define EFSYS_OPT_LICENSING 0

#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0

#define EFSYS_OPT_RX_PACKED_STREAM 0

#define EFSYS_OPT_RX_ES_SUPER_BUFFER 1

#define EFSYS_OPT_TUNNEL 1

#define EFSYS_OPT_FW_SUBVARIANT_AWARE 1

#define EFSYS_OPT_EVB 0

#define EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 0

/* ID */

/* Opaque per-NIC handle passed back to the environment by libefx */
typedef struct __efsys_identifier_s efsys_identifier_t;


/*
 * Probes (tracepoints): each EFSYS_PROBEn() variant takes a probe name
 * plus n (type, value) argument pairs and compiles to nothing here.
 */
#define EFSYS_PROBE(_name) \
	do { } while (0)

#define EFSYS_PROBE1(_name, _type1, _arg1) \
	do { } while (0)

#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
	do { } while (0)

#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3) \
	do { } while (0)

#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4) \
	do { } while (0)

#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5) \
	do { } while (0)

#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5, \
		     _type6, _arg6) \
	do { } while (0)

#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
		     _type3, _arg3, _type4, _arg4, _type5, _arg5, \
		     _type6, _arg6, _type7, _arg7) \
	do { } while (0)

194
/* DMA */

/* Bus (IO virtual) address as seen by the NIC */
typedef rte_iova_t efsys_dma_addr_t;

/* DMA-capable memory region backed by an rte_memzone */
typedef struct efsys_mem_s {
	const struct rte_memzone *esm_mz;
	/*
	 * Ideally it should have volatile qualifier to denote that
	 * the memory may be updated by someone else. However, it adds
	 * qualifier discard warnings when the pointer or its derivative
	 * is passed to memset() or rte_mov16().
	 * So, skip the qualifier here, but make sure that it is added
	 * below in access macros.
	 */
	void *esm_base;
	efsys_dma_addr_t esm_addr;
} efsys_mem_t;
212
213
/* Zero the first _size bytes of the DMA region */
#define EFSYS_MEM_ZERO(_esmp, _size) \
	do { \
		(void)memset((void *)(_esmp)->esm_base, 0, (_size)); \
		\
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Read a 32-bit dword at _offset (must be dword-aligned) into *(_edp) */
#define EFSYS_MEM_READD(_esmp, _offset, _edp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint32_t *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
					    sizeof(efx_dword_t))); \
		\
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		(_edp)->ed_u32[0] = _addr[0]; \
		\
		EFSYS_PROBE2(mem_readl, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		\
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Read a 64-bit qword at _offset (must be qword-aligned) into *(_eqp) */
#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint64_t *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
					    sizeof(efx_qword_t))); \
		\
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		(_eqp)->eq_u64[0] = _addr[0]; \
		\
		EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		\
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Read a 128-bit oword at _offset (must be oword-aligned) into *(_eop) */
#define EFSYS_MEM_READO(_esmp, _offset, _eop) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile __m128i *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
					    sizeof(efx_oword_t))); \
		\
		_addr = (volatile __m128i *)(_base + (_offset)); \
		(_eop)->eo_u128[0] = _addr[0]; \
		\
		EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		\
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
278
279
/* Write a 32-bit dword to _offset (must be dword-aligned) */
#define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint32_t *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
					    sizeof(efx_dword_t))); \
		\
		EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		\
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		_addr[0] = (_edp)->ed_u32[0]; \
		\
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Write a 64-bit qword to _offset (must be qword-aligned) */
#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile uint64_t *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
					    sizeof(efx_qword_t))); \
		\
		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		\
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		_addr[0] = (_eqp)->eq_u64[0]; \
		\
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Write a 128-bit oword to _offset (must be oword-aligned) */
#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		volatile __m128i *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
					    sizeof(efx_oword_t))); \
		\
		\
		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		\
		_addr = (volatile __m128i *)(_base + (_offset)); \
		_addr[0] = (_eop)->eo_u128[0]; \
		\
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)


/* Total size of the backing memzone (may exceed the requested size) */
#define EFSYS_MEM_SIZE(_esmp) \
	((_esmp)->esm_mz->len)

/* Bus address of the region start */
#define EFSYS_MEM_ADDR(_esmp) \
	((_esmp)->esm_addr)

/* True when the region has not been allocated (or was freed) */
#define EFSYS_MEM_IS_NULL(_esmp) \
	((_esmp)->esm_base == NULL)

/* Prefetch the cache line holding _offset within the region */
#define EFSYS_MEM_PREFETCH(_esmp, _offset) \
	do { \
		volatile uint8_t *_base = (_esmp)->esm_base; \
		\
		rte_prefetch0(_base + (_offset)); \
	} while (0)
355
356
/* BAR */

/*
 * Mapped PCI memory BAR. The spinlock serializes accesses that must not
 * be interleaved (see the _lock parameter of the access macros below).
 */
typedef struct efsys_bar_s {
	rte_spinlock_t esb_lock;
	int esb_rid;
	struct rte_pci_device *esb_dev;
	/*
	 * Ideally it should have volatile qualifier to denote that
	 * the memory may be updated by someone else. However, it adds
	 * qualifier discard warnings when the pointer or its derivative
	 * is passed to memset() or rte_mov16().
	 * So, skip the qualifier here, but make sure that it is added
	 * below in access macros.
	 */
	void *esb_base;
} efsys_bar_t;

/* _ifname is accepted for interface compatibility but unused here */
#define SFC_BAR_LOCK_INIT(_esbp, _ifname) \
	do { \
		rte_spinlock_init(&(_esbp)->esb_lock); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#define SFC_BAR_LOCK_DESTROY(_esbp) ((void)0)
#define SFC_BAR_LOCK(_esbp) rte_spinlock_lock(&(_esbp)->esb_lock)
#define SFC_BAR_UNLOCK(_esbp) rte_spinlock_unlock(&(_esbp)->esb_lock)
382
/* Read a dword register; takes the BAR lock when _lock is true */
#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint32_t *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
					    sizeof(efx_dword_t))); \
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		\
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		/* Order this read after all preceding loads */ \
		rte_rmb(); \
		(_edp)->ed_u32[0] = rte_read32_relaxed(_addr); \
		\
		EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		\
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Read a qword register; always locked (note: no _lock parameter) */
#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint64_t *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
					    sizeof(efx_qword_t))); \
		\
		SFC_BAR_LOCK(_esbp); \
		\
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		rte_rmb(); \
		(_eqp)->eq_u64[0] = rte_read64_relaxed(_addr); \
		\
		EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		\
		SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Read an oword (128-bit) register; takes the BAR lock when _lock is true */
#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile __m128i *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
					    sizeof(efx_oword_t))); \
		\
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		\
		_addr = (volatile __m128i *)(_base + (_offset)); \
		rte_rmb(); \
		/* There is no rte_read128_relaxed() yet */ \
		(_eop)->eo_u128[0] = _addr[0]; \
		\
		EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		\
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
460
461
/* Write a dword register; takes the BAR lock when _lock is true */
#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint32_t *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
					    sizeof(efx_dword_t))); \
		\
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		\
		EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
			     uint32_t, (_edp)->ed_u32[0]); \
		\
		_addr = (volatile uint32_t *)(_base + (_offset)); \
		rte_write32_relaxed((_edp)->ed_u32[0], _addr); \
		/* Make the store visible before any subsequent stores */ \
		rte_wmb(); \
		\
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Write a qword register; always locked (note: no _lock parameter) */
#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile uint64_t *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
					    sizeof(efx_qword_t))); \
		\
		SFC_BAR_LOCK(_esbp); \
		\
		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
			     uint32_t, (_eqp)->eq_u32[1], \
			     uint32_t, (_eqp)->eq_u32[0]); \
		\
		_addr = (volatile uint64_t *)(_base + (_offset)); \
		rte_write64_relaxed((_eqp)->eq_u64[0], _addr); \
		rte_wmb(); \
		\
		SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/*
 * Guarantees 64bit aligned 64bit writes to write combined BAR mapping
 * (required by PIO hardware).
 *
 * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) support
 * write-combined memory mapped to user-land, so just abort if used.
 */
#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \
	do { \
		rte_panic("Write-combined BAR access not supported"); \
	} while (B_FALSE)

/* Write an oword (128-bit) register; takes the BAR lock when _lock is true */
#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
	do { \
		volatile uint8_t *_base = (_esbp)->esb_base; \
		volatile __m128i *_addr; \
		\
		_NOTE(CONSTANTCONDITION); \
		SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \
					    sizeof(efx_oword_t))); \
		\
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_LOCK(_esbp); \
		\
		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
			     uint32_t, (_eop)->eo_u32[3], \
			     uint32_t, (_eop)->eo_u32[2], \
			     uint32_t, (_eop)->eo_u32[1], \
			     uint32_t, (_eop)->eo_u32[0]); \
		\
		_addr = (volatile __m128i *)(_base + (_offset)); \
		/* There is no rte_write128_relaxed() yet */ \
		_addr[0] = (_eop)->eo_u128[0]; \
		rte_wmb(); \
		\
		_NOTE(CONSTANTCONDITION); \
		if (_lock) \
			SFC_BAR_UNLOCK(_esbp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Use the standard octo-word write for doorbell writes */
#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \
	do { \
		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
559
/* SPIN */

/* Busy-wait for _us microseconds */
#define EFSYS_SPIN(_us) \
	do { \
		rte_delay_us(_us); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* PMD context cannot sleep; sleeping is implemented as a spin */
#define EFSYS_SLEEP EFSYS_SPIN

/* BARRIERS */

#define EFSYS_MEM_READ_BARRIER() rte_rmb()
#define EFSYS_PIO_WRITE_BARRIER() rte_io_wmb()

/* DMA SYNC */

/*
 * DPDK does not provide any DMA syncing API, and no PMD drivers
 * have any traces of explicit DMA syncing.
 * DMA mapping is assumed to be coherent.
 */

#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)

/* Just avoid store and compiler (implicitly) reordering */
#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()
587
/* TIMESTAMP */

typedef uint64_t efsys_timestamp_t;

/*
 * Store the current time in microseconds into *(_usp).
 *
 * Compute seconds and the sub-second remainder separately:
 * a plain `cycles * 1000000 / hz` overflows uint64_t once
 * cycles exceeds ~1.8e13 (roughly 1-2 hours of uptime at GHz
 * timer rates), which would corrupt timeout calculations.
 */
#define EFSYS_TIMESTAMP(_usp) \
	do { \
		uint64_t _cycles = rte_get_timer_cycles(); \
		uint64_t _hz = rte_get_timer_hz(); \
		\
		*(_usp) = (_cycles / _hz) * UINT64_C(1000000) + \
			((_cycles % _hz) * UINT64_C(1000000)) / _hz; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
598
/* KMEM */

/*
 * Allocate _size bytes of zeroed memory and store the pointer (or NULL
 * on failure) into _p. _esip (efsys_identifier_t *) is unused in the
 * DPDK environment; the (void) cast replaces the former self-assignment
 * `(_esip) = (_esip)`, which required an lvalue, triggered
 * -Wself-assign, and was inconsistent with EFSYS_KMEM_FREE below.
 */
#define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
	do { \
		(void)(_esip); \
		(_p) = rte_zmalloc("sfc", (_size), 0); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* Release memory obtained via EFSYS_KMEM_ALLOC; _size is ignored */
#define EFSYS_KMEM_FREE(_esip, _size, _p) \
	do { \
		(void)(_esip); \
		(void)(_size); \
		rte_free((_p)); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
615
/* LOCK */

typedef rte_spinlock_t efsys_lock_t;

/* _ifname and _label are accepted for interface compatibility but unused */
#define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
	rte_spinlock_init((_eslp))
#define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0)
#define SFC_EFSYS_LOCK(_eslp) \
	rte_spinlock_lock((_eslp))
#define SFC_EFSYS_UNLOCK(_eslp) \
	rte_spinlock_unlock((_eslp))
#define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
	SFC_ASSERT(rte_spinlock_is_locked((_eslp)))

/*
 * No interrupt state needs saving here, so _state just carries a magic
 * value set on lock and asserted on unlock to catch unbalanced usage.
 */
typedef int efsys_lock_state_t;

#define EFSYS_LOCK_MAGIC 0x000010c4

#define EFSYS_LOCK(_lockp, _state) \
	do { \
		SFC_EFSYS_LOCK(_lockp); \
		(_state) = EFSYS_LOCK_MAGIC; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_UNLOCK(_lockp, _state) \
	do { \
		SFC_ASSERT((_state) == EFSYS_LOCK_MAGIC); \
		SFC_EFSYS_UNLOCK(_lockp); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
647
/* STAT */

/* Statistics are plain 64-bit accumulators */
typedef uint64_t efsys_stat_t;

#define EFSYS_STAT_INCR(_knp, _delta) \
	do { \
		*(_knp) += (_delta); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_DECR(_knp, _delta) \
	do { \
		*(_knp) -= (_delta); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SET(_knp, _val) \
	do { \
		*(_knp) = (_val); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

/* _valp variants take little-endian qword/dword values (converted to CPU) */
#define EFSYS_STAT_SET_QWORD(_knp, _valp) \
	do { \
		*(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SET_DWORD(_knp, _valp) \
	do { \
		*(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
	do { \
		*(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)

#define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
	do { \
		*(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
693
/* ERR */

#if EFSYS_OPT_DECODE_INTR_FATAL
/* Log a fatal error; _esip is unused in the DPDK environment */
#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
	do { \
		(void)(_esip); \
		SFC_GENERIC_LOG(ERR, "FATAL ERROR #%u (0x%08x%08x)", \
			(_code), (_dword0), (_dword1)); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
#endif
705
/* ASSERT */

/* RTE_VERIFY from DPDK treats expressions with % operator incorrectly,
 * so we re-implement it here
 */
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_ASSERT(_exp) \
	do { \
		if (unlikely(!(_exp))) \
			rte_panic("line %d\tassert \"%s\" failed\n", \
				  __LINE__, (#_exp)); \
	} while (0)
#else
/* Non-debug: evaluate the expression but ignore its result */
#define EFSYS_ASSERT(_exp) (void)(_exp)
#endif

/* Three-way assertion: both operands are cast to _t before comparison */
#define EFSYS_ASSERT3(_x, _op, _y, _t) EFSYS_ASSERT((_t)(_x) _op (_t)(_y))

#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)

/* ROTATE */

/* No optimized dword rotate here; common code falls back to generic one */
#define EFSYS_HAS_ROTL_DWORD 0
731
732#ifdef __cplusplus
733}
734#endif
735
736#endif /* _SFC_COMMON_EFSYS_H */