]> git.proxmox.com Git - ceph.git/blob - ceph/src/seastar/dpdk/drivers/net/sfc/efsys.h
0405d02bb275366aced57538880ffd4dc2ab3180
[ceph.git] / ceph / src / seastar / dpdk / drivers / net / sfc / efsys.h
1 /*-
2 * BSD LICENSE
3 *
4 * Copyright (c) 2016-2017 Solarflare Communications Inc.
5 * All rights reserved.
6 *
7 * This software was jointly developed between OKTET Labs (under contract
8 * for Solarflare) and Solarflare Communications, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright notice,
14 * this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
29 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #ifndef _SFC_COMMON_EFSYS_H
33 #define _SFC_COMMON_EFSYS_H
34
35 #include <stdbool.h>
36
37 #include <rte_spinlock.h>
38 #include <rte_byteorder.h>
39 #include <rte_debug.h>
40 #include <rte_memzone.h>
41 #include <rte_memory.h>
42 #include <rte_memcpy.h>
43 #include <rte_cycles.h>
44 #include <rte_prefetch.h>
45 #include <rte_common.h>
46 #include <rte_malloc.h>
47 #include <rte_log.h>
48 #include <rte_io.h>
49
50 #include "sfc_debug.h"
51
52 #ifdef __cplusplus
53 extern "C" {
54 #endif
55
56 #define EFSYS_HAS_UINT64 1
57 #define EFSYS_USE_UINT64 1
58 #define EFSYS_HAS_SSE2_M128 1
59
60 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
61 #define EFSYS_IS_BIG_ENDIAN 1
62 #define EFSYS_IS_LITTLE_ENDIAN 0
63 #elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
64 #define EFSYS_IS_BIG_ENDIAN 0
65 #define EFSYS_IS_LITTLE_ENDIAN 1
66 #else
67 #error "Cannot determine system endianness"
68 #endif
69 #include "efx_types.h"
70
71
72 #ifndef _NOTE
73 #define _NOTE(s)
74 #endif
75
76 typedef bool boolean_t;
77
78 #ifndef B_FALSE
79 #define B_FALSE false
80 #endif
81 #ifndef B_TRUE
82 #define B_TRUE true
83 #endif
84
85 /*
86 * RTE_MAX() and RTE_MIN() cannot be used since a braced-group within
87 * an expression is allowed only inside a function, but MAX() is used
88 * as the number of elements in an array.
89 */
90 #ifndef MAX
91 #define MAX(v1, v2) ((v1) > (v2) ? (v1) : (v2))
92 #endif
93 #ifndef MIN
94 #define MIN(v1, v2) ((v1) < (v2) ? (v1) : (v2))
95 #endif
96
97 /* There are macros for alignment in DPDK, but we need to make a proper
98 * correspondence here, if we want to re-use them at all
99 */
100 #ifndef IS_P2ALIGNED
101 #define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
102 #endif
103
104 #ifndef P2ROUNDUP
105 #define P2ROUNDUP(x, align) (-(-(x) & -(align)))
106 #endif
107
108 #ifndef P2ALIGN
109 #define P2ALIGN(_x, _a) ((_x) & -(_a))
110 #endif
111
/*
 * Power-of-2 test. The guard must test the macro actually defined
 * below (ISP2); the original "#ifndef IS2P" was a typo which made
 * the guard useless and could cause a redefinition clash.
 */
#ifndef ISP2
#define ISP2(x) rte_is_power_of_2(x)
#endif
115
116 #define ENOTACTIVE ENOTCONN
117
/*
 * Prefetch hint for data that will be read repeatedly: map to the
 * highest-locality DPDK prefetch (rte_prefetch0, all cache levels).
 */
static inline void
prefetch_read_many(const volatile void *addr)
{
	rte_prefetch0(addr);
}
123
/*
 * Prefetch hint for data that will be read only once: use the
 * non-temporal DPDK prefetch to minimise cache pollution.
 */
static inline void
prefetch_read_once(const volatile void *addr)
{
	rte_prefetch_non_temporal(addr);
}
129
130 /* Modifiers used for Windows builds */
131 #define __in
132 #define __in_opt
133 #define __in_ecount(_n)
134 #define __in_ecount_opt(_n)
135 #define __in_bcount(_n)
136 #define __in_bcount_opt(_n)
137
138 #define __out
139 #define __out_opt
140 #define __out_ecount(_n)
141 #define __out_ecount_opt(_n)
142 #define __out_bcount(_n)
143 #define __out_bcount_opt(_n)
144
145 #define __deref_out
146
147 #define __inout
148 #define __inout_opt
149 #define __inout_ecount(_n)
150 #define __inout_ecount_opt(_n)
151 #define __inout_bcount(_n)
152 #define __inout_bcount_opt(_n)
153 #define __inout_bcount_full_opt(_n)
154
155 #define __deref_out_bcount_opt(n)
156
157 #define __checkReturn
158 #define __success(_x)
159
160 #define __drv_when(_p, _c)
161
162 /* Code inclusion options */
163
164
165 #define EFSYS_OPT_NAMES 1
166
167 /* Disable SFN5xxx/SFN6xxx since it requires specific support in the PMD */
168 #define EFSYS_OPT_SIENA 0
169 /* Enable SFN7xxx support */
170 #define EFSYS_OPT_HUNTINGTON 1
171 /* Enable SFN8xxx support */
172 #define EFSYS_OPT_MEDFORD 1
173 #ifdef RTE_LIBRTE_SFC_EFX_DEBUG
174 #define EFSYS_OPT_CHECK_REG 1
175 #else
176 #define EFSYS_OPT_CHECK_REG 0
177 #endif
178
179 /* MCDI is required for SFN7xxx and SFN8xxx */
180 #define EFSYS_OPT_MCDI 1
181 #define EFSYS_OPT_MCDI_LOGGING 1
182 #define EFSYS_OPT_MCDI_PROXY_AUTH 1
183
184 #define EFSYS_OPT_MAC_STATS 1
185
186 #define EFSYS_OPT_LOOPBACK 0
187
188 #define EFSYS_OPT_MON_MCDI 0
189 #define EFSYS_OPT_MON_STATS 0
190
191 #define EFSYS_OPT_PHY_STATS 0
192 #define EFSYS_OPT_BIST 0
193 #define EFSYS_OPT_PHY_LED_CONTROL 0
194 #define EFSYS_OPT_PHY_FLAGS 0
195
196 #define EFSYS_OPT_VPD 0
197 #define EFSYS_OPT_NVRAM 0
198 #define EFSYS_OPT_BOOTCFG 0
199
200 #define EFSYS_OPT_DIAG 0
201 #define EFSYS_OPT_RX_SCALE 1
202 #define EFSYS_OPT_QSTATS 0
203 /* Filters support is required for SFN7xxx and SFN8xxx */
204 #define EFSYS_OPT_FILTER 1
205 #define EFSYS_OPT_RX_SCATTER 0
206
207 #define EFSYS_OPT_EV_PREFETCH 0
208
209 #define EFSYS_OPT_DECODE_INTR_FATAL 0
210
211 #define EFSYS_OPT_LICENSING 0
212
213 #define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0
214
215 #define EFSYS_OPT_RX_PACKED_STREAM 0
216
217 /* ID */
218
219 typedef struct __efsys_identifier_s efsys_identifier_t;
220
221
222 #define EFSYS_PROBE(_name) \
223 do { } while (0)
224
225 #define EFSYS_PROBE1(_name, _type1, _arg1) \
226 do { } while (0)
227
228 #define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
229 do { } while (0)
230
231 #define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
232 _type3, _arg3) \
233 do { } while (0)
234
235 #define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
236 _type3, _arg3, _type4, _arg4) \
237 do { } while (0)
238
239 #define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
240 _type3, _arg3, _type4, _arg4, _type5, _arg5) \
241 do { } while (0)
242
243 #define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
244 _type3, _arg3, _type4, _arg4, _type5, _arg5, \
245 _type6, _arg6) \
246 do { } while (0)
247
248 #define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
249 _type3, _arg3, _type4, _arg4, _type5, _arg5, \
250 _type6, _arg6, _type7, _arg7) \
251 do { } while (0)
252
253
254 /* DMA */
255
256 typedef phys_addr_t efsys_dma_addr_t;
257
/* DMA-able memory region backed by a DPDK memzone */
typedef struct efsys_mem_s {
	/* Memzone the region was allocated from */
	const struct rte_memzone *esm_mz;
	/*
	 * Ideally it should have volatile qualifier to denote that
	 * the memory may be updated by someone else. However, it adds
	 * qualifier discard warnings when the pointer or its derivative
	 * is passed to memset() or rte_mov16().
	 * So, skip the qualifier here, but make sure that it is added
	 * below in access macros.
	 */
	void *esm_base;		/* Virtual address of the region start */
	efsys_dma_addr_t esm_addr;	/* Bus (IOVA/physical) address */
} efsys_mem_t;
271
272
273 #define EFSYS_MEM_ZERO(_esmp, _size) \
274 do { \
275 (void)memset((void *)(_esmp)->esm_base, 0, (_size)); \
276 \
277 _NOTE(CONSTANTCONDITION); \
278 } while (B_FALSE)
279
280 #define EFSYS_MEM_READD(_esmp, _offset, _edp) \
281 do { \
282 volatile uint8_t *_base = (_esmp)->esm_base; \
283 volatile uint32_t *_addr; \
284 \
285 _NOTE(CONSTANTCONDITION); \
286 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
287 \
288 _addr = (volatile uint32_t *)(_base + (_offset)); \
289 (_edp)->ed_u32[0] = _addr[0]; \
290 \
291 EFSYS_PROBE2(mem_readl, unsigned int, (_offset), \
292 uint32_t, (_edp)->ed_u32[0]); \
293 \
294 _NOTE(CONSTANTCONDITION); \
295 } while (B_FALSE)
296
297 #define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
298 do { \
299 volatile uint8_t *_base = (_esmp)->esm_base; \
300 volatile uint64_t *_addr; \
301 \
302 _NOTE(CONSTANTCONDITION); \
303 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
304 \
305 _addr = (volatile uint64_t *)(_base + (_offset)); \
306 (_eqp)->eq_u64[0] = _addr[0]; \
307 \
308 EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
309 uint32_t, (_eqp)->eq_u32[1], \
310 uint32_t, (_eqp)->eq_u32[0]); \
311 \
312 _NOTE(CONSTANTCONDITION); \
313 } while (B_FALSE)
314
315 #define EFSYS_MEM_READO(_esmp, _offset, _eop) \
316 do { \
317 volatile uint8_t *_base = (_esmp)->esm_base; \
318 volatile __m128i *_addr; \
319 \
320 _NOTE(CONSTANTCONDITION); \
321 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
322 \
323 _addr = (volatile __m128i *)(_base + (_offset)); \
324 (_eop)->eo_u128[0] = _addr[0]; \
325 \
326 EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
327 uint32_t, (_eop)->eo_u32[3], \
328 uint32_t, (_eop)->eo_u32[2], \
329 uint32_t, (_eop)->eo_u32[1], \
330 uint32_t, (_eop)->eo_u32[0]); \
331 \
332 _NOTE(CONSTANTCONDITION); \
333 } while (B_FALSE)
334
335
336 #define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
337 do { \
338 volatile uint8_t *_base = (_esmp)->esm_base; \
339 volatile uint32_t *_addr; \
340 \
341 _NOTE(CONSTANTCONDITION); \
342 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
343 \
344 EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \
345 uint32_t, (_edp)->ed_u32[0]); \
346 \
347 _addr = (volatile uint32_t *)(_base + (_offset)); \
348 _addr[0] = (_edp)->ed_u32[0]; \
349 \
350 _NOTE(CONSTANTCONDITION); \
351 } while (B_FALSE)
352
353 #define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
354 do { \
355 volatile uint8_t *_base = (_esmp)->esm_base; \
356 volatile uint64_t *_addr; \
357 \
358 _NOTE(CONSTANTCONDITION); \
359 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
360 \
361 EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
362 uint32_t, (_eqp)->eq_u32[1], \
363 uint32_t, (_eqp)->eq_u32[0]); \
364 \
365 _addr = (volatile uint64_t *)(_base + (_offset)); \
366 _addr[0] = (_eqp)->eq_u64[0]; \
367 \
368 _NOTE(CONSTANTCONDITION); \
369 } while (B_FALSE)
370
371 #define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
372 do { \
373 volatile uint8_t *_base = (_esmp)->esm_base; \
374 volatile __m128i *_addr; \
375 \
376 _NOTE(CONSTANTCONDITION); \
377 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
378 \
379 \
380 EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
381 uint32_t, (_eop)->eo_u32[3], \
382 uint32_t, (_eop)->eo_u32[2], \
383 uint32_t, (_eop)->eo_u32[1], \
384 uint32_t, (_eop)->eo_u32[0]); \
385 \
386 _addr = (volatile __m128i *)(_base + (_offset)); \
387 _addr[0] = (_eop)->eo_u128[0]; \
388 \
389 _NOTE(CONSTANTCONDITION); \
390 } while (B_FALSE)
391
392
393 #define EFSYS_MEM_ADDR(_esmp) \
394 ((_esmp)->esm_addr)
395
396 #define EFSYS_MEM_IS_NULL(_esmp) \
397 ((_esmp)->esm_base == NULL)
398
399 #define EFSYS_MEM_PREFETCH(_esmp, _offset) \
400 do { \
401 volatile uint8_t *_base = (_esmp)->esm_base; \
402 \
403 rte_prefetch0(_base + (_offset)); \
404 } while (0)
405
406
407 /* BAR */
408
/* Memory-mapped PCI BAR together with a lock serialising accesses */
typedef struct efsys_bar_s {
	rte_spinlock_t esb_lock;	/* Protects non-atomic BAR accesses */
	int esb_rid;			/* BAR (resource) index */
	struct rte_pci_device *esb_dev;	/* Owning PCI device */
	/*
	 * Ideally it should have volatile qualifier to denote that
	 * the memory may be updated by someone else. However, it adds
	 * qualifier discard warnings when the pointer or its derivative
	 * is passed to memset() or rte_mov16().
	 * So, skip the qualifier here, but make sure that it is added
	 * below in access macros.
	 */
	void *esb_base;		/* Mapped virtual address of the BAR */
} efsys_bar_t;
423
424 #define SFC_BAR_LOCK_INIT(_esbp, _ifname) \
425 do { \
426 rte_spinlock_init(&(_esbp)->esb_lock); \
427 _NOTE(CONSTANTCONDITION); \
428 } while (B_FALSE)
429 #define SFC_BAR_LOCK_DESTROY(_esbp) ((void)0)
430 #define SFC_BAR_LOCK(_esbp) rte_spinlock_lock(&(_esbp)->esb_lock)
431 #define SFC_BAR_UNLOCK(_esbp) rte_spinlock_unlock(&(_esbp)->esb_lock)
432
433 #define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
434 do { \
435 volatile uint8_t *_base = (_esbp)->esb_base; \
436 volatile uint32_t *_addr; \
437 \
438 _NOTE(CONSTANTCONDITION); \
439 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
440 _NOTE(CONSTANTCONDITION); \
441 if (_lock) \
442 SFC_BAR_LOCK(_esbp); \
443 \
444 _addr = (volatile uint32_t *)(_base + (_offset)); \
445 rte_rmb(); \
446 (_edp)->ed_u32[0] = rte_read32_relaxed(_addr); \
447 \
448 EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \
449 uint32_t, (_edp)->ed_u32[0]); \
450 \
451 _NOTE(CONSTANTCONDITION); \
452 if (_lock) \
453 SFC_BAR_UNLOCK(_esbp); \
454 _NOTE(CONSTANTCONDITION); \
455 } while (B_FALSE)
456
457 #define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
458 do { \
459 volatile uint8_t *_base = (_esbp)->esb_base; \
460 volatile uint64_t *_addr; \
461 \
462 _NOTE(CONSTANTCONDITION); \
463 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
464 \
465 SFC_BAR_LOCK(_esbp); \
466 \
467 _addr = (volatile uint64_t *)(_base + (_offset)); \
468 rte_rmb(); \
469 (_eqp)->eq_u64[0] = rte_read64_relaxed(_addr); \
470 \
471 EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
472 uint32_t, (_eqp)->eq_u32[1], \
473 uint32_t, (_eqp)->eq_u32[0]); \
474 \
475 SFC_BAR_UNLOCK(_esbp); \
476 _NOTE(CONSTANTCONDITION); \
477 } while (B_FALSE)
478
479 #define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
480 do { \
481 volatile uint8_t *_base = (_esbp)->esb_base; \
482 volatile __m128i *_addr; \
483 \
484 _NOTE(CONSTANTCONDITION); \
485 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
486 \
487 _NOTE(CONSTANTCONDITION); \
488 if (_lock) \
489 SFC_BAR_LOCK(_esbp); \
490 \
491 _addr = (volatile __m128i *)(_base + (_offset)); \
492 rte_rmb(); \
493 /* There is no rte_read128_relaxed() yet */ \
494 (_eop)->eo_u128[0] = _addr[0]; \
495 \
496 EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
497 uint32_t, (_eop)->eo_u32[3], \
498 uint32_t, (_eop)->eo_u32[2], \
499 uint32_t, (_eop)->eo_u32[1], \
500 uint32_t, (_eop)->eo_u32[0]); \
501 \
502 _NOTE(CONSTANTCONDITION); \
503 if (_lock) \
504 SFC_BAR_UNLOCK(_esbp); \
505 _NOTE(CONSTANTCONDITION); \
506 } while (B_FALSE)
507
508
509 #define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
510 do { \
511 volatile uint8_t *_base = (_esbp)->esb_base; \
512 volatile uint32_t *_addr; \
513 \
514 _NOTE(CONSTANTCONDITION); \
515 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
516 \
517 _NOTE(CONSTANTCONDITION); \
518 if (_lock) \
519 SFC_BAR_LOCK(_esbp); \
520 \
521 EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
522 uint32_t, (_edp)->ed_u32[0]); \
523 \
524 _addr = (volatile uint32_t *)(_base + (_offset)); \
525 rte_write32_relaxed((_edp)->ed_u32[0], _addr); \
526 rte_wmb(); \
527 \
528 _NOTE(CONSTANTCONDITION); \
529 if (_lock) \
530 SFC_BAR_UNLOCK(_esbp); \
531 _NOTE(CONSTANTCONDITION); \
532 } while (B_FALSE)
533
534 #define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
535 do { \
536 volatile uint8_t *_base = (_esbp)->esb_base; \
537 volatile uint64_t *_addr; \
538 \
539 _NOTE(CONSTANTCONDITION); \
540 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
541 \
542 SFC_BAR_LOCK(_esbp); \
543 \
544 EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
545 uint32_t, (_eqp)->eq_u32[1], \
546 uint32_t, (_eqp)->eq_u32[0]); \
547 \
548 _addr = (volatile uint64_t *)(_base + (_offset)); \
549 rte_write64_relaxed((_eqp)->eq_u64[0], _addr); \
550 rte_wmb(); \
551 \
552 SFC_BAR_UNLOCK(_esbp); \
553 _NOTE(CONSTANTCONDITION); \
554 } while (B_FALSE)
555
556 /*
557 * Guarantees 64bit aligned 64bit writes to write combined BAR mapping
558 * (required by PIO hardware).
559 *
560 * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) support
561 * write-combined memory mapped to user-land, so just abort if used.
562 */
563 #define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \
564 do { \
565 rte_panic("Write-combined BAR access not supported"); \
566 } while (B_FALSE)
567
568 #define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
569 do { \
570 volatile uint8_t *_base = (_esbp)->esb_base; \
571 volatile __m128i *_addr; \
572 \
573 _NOTE(CONSTANTCONDITION); \
574 SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
575 \
576 _NOTE(CONSTANTCONDITION); \
577 if (_lock) \
578 SFC_BAR_LOCK(_esbp); \
579 \
580 EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
581 uint32_t, (_eop)->eo_u32[3], \
582 uint32_t, (_eop)->eo_u32[2], \
583 uint32_t, (_eop)->eo_u32[1], \
584 uint32_t, (_eop)->eo_u32[0]); \
585 \
586 _addr = (volatile __m128i *)(_base + (_offset)); \
587 /* There is no rte_write128_relaxed() yet */ \
588 _addr[0] = (_eop)->eo_u128[0]; \
589 rte_wmb(); \
590 \
591 _NOTE(CONSTANTCONDITION); \
592 if (_lock) \
593 SFC_BAR_UNLOCK(_esbp); \
594 _NOTE(CONSTANTCONDITION); \
595 } while (B_FALSE)
596
597 /* Use the standard octo-word write for doorbell writes */
598 #define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \
599 do { \
600 EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \
601 _NOTE(CONSTANTCONDITION); \
602 } while (B_FALSE)
603
604 /* SPIN */
605
606 #define EFSYS_SPIN(_us) \
607 do { \
608 rte_delay_us(_us); \
609 _NOTE(CONSTANTCONDITION); \
610 } while (B_FALSE)
611
612 #define EFSYS_SLEEP EFSYS_SPIN
613
614 /* BARRIERS */
615
616 #define EFSYS_MEM_READ_BARRIER() rte_rmb()
617 #define EFSYS_PIO_WRITE_BARRIER() rte_io_wmb()
618
619 /* DMA SYNC */
620
621 /*
622 * DPDK does not provide any DMA syncing API, and no PMD drivers
623 * have any traces of explicit DMA syncing.
624 * DMA mapping is assumed to be coherent.
625 */
626
627 #define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)
628
629 /* Just avoid store and compiler (implicit) reordering */
630 #define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()
631
632 /* TIMESTAMP */
633
634 typedef uint64_t efsys_timestamp_t;
635
/*
 * Store the current timer value converted to microseconds in *(_usp).
 * Compute whole seconds and the sub-second remainder separately:
 * the original single-expression form (cycles * 1000000 / hz) overflows
 * uint64_t after only a few hours of uptime on a multi-GHz TSC, since
 * cycles * 1000000 exceeds 2^64. The remainder is always < hz, so
 * remainder * 1000000 cannot overflow for any realistic timer frequency.
 */
#define EFSYS_TIMESTAMP(_usp) \
	do { \
		uint64_t _cycles = rte_get_timer_cycles(); \
		uint64_t _hz = rte_get_timer_hz(); \
		\
		*(_usp) = (_cycles / _hz) * UINT64_C(1000000) + \
			((_cycles % _hz) * UINT64_C(1000000)) / _hz; \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
642
643 /* KMEM */
644
/*
 * Allocate zeroed memory of (_size) bytes from the DPDK heap; the
 * system identifier (_esip) is unused here. Discard it with a void
 * cast — consistent with EFSYS_KMEM_FREE below — instead of the
 * original self-assignment "(_esip) = (_esip);", which trips
 * -Wself-assign style diagnostics. On failure (_p) is set to NULL;
 * callers are expected to check it.
 */
#define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
	do { \
		(void)(_esip); \
		(_p) = rte_zmalloc("sfc", (_size), 0); \
		_NOTE(CONSTANTCONDITION); \
	} while (B_FALSE)
651
652 #define EFSYS_KMEM_FREE(_esip, _size, _p) \
653 do { \
654 (void)(_esip); \
655 (void)(_size); \
656 rte_free((_p)); \
657 _NOTE(CONSTANTCONDITION); \
658 } while (B_FALSE)
659
660 /* LOCK */
661
662 typedef rte_spinlock_t efsys_lock_t;
663
664 #define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
665 rte_spinlock_init((_eslp))
666 #define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0)
667 #define SFC_EFSYS_LOCK(_eslp) \
668 rte_spinlock_lock((_eslp))
669 #define SFC_EFSYS_UNLOCK(_eslp) \
670 rte_spinlock_unlock((_eslp))
671 #define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
672 SFC_ASSERT(rte_spinlock_is_locked((_eslp)))
673
674 typedef int efsys_lock_state_t;
675
676 #define EFSYS_LOCK_MAGIC 0x000010c4
677
678 #define EFSYS_LOCK(_lockp, _state) \
679 do { \
680 SFC_EFSYS_LOCK(_lockp); \
681 (_state) = EFSYS_LOCK_MAGIC; \
682 _NOTE(CONSTANTCONDITION); \
683 } while (B_FALSE)
684
685 #define EFSYS_UNLOCK(_lockp, _state) \
686 do { \
687 SFC_ASSERT((_state) == EFSYS_LOCK_MAGIC); \
688 SFC_EFSYS_UNLOCK(_lockp); \
689 _NOTE(CONSTANTCONDITION); \
690 } while (B_FALSE)
691
692 /* STAT */
693
694 typedef uint64_t efsys_stat_t;
695
696 #define EFSYS_STAT_INCR(_knp, _delta) \
697 do { \
698 *(_knp) += (_delta); \
699 _NOTE(CONSTANTCONDITION); \
700 } while (B_FALSE)
701
702 #define EFSYS_STAT_DECR(_knp, _delta) \
703 do { \
704 *(_knp) -= (_delta); \
705 _NOTE(CONSTANTCONDITION); \
706 } while (B_FALSE)
707
708 #define EFSYS_STAT_SET(_knp, _val) \
709 do { \
710 *(_knp) = (_val); \
711 _NOTE(CONSTANTCONDITION); \
712 } while (B_FALSE)
713
714 #define EFSYS_STAT_SET_QWORD(_knp, _valp) \
715 do { \
716 *(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]); \
717 _NOTE(CONSTANTCONDITION); \
718 } while (B_FALSE)
719
720 #define EFSYS_STAT_SET_DWORD(_knp, _valp) \
721 do { \
722 *(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]); \
723 _NOTE(CONSTANTCONDITION); \
724 } while (B_FALSE)
725
726 #define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
727 do { \
728 *(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]); \
729 _NOTE(CONSTANTCONDITION); \
730 } while (B_FALSE)
731
732 #define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
733 do { \
734 *(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]); \
735 _NOTE(CONSTANTCONDITION); \
736 } while (B_FALSE)
737
738 /* ERR */
739
740 #if EFSYS_OPT_DECODE_INTR_FATAL
741 #define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
742 do { \
743 (void)(_esip); \
744 RTE_LOG(ERR, PMD, "FATAL ERROR #%u (0x%08x%08x)\n", \
745 (_code), (_dword0), (_dword1)); \
746 _NOTE(CONSTANTCONDITION); \
747 } while (B_FALSE)
748 #endif
749
750 /* ASSERT */
751
752 /* RTE_VERIFY from DPDK treats expressions with % operator incorrectly,
753 * so we re-implement it here
754 */
755 #ifdef RTE_LIBRTE_SFC_EFX_DEBUG
756 #define EFSYS_ASSERT(_exp) \
757 do { \
758 if (unlikely(!(_exp))) \
759 rte_panic("line %d\tassert \"%s\" failed\n", \
760 __LINE__, (#_exp)); \
761 } while (0)
762 #else
763 #define EFSYS_ASSERT(_exp) (void)(_exp)
764 #endif
765
766 #define EFSYS_ASSERT3(_x, _op, _y, _t) EFSYS_ASSERT((_t)(_x) _op (_t)(_y))
767
768 #define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
769 #define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
770 #define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
771
772 /* ROTATE */
773
774 #define EFSYS_HAS_ROTL_DWORD 0
775
776 #ifdef __cplusplus
777 }
778 #endif
779
780 #endif /* _SFC_COMMON_EFSYS_H */