/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
/* Falcon-architecture (SFC4000 and SFC9000-family) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */
/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
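
/* For illustration (not in the original source): these ORDER values feed
 * FRF_AZ_TX_DC_SIZE/FRF_AZ_RX_DC_SIZE in efx_farch_init_common(), where
 * entries = 8 << order, i.e. 8 << 1 == 16 TX and 8 << 3 == 64 RX entries,
 * matching the BUILD_BUG_ON() checks there.
 */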
/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5
/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4
/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);
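
/* Worked example (for illustration only): EFX_CHANNEL_MAGIC_TEST() on
 * channel 3 encodes (0x000101 << 8) | 3 == 0x00010103, and
 * _EFX_CHANNEL_MAGIC_CODE(0x00010103) recovers 0x000101, which is how
 * efx_farch_handle_generated_event() can dispatch on the code alone.
 */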
/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/
static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}
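
/* E.g. (illustrative): with only mask bit 5 set, a->u64[0] == 0x20 and
 * b->u64[0] == 0x00 differ under the mask, so the register self-test below
 * treats the tested register as faulty.
 */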
int efx_farch_test_registers(struct efx_nic *efx,
			     const struct efx_farch_register_test *regs,
			     size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}
/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/
/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}
/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}
/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_sriov_enabled(efx) &&
	       efx->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}
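
/* Sizing sketch (illustrative, not from the original source): a descriptor
 * ring with ptr_mask 511 has 512 entries of sizeof(efx_qword_t) == 8 bytes,
 * so len == 4096; ALIGN() leaves that unchanged and it occupies exactly one
 * EFX_BUF_SIZE (4KB) buffer table entry.
 */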
static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	efx_nic_free_buffer(efx, &buffer->buf);
	buffer->entries = 0;
}
/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/
/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}
/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
					  const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}
/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_farch_push_tx_desc(tx_queue, txd);
	} else {
		efx_farch_notify_tx_desc(tx_queue);
	}
}
/* Allocate hardware resources for a TX queue */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}
void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			__clear_bit_le(tx_queue->queue, &reg);
		else
			__set_bit_le(tx_queue->queue, &reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}
static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}
void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}
/* Free buffers backing TX queue */
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}
/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/
/* This creates an entry in the RX descriptor queue */
static inline void
efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}
/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_farch_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}
int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}
void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;
	bool jumbo_en;

	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
	 * DMA to continue after a PCIe page boundary (and scattering
	 * is not possible).  In Falcon B0 and Siena, it enables
	 * scatter.
	 */
	jumbo_en = !is_b0 || efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}
static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}
void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}
/* Free buffers backing RX queue */
void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}
/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/
/* efx_farch_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_farch_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_farch_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->drain_pending) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}
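
/* E.g. (illustrative): with EFX_RX_FLUSH_COUNT == 4, the waiter is woken
 * either when every drain has completed (drain_pending == 0) or when an RX
 * flush slot frees up (fewer than four outstanding) while further RX
 * flushes are still pending.
 */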
static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
	bool i = true;
	efx_oword_t txd_ptr_tbl;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment drain_pending as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				efx_farch_magic_event(channel,
						      EFX_CHANNEL_MAGIC_TX_DRAIN(
							      tx_queue));
			}
		}
	}

	return i;
}
/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
static int efx_farch_do_flush(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			efx_farch_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->drain_pending) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion). If that fails, fall back to the old scheme.
		 */
		if (efx_sriov_enabled(efx)) {
			rc = efx_mcdi_flush_rxqs(efx);
			if (!rc)
				break;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_farch_flush_rx_queue(rx_queue);
				}
			}
		}

		timeout = wait_event_timeout(efx->flush_wq,
					     efx_farch_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->drain_pending) &&
	    !efx_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->drain_pending, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	return rc;
}
int efx_farch_fini_dmaq(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc = 0;

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		/* Only perform flush if DMA is enabled */
		if (efx->pci_dev->is_busmaster) {
			efx->type->prepare_flush(efx);
			rc = efx_farch_do_flush(efx);
			efx->type->finish_flush(efx);
		}

		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_farch_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_farch_tx_fini(tx_queue);
		}
	}

	return rc;
}
/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/
/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_farch_ev_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	efx_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}
/* Use HW to insert a SW defined event */
void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
			      efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}
static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_farch_generate_event(channel->efx, channel->channel, &event);
}
/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_farch_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}
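
/* Worked example (illustrative): with ptr_mask 511, read_count 510 and an
 * event carrying desc_ptr 4, tx_packets = (4 - 510) & 511 == 6, i.e. the
 * modular subtraction counts completions across the ring wrap-around.
 */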
/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				      const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}
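
/* E.g. (illustrative): a frame with both FRM_TRUNC and ETH_CRC_ERR set is
 * returned as EFX_RX_PKT_DISCARD, while a bare TCP/UDP checksum error is
 * not discarded here; it is delivered and only the
 * n_rx_tcp_udp_chksum_err counter is bumped above.
 */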
/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
		return true;
	}

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
	return false;
}
/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
	u16 flags;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return;

	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
	rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
			rx_queue->ptr_mask);

	/* Check for partial drops and other errors */
	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
		if (rx_ev_desc_ptr != expected_ptr &&
		    !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
			return;

		/* Discard all pending fragments */
		if (rx_queue->scatter_n) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
			rx_queue->removed_count += rx_queue->scatter_n;
			rx_queue->scatter_n = 0;
		}

		/* Return if there is no new fragment */
		if (rx_ev_desc_ptr != expected_ptr)
			return;

		/* Discard new fragment if not SOP */
		if (!rx_ev_sop) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				1, 0, EFX_RX_PKT_DISCARD);
			++rx_queue->removed_count;
			return;
		}
	}

	++rx_queue->scatter_n;
	if (rx_ev_cont)
		return;

	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK then we can rely on the
		 * hardware checksum and classification.
		 */
		flags = 0;
		switch (rx_ev_hdr_type) {
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
			flags |= EFX_RX_PKT_TCP;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
			flags |= EFX_RX_PKT_CSUMMED;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
			break;
		}
	} else {
		flags = efx_farch_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue,
		      rx_queue->removed_count & rx_queue->ptr_mask,
		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
}
/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_tx_queue *tx_queue;
	int qid;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
					    qid % EFX_TXQ_TYPES);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
			efx_farch_magic_event(tx_queue->channel,
					      EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
		}
	}
}
/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = efx_get_channel(efx, qid);
	if (!efx_channel_has_rx_queue(channel))
		return;
	rx_queue = efx_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
				      EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}
static void
efx_farch_handle_drain_event(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->drain_pending) == 0);
	atomic_dec(&efx->drain_pending);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}
static void efx_farch_handle_generated_event(struct efx_channel *channel,
					     efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue =
		efx_channel_has_rx_queue(channel) ?
		efx_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EFX_CHANNEL_MAGIC_CODE(magic);

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue. Refill it here */
		efx_fast_push_rx_descriptors(rx_queue);
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		efx_farch_handle_drain_event(channel);
	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
		efx_farch_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
	}
}
static void
efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_tx_flush_done(efx, event);
		efx_sriov_tx_flush_done(efx, event);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_rx_flush_done(efx, event);
		efx_sriov_rx_flush_done(efx, event);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, rx_err, efx->net_dev,
				  "RX DMA Q %d reports descriptor fetch error."
				  " RX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX DMA Q %d reports descriptor fetch error."
				  " TX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}
int efx_farch_ev_process(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_farch_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_farch_handle_tx_event(channel,
								&event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_farch_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_farch_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_USER_EV:
			efx_sriov_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}
/* Allocate buffer table entries for event queue */
int efx_farch_ev_probe(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}
void efx_farch_ev_init(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}
void efx_farch_ev_fini(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}
/* Free buffers backing event queue */
void efx_farch_ev_remove(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}
void efx_farch_ev_test_generate(struct efx_channel *channel)
{
	efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}

void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
{
	efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
			      EFX_CHANNEL_MAGIC_FILL(rx_queue));
}
/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/
/* Enable/disable/generate interrupts */
static inline void efx_farch_interrupts(struct efx_nic *efx,
					bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}
void efx_farch_irq_enable_master(struct efx_nic *efx)
{
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	efx_farch_interrupts(efx, true, false);
}

void efx_farch_irq_disable_master(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_farch_interrupts(efx, false, false);
}
/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_farch_irq_test_generate(struct efx_nic *efx)
{
	efx_farch_interrupts(efx, true, true);
}
/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_farch_irq_disable_master(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen."
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}
/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedule event queue processing.
 */
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Legacy interrupts are disabled too late by the EEH kernel
	 * code. Disable them earlier.
	 * If an EEH error occurred, the read will have returned all ones.
	 */
	if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
	    !efx->eeh_disabled_legacy_irq) {
		disable_irq_nosync(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = true;
	}

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level) && soft_enabled) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		if (likely(soft_enabled)) {
			efx_for_each_channel(channel, efx) {
				if (queues & 1)
					efx_schedule_channel_irq(channel);
				queues >>= 1;
			}
		}
		result = IRQ_HANDLED;

	} else {
		efx_qword_t *event;

		/* Legacy ISR read can return zero once (SF bug 15783) */

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		if (likely(soft_enabled)) {
			efx_for_each_channel(channel, efx) {
				event = efx_event(channel,
						  channel->eventq_read_ptr);
				if (efx_event_present(event))
					efx_schedule_channel_irq(channel);
				else
					efx_farch_ev_read_ack(channel);
			}
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return result;
}
/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Handle non-event-queue sources */
	if (context->index == efx->irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	efx_schedule_channel_irq(efx->channel[context->index]);

	return IRQ_HANDLED;
}
/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_farch_rx_push_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed(efx, &dword,
			   FR_BZ_RX_INDIRECTION_TBL +
			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
	}
}
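
/* E.g. (illustrative): with four RX queues, efx->rx_indir_table is typically
 * filled with the repeating pattern 0,1,2,3 across its
 * FR_BZ_RX_INDIRECTION_TBL_ROWS rows, so RSS hash buckets map round-robin
 * onto the queues.
 */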
/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 * 0                          buftbl entries for channels
 * efx->vf_buftbl_base        buftbl entries for SR-IOV
 * efx->rx_dc_base            RX descriptor caches
 * efx->tx_dc_base            TX descriptor caches
 */
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
	unsigned vi_count, buftbl_min;

	/* Account for the buffer table entries backing the datapath channels
	 * and the descriptor caches for those channels.
	 */
	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
		       efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
		       efx->n_channels * EFX_MAX_EVQ_SIZE)
		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
	vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef CONFIG_SFC_SRIOV
	if (efx_sriov_wanted(efx)) {
		unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;

		efx->vf_buftbl_base = buftbl_min;

		vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
		vi_count = max(vi_count, EFX_VI_BASE);
		buftbl_free = (sram_lim_qw - buftbl_min -
			       vi_count * vi_dc_entries);

		entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
				  efx_vf_size(efx));
		vf_limit = min(buftbl_free / entries_per_vf,
			       (1024U - EFX_VI_BASE) >> efx->vi_scale);

		if (efx->vf_count > vf_limit) {
			netif_err(efx, probe, efx->net_dev,
				  "Reducing VF count from %d to %d\n",
				  efx->vf_count, vf_limit);
			efx->vf_count = vf_limit;
		}
		vi_count += efx->vf_count * efx_vf_size(efx);
	}
#endif

	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}
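
/* Layout sketch with made-up numbers (not from the original source): with
 * sram_lim_qw == 65536 and vi_count == 32, tx_dc_base == 65536 - 32 * 16 ==
 * 65024 and rx_dc_base == 65024 - 32 * 64 == 62976, leaving everything
 * below rx_dc_base for buffer table entries.
 */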
u32 efx_farch_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}
void efx_farch_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_farch_rx_push_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}
/**************************************************************************
 *
 * Filter tables
 *
 **************************************************************************
 */
/* "Fudge factors" - difference between programmed value and actual depth.
 * Due to pipelined implementation we need to program H/W with a value that
 * is larger than the hop limit we want.
 */
#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
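
/* For example (illustrative): to get an effective TCP wildcard hop limit of
 * 5, efx_farch_filter_push_rx_config() below programs
 * FRF_BZ_TCP_WILD_SRCH_LIMIT with 5 + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD
 * == 8.
 */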
/* Hard maximum search limit.  Hardware will time-out beyond 200-something.
 * We also need to avoid infinite loops in efx_farch_filter_search() when the
 * table is full.
 */
#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200
/* Don't try very hard to find space for performance hints, as this is
 * counter-productive. */
#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
enum efx_farch_filter_type {
	EFX_FARCH_FILTER_TCP_FULL = 0,
	EFX_FARCH_FILTER_TCP_WILD,
	EFX_FARCH_FILTER_UDP_FULL,
	EFX_FARCH_FILTER_UDP_WILD,
	EFX_FARCH_FILTER_MAC_FULL = 4,
	EFX_FARCH_FILTER_MAC_WILD,
	EFX_FARCH_FILTER_UC_DEF = 8,
	EFX_FARCH_FILTER_MC_DEF,
	EFX_FARCH_FILTER_TYPE_COUNT,	/* number of specific types */
};
enum efx_farch_filter_table_id {
	EFX_FARCH_FILTER_TABLE_RX_IP = 0,
	EFX_FARCH_FILTER_TABLE_RX_MAC,
	EFX_FARCH_FILTER_TABLE_RX_DEF,
	EFX_FARCH_FILTER_TABLE_TX_MAC,
	EFX_FARCH_FILTER_TABLE_COUNT,
};
enum efx_farch_filter_index {
	EFX_FARCH_FILTER_INDEX_UC_DEF,
	EFX_FARCH_FILTER_INDEX_MC_DEF,
	EFX_FARCH_FILTER_SIZE_RX_DEF,
};
struct efx_farch_filter_spec {
	u8	type:4;
	u8	priority:4;
	u8	flags;
	u16	dmaq_id;
	u32	data[3];
};
struct efx_farch_filter_table {
	enum efx_farch_filter_table_id id;
	u32		offset;		/* address of table relative to BAR */
	unsigned	size;		/* number of entries */
	unsigned	step;		/* step between entries */
	unsigned	used;		/* number currently used */
	unsigned long	*used_bitmap;
	struct efx_farch_filter_spec *spec;
	unsigned	search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
};
struct efx_farch_filter_state {
	struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
};
static void
efx_farch_filter_table_clear_entry(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx);
/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
 * key derived from the n-tuple.  The initial LFSR state is 0xffff. */
static u16 efx_farch_filter_hash(u32 key)
{
	u16 tmp;

	/* First 16 rounds */
	tmp = 0x1fff ^ key >> 16;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	tmp = tmp ^ tmp >> 9;
	/* Last 16 rounds */
	tmp = tmp ^ tmp << 13 ^ key;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	return tmp ^ tmp >> 9;
}
/* To allow for hash collisions, filter search continues at these
 * increments from the first possible entry selected by the hash. */
static u16 efx_farch_filter_increment(u32 key)
{
	return key * 2 - 1;
}
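/* Illustrative sketch of the probe sequence these two helpers produce
 * (they are used together in efx_farch_filter_insert()):
 *
 *	i = efx_farch_filter_hash(key) & (table->size - 1);
 *	while (searching)
 *		i = (i + efx_farch_filter_increment(key)) & (table->size - 1);
 *
 * The increment (key * 2 - 1) is always odd, so it is coprime with the
 * power-of-two table size and the sequence eventually visits every entry.
 */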
static enum efx_farch_filter_table_id
efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
{
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_TCP_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_TCP_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_UDP_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_UDP_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
		     (EFX_FARCH_FILTER_MAC_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
		     (EFX_FARCH_FILTER_MAC_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
		     EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
	return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
}
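/* Worked example (illustrative): EFX_FARCH_FILTER_TCP_FULL (0) >> 2 == 0 ==
 * EFX_FARCH_FILTER_TABLE_RX_IP; EFX_FARCH_FILTER_MAC_FULL (4) >> 2 == 1 ==
 * EFX_FARCH_FILTER_TABLE_RX_MAC; EFX_FARCH_FILTER_UC_DEF (8) >> 2 == 2 ==
 * EFX_FARCH_FILTER_TABLE_RX_DEF.  EFX_FILTER_FLAG_TX adds 2, mapping a MAC
 * filter to EFX_FARCH_FILTER_TABLE_TX_MAC (3).
 */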
static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	efx_oword_t filter_ctl;

	efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
			table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
			table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	}

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
			table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
			!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
			   EFX_FILTER_FLAG_RX_RSS));
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
			table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
			!!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
			   EFX_FILTER_FLAG_RX_RSS));

		/* There is a single bit to enable RX scatter for all
		 * unmatched packets.  Only set it if scatter is
		 * enabled in both filter specs.
		 */
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
			!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
			   table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
			   EFX_FILTER_FLAG_RX_SCATTER));
	} else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		/* We don't expose 'default' filters because unmatched
		 * packets always go to the queue number found in the
		 * RSS table.  But we still need to set the RX scatter
		 * bit here.
		 */
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
			efx->rx_scatter);
	}

	efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
}
static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	efx_oword_t tx_cfg;

	efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);

	table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
			table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
		EFX_SET_OWORD_FIELD(
			tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
			table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	}

	efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
}
static int
efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
			       const struct efx_filter_spec *gen_spec)
{
	bool is_full = false;

	if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) &&
	    gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT)
		return -EINVAL;

	spec->priority = gen_spec->priority;
	spec->flags = gen_spec->flags;
	spec->dmaq_id = gen_spec->dmaq_id;

	switch (gen_spec->match_flags) {
	case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
	      EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
	      EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
		is_full = true;
		/* fall through */
	case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
	      EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
		__be32 rhost, host1, host2;
		__be16 rport, port1, port2;

		EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));

		if (gen_spec->ether_type != htons(ETH_P_IP))
			return -EPROTONOSUPPORT;
		if (gen_spec->loc_port == 0 ||
		    (is_full && gen_spec->rem_port == 0))
			return -EADDRNOTAVAIL;
		switch (gen_spec->ip_proto) {
		case IPPROTO_TCP:
			spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
				      EFX_FARCH_FILTER_TCP_WILD);
			break;
		case IPPROTO_UDP:
			spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
				      EFX_FARCH_FILTER_UDP_WILD);
			break;
		default:
			return -EPROTONOSUPPORT;
		}

		/* Filter is constructed in terms of source and destination,
		 * with the odd wrinkle that the ports are swapped in a UDP
		 * wildcard filter.  We need to convert from local and remote
		 * (= zero for wildcard) addresses.
		 */
		rhost = is_full ? gen_spec->rem_host[0] : 0;
		rport = is_full ? gen_spec->rem_port : 0;
		host1 = rhost;
		host2 = gen_spec->loc_host[0];
		if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
			port1 = gen_spec->loc_port;
			port2 = rport;
		} else {
			port1 = rport;
			port2 = gen_spec->loc_port;
		}
		spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
		spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
		spec->data[2] = ntohl(host2);

		break;
	}

	case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
		is_full = true;
		/* fall through */
	case EFX_FILTER_MATCH_LOC_MAC:
		spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
			      EFX_FARCH_FILTER_MAC_WILD);
		spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
		spec->data[1] = (gen_spec->loc_mac[2] << 24 |
				 gen_spec->loc_mac[3] << 16 |
				 gen_spec->loc_mac[4] << 8 |
				 gen_spec->loc_mac[5]);
		spec->data[2] = (gen_spec->loc_mac[0] << 8 |
				 gen_spec->loc_mac[1]);
		break;

	case EFX_FILTER_MATCH_LOC_MAC_IG:
		spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
			      EFX_FARCH_FILTER_MC_DEF :
			      EFX_FARCH_FILTER_UC_DEF);
		memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
		break;

	default:
		return -EPROTONOSUPPORT;
	}

	return 0;
}
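/* Illustrative layout (derived from the packing above) for an RX TCP full
 * filter, where source = remote host and destination = local host:
 *
 *	data[0] = (src_ip & 0xffff) << 16 | src_port
 *	data[1] = dst_port << 16 | src_ip >> 16
 *	data[2] = dst_ip
 */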
static void
efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
			     const struct efx_farch_filter_spec *spec)
{
	bool is_full = false;

	/* *gen_spec should be completely initialised, to be consistent
	 * with efx_filter_init_{rx,tx}() and in case we want to copy
	 * it back to userland.
	 */
	memset(gen_spec, 0, sizeof(*gen_spec));

	gen_spec->priority = spec->priority;
	gen_spec->flags = spec->flags;
	gen_spec->dmaq_id = spec->dmaq_id;

	switch (spec->type) {
	case EFX_FARCH_FILTER_TCP_FULL:
	case EFX_FARCH_FILTER_UDP_FULL:
		is_full = true;
		/* fall through */
	case EFX_FARCH_FILTER_TCP_WILD:
	case EFX_FARCH_FILTER_UDP_WILD: {
		__be32 host1, host2;
		__be16 port1, port2;

		gen_spec->match_flags =
			EFX_FILTER_MATCH_ETHER_TYPE |
			EFX_FILTER_MATCH_IP_PROTO |
			EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
		if (is_full)
			gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
						  EFX_FILTER_MATCH_REM_PORT);
		gen_spec->ether_type = htons(ETH_P_IP);
		gen_spec->ip_proto =
			(spec->type == EFX_FARCH_FILTER_TCP_FULL ||
			 spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
			IPPROTO_TCP : IPPROTO_UDP;

		host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
		port1 = htons(spec->data[0]);
		host2 = htonl(spec->data[2]);
		port2 = htons(spec->data[1] >> 16);
		if (spec->flags & EFX_FILTER_FLAG_TX) {
			gen_spec->loc_host[0] = host1;
			gen_spec->rem_host[0] = host2;
		} else {
			gen_spec->loc_host[0] = host2;
			gen_spec->rem_host[0] = host1;
		}
		if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
		    (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
			gen_spec->loc_port = port1;
			gen_spec->rem_port = port2;
		} else {
			gen_spec->loc_port = port2;
			gen_spec->rem_port = port1;
		}

		break;
	}

	case EFX_FARCH_FILTER_MAC_FULL:
		is_full = true;
		/* fall through */
	case EFX_FARCH_FILTER_MAC_WILD:
		gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
		if (is_full)
			gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
		gen_spec->loc_mac[0] = spec->data[2] >> 8;
		gen_spec->loc_mac[1] = spec->data[2];
		gen_spec->loc_mac[2] = spec->data[1] >> 24;
		gen_spec->loc_mac[3] = spec->data[1] >> 16;
		gen_spec->loc_mac[4] = spec->data[1] >> 8;
		gen_spec->loc_mac[5] = spec->data[1];
		gen_spec->outer_vid = htons(spec->data[0]);
		break;

	case EFX_FARCH_FILTER_UC_DEF:
	case EFX_FARCH_FILTER_MC_DEF:
		gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
		gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
		break;

	default:
		WARN_ON(1);
		break;
	}
}
static void
efx_farch_filter_init_rx_for_stack(struct efx_nic *efx,
				   struct efx_farch_filter_spec *spec)
{
	/* If there's only one channel then disable RSS for non VF
	 * traffic, thereby allowing VFs to use RSS when the PF can't.
	 */
	spec->priority = EFX_FILTER_PRI_REQUIRED;
	spec->flags = (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK |
		       (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
		       (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
	spec->dmaq_id = 0;
}
/* Build a filter entry and return its n-tuple key. */
static u32 efx_farch_filter_build(efx_oword_t *filter,
				  struct efx_farch_filter_spec *spec)
{
	u32 data3;

	switch (efx_farch_filter_spec_table_id(spec)) {
	case EFX_FARCH_FILTER_TABLE_RX_IP: {
		bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
			       spec->type == EFX_FARCH_FILTER_UDP_WILD);
		EFX_POPULATE_OWORD_7(
			*filter,
			FRF_BZ_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_BZ_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_BZ_TCP_UDP, is_udp,
			FRF_BZ_RXQ_ID, spec->dmaq_id,
			EFX_DWORD_2, spec->data[2],
			EFX_DWORD_1, spec->data[1],
			EFX_DWORD_0, spec->data[0]);
		data3 = is_udp;
		break;
	}

	case EFX_FARCH_FILTER_TABLE_RX_MAC: {
		bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
		EFX_POPULATE_OWORD_7(
			*filter,
			FRF_CZ_RMFT_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_CZ_RMFT_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
			FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
			FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
			FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
			FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild;
		break;
	}

	case EFX_FARCH_FILTER_TABLE_TX_MAC: {
		bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
		EFX_POPULATE_OWORD_5(*filter,
				     FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
				     FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
				     FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
				     FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
				     FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild | spec->dmaq_id << 1;
		break;
	}

	default:
		BUG();
	}

	return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
}
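/* Note: the returned key XORs data3 (is_udp / is_wild / TX queue) into the
 * match words, so filters that differ only in those hardware-matched fields
 * still follow different probe sequences through the table.
 */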
static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
				   const struct efx_farch_filter_spec *right)
{
	if (left->type != right->type ||
	    memcmp(left->data, right->data, sizeof(left->data)))
		return false;

	if (left->flags & EFX_FILTER_FLAG_TX &&
	    left->dmaq_id != right->dmaq_id)
		return false;

	return true;
}
/*
 * Construct/deconstruct external filter IDs.  At least the RX filter
 * IDs must be ordered by matching priority, for RX NFC semantics.
 *
 * Deconstruction needs to be robust against invalid IDs so that
 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
 * accept user-provided IDs.
 */

#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5

static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
	[EFX_FARCH_FILTER_TCP_FULL]	= 0,
	[EFX_FARCH_FILTER_UDP_FULL]	= 0,
	[EFX_FARCH_FILTER_TCP_WILD]	= 1,
	[EFX_FARCH_FILTER_UDP_WILD]	= 1,
	[EFX_FARCH_FILTER_MAC_FULL]	= 2,
	[EFX_FARCH_FILTER_MAC_WILD]	= 3,
	[EFX_FARCH_FILTER_UC_DEF]	= 4,
	[EFX_FARCH_FILTER_MC_DEF]	= 4,
};

static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
	EFX_FARCH_FILTER_TABLE_RX_IP,	/* RX match pri 0 */
	EFX_FARCH_FILTER_TABLE_RX_IP,
	EFX_FARCH_FILTER_TABLE_RX_MAC,
	EFX_FARCH_FILTER_TABLE_RX_MAC,
	EFX_FARCH_FILTER_TABLE_RX_DEF,	/* RX match pri 4 */
	EFX_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 0 */
	EFX_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 1 */
};

#define EFX_FARCH_FILTER_INDEX_WIDTH 13
#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
static inline u32
efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
			 unsigned int index)
{
	unsigned int range;

	range = efx_farch_filter_type_match_pri[spec->type];
	if (!(spec->flags & EFX_FILTER_FLAG_RX))
		range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;

	return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
}

static inline enum efx_farch_filter_table_id
efx_farch_filter_id_table_id(u32 id)
{
	unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;

	if (range < ARRAY_SIZE(efx_farch_filter_range_table))
		return efx_farch_filter_range_table[range];
	else
		return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
}

static inline unsigned int efx_farch_filter_id_index(u32 id)
{
	return id & EFX_FARCH_FILTER_INDEX_MASK;
}
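/* Example (illustrative): an RX TCP wildcard filter (match pri 1) at table
 * index 5 gets id = 1 << 13 | 5 = 0x2005.  Deconstruction recovers the table
 * via efx_farch_filter_range_table[1] == EFX_FARCH_FILTER_TABLE_RX_IP and
 * the index via 0x2005 & EFX_FARCH_FILTER_INDEX_MASK == 5.
 */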
u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
	enum efx_farch_filter_table_id table_id;

	do {
		table_id = efx_farch_filter_range_table[range];
		if (state->table[table_id].size != 0)
			return range << EFX_FARCH_FILTER_INDEX_WIDTH |
				state->table[table_id].size;
	} while (range--);

	return 0;
}
s32 efx_farch_filter_insert(struct efx_nic *efx,
			    struct efx_filter_spec *gen_spec,
			    bool replace_equal)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	struct efx_farch_filter_spec spec;
	efx_oword_t filter;
	int rep_index, ins_index;
	unsigned int depth = 0;
	int rc;

	rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
	if (rc)
		return rc;

	table = &state->table[efx_farch_filter_spec_table_id(&spec)];
	if (table->size == 0)
		return -EINVAL;

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: type %d search_limit=%d", __func__, spec.type,
		   table->search_limit[spec.type]);

	if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
		/* One filter spec per type */
		BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
		BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
			     EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
		rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
		ins_index = rep_index;

		spin_lock_bh(&efx->filter_lock);
	} else {
		/* Search concurrently for
		 * (1) a filter to be replaced (rep_index): any filter
		 *	with the same match values, up to the current
		 *	search depth for this type, and
		 * (2) the insertion point (ins_index): (1) or any
		 *	free slot before it or up to the maximum search
		 *	depth for this priority
		 * We fail if we cannot find (2).
		 *
		 * We can stop once either
		 * (a) we find (1), in which case we have definitely
		 *	found (2) as well; or
		 * (b) we have searched exhaustively for (1), and have
		 *	either found (2) or searched exhaustively for it
		 */
		u32 key = efx_farch_filter_build(&filter, &spec);
		unsigned int hash = efx_farch_filter_hash(key);
		unsigned int incr = efx_farch_filter_increment(key);
		unsigned int max_rep_depth = table->search_limit[spec.type];
		unsigned int max_ins_depth =
			spec.priority <= EFX_FILTER_PRI_HINT ?
			EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
			EFX_FARCH_FILTER_CTL_SRCH_MAX;
		unsigned int i = hash & (table->size - 1);

		ins_index = -1;
		depth = 1;

		spin_lock_bh(&efx->filter_lock);

		for (;;) {
			if (!test_bit(i, table->used_bitmap)) {
				if (ins_index < 0)
					ins_index = i;
			} else if (efx_farch_filter_equal(&spec,
							  &table->spec[i])) {
				/* Case (a) */
				if (ins_index < 0)
					ins_index = i;
				rep_index = i;
				break;
			}

			if (depth >= max_rep_depth &&
			    (ins_index >= 0 || depth >= max_ins_depth)) {
				/* Case (b) */
				if (ins_index < 0) {
					rc = -EBUSY;
					goto out;
				}
				rep_index = -1;
				break;
			}

			i = (i + incr) & (table->size - 1);
			++depth;
		}
	}

	/* If we found a filter to be replaced, check whether we
	 * should do so
	 */
	if (rep_index >= 0) {
		struct efx_farch_filter_spec *saved_spec =
			&table->spec[rep_index];

		if (spec.priority == saved_spec->priority && !replace_equal) {
			rc = -EEXIST;
			goto out;
		}
		if (spec.priority < saved_spec->priority &&
		    !(saved_spec->priority == EFX_FILTER_PRI_REQUIRED &&
		      saved_spec->flags & EFX_FILTER_FLAG_RX_STACK)) {
			rc = -EPERM;
			goto out;
		}
		if (spec.flags & EFX_FILTER_FLAG_RX_STACK) {
			/* Just make sure it won't be removed */
			saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
			rc = 0;
			goto out;
		}
		/* Retain the RX_STACK flag */
		spec.flags |= saved_spec->flags & EFX_FILTER_FLAG_RX_STACK;
	}

	/* Insert the filter */
	if (ins_index != rep_index) {
		__set_bit(ins_index, table->used_bitmap);
		++table->used;
	}
	table->spec[ins_index] = spec;

	if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
		efx_farch_filter_push_rx_config(efx);
	} else {
		if (table->search_limit[spec.type] < depth) {
			table->search_limit[spec.type] = depth;
			if (spec.flags & EFX_FILTER_FLAG_TX)
				efx_farch_filter_push_tx_limits(efx);
			else
				efx_farch_filter_push_rx_config(efx);
		}

		efx_writeo(efx, &filter,
			   table->offset + table->step * ins_index);

		/* If we were able to replace a filter by inserting
		 * at a lower depth, clear the replaced filter
		 */
		if (ins_index != rep_index && rep_index >= 0)
			efx_farch_filter_table_clear_entry(efx, table,
							   rep_index);
	}

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: filter type %d index %d rxq %u set",
		   __func__, spec.type, ins_index, spec.dmaq_id);
	rc = efx_farch_filter_make_id(&spec, ins_index);

out:
	spin_unlock_bh(&efx->filter_lock);
	return rc;
}
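/* Sketch of typical usage (illustrative; the helpers are declared in
 * filter.h and exact signatures may vary by driver generation):
 *
 *	struct efx_filter_spec spec;
 *	s32 rc;
 *
 *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq_index);
 *	efx_filter_set_ipv4_full(&spec, IPPROTO_TCP, loc_host, loc_port,
 *				 rem_host, rem_port);
 *	rc = efx_farch_filter_insert(efx, &spec, false);
 *
 * A non-negative rc is the external filter ID, suitable for
 * efx_farch_filter_remove_safe() and efx_farch_filter_get_safe().
 */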
static void
efx_farch_filter_table_clear_entry(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx)
{
	static efx_oword_t filter;

	EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
	BUG_ON(table->offset == 0); /* can't clear MAC default filters */

	__clear_bit(filter_idx, table->used_bitmap);
	--table->used;
	memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));

	efx_writeo(efx, &filter, table->offset + table->step * filter_idx);

	/* If this filter required a greater search depth than
	 * any other, the search limit for its type can now be
	 * decreased.  However, it is hard to determine that
	 * unless the table has become completely empty - in
	 * which case, all its search limits can be set to 0.
	 */
	if (unlikely(table->used == 0)) {
		memset(table->search_limit, 0, sizeof(table->search_limit));
		if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
			efx_farch_filter_push_tx_limits(efx);
		else
			efx_farch_filter_push_rx_config(efx);
	}
}
static int efx_farch_filter_remove(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx,
				   enum efx_filter_priority priority)
{
	struct efx_farch_filter_spec *spec = &table->spec[filter_idx];

	if (!test_bit(filter_idx, table->used_bitmap) ||
	    spec->priority > priority)
		return -ENOENT;

	if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
		efx_farch_filter_init_rx_for_stack(efx, spec);
		efx_farch_filter_push_rx_config(efx);
	} else {
		efx_farch_filter_table_clear_entry(efx, table, filter_idx);
	}

	return 0;
}
int efx_farch_filter_remove_safe(struct efx_nic *efx,
				 enum efx_filter_priority priority,
				 u32 filter_id)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	struct efx_farch_filter_spec *spec;
	int rc;

	table_id = efx_farch_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
		return -ENOENT;
	table = &state->table[table_id];

	filter_idx = efx_farch_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		return -ENOENT;
	spec = &table->spec[filter_idx];

	spin_lock_bh(&efx->filter_lock);
	rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
	spin_unlock_bh(&efx->filter_lock);

	return rc;
}
int efx_farch_filter_get_safe(struct efx_nic *efx,
			      enum efx_filter_priority priority,
			      u32 filter_id, struct efx_filter_spec *spec_buf)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	struct efx_farch_filter_spec *spec;
	unsigned int filter_idx;
	int rc;

	table_id = efx_farch_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
		return -ENOENT;
	table = &state->table[table_id];

	filter_idx = efx_farch_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		return -ENOENT;
	spec = &table->spec[filter_idx];

	spin_lock_bh(&efx->filter_lock);

	if (test_bit(filter_idx, table->used_bitmap) &&
	    spec->priority == priority) {
		efx_farch_filter_to_gen_spec(spec_buf, spec);
		rc = 0;
	} else {
		rc = -ENOENT;
	}

	spin_unlock_bh(&efx->filter_lock);

	return rc;
}
static void
efx_farch_filter_table_clear(struct efx_nic *efx,
			     enum efx_farch_filter_table_id table_id,
			     enum efx_filter_priority priority)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table = &state->table[table_id];
	unsigned int filter_idx;

	spin_lock_bh(&efx->filter_lock);
	for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
		efx_farch_filter_remove(efx, table, filter_idx, priority);
	spin_unlock_bh(&efx->filter_lock);
}
void efx_farch_filter_clear_rx(struct efx_nic *efx,
			       enum efx_filter_priority priority)
{
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
				     priority);
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
				     priority);
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
				     priority);
}
u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
				   enum efx_filter_priority priority)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	u32 count = 0;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority)
				++count;
		}
	}

	spin_unlock_bh(&efx->filter_lock);

	return count;
}
s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
				enum efx_filter_priority priority,
				u32 *buf, u32 size)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	s32 count = 0;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority) {
				if (count == size) {
					count = -EMSGSIZE;
					goto out;
				}
				buf[count++] = efx_farch_filter_make_id(
					&table->spec[filter_idx], filter_idx);
			}
		}
	}
out:
	spin_unlock_bh(&efx->filter_lock);

	return count;
}
/* Restore filter state after reset */
void efx_farch_filter_table_restore(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	efx_oword_t filter;
	unsigned int filter_idx;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];

		/* Check whether this is a regular register table */
		if (table->step == 0)
			continue;

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap))
				continue;
			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	efx_farch_filter_push_rx_config(efx);
	efx_farch_filter_push_tx_limits(efx);

	spin_unlock_bh(&efx->filter_lock);
}
void efx_farch_filter_table_remove(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		kfree(state->table[table_id].used_bitmap);
		vfree(state->table[table_id].spec);
	}
	kfree(state);
}
int efx_farch_filter_table_probe(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state;
	struct efx_farch_filter_table *table;
	unsigned table_id;

	state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;
	efx->filter_state = state;

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
		table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
		table->offset = FR_BZ_RX_FILTER_TBL0;
		table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
		table->step = FR_BZ_RX_FILTER_TBL0_STEP;
	}

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
		table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
		table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
		table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
		table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;

		table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
		table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
		table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;

		table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
		table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
		table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
		table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
		table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
	}

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];
		if (table->size == 0)
			continue;
		table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
					     sizeof(unsigned long),
					     GFP_KERNEL);
		if (!table->used_bitmap)
			goto fail;
		table->spec = vzalloc(table->size * sizeof(*table->spec));
		if (!table->spec)
			goto fail;
	}

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
	if (table->size) {
		/* RX default filters must always exist */
		struct efx_farch_filter_spec *spec;
		unsigned i;

		for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
			spec = &table->spec[i];
			spec->type = EFX_FARCH_FILTER_UC_DEF + i;
			efx_farch_filter_init_rx_for_stack(efx, spec);
			__set_bit(i, table->used_bitmap);
		}
	}

	efx_farch_filter_push_rx_config(efx);

	return 0;

fail:
	efx_farch_filter_table_remove(efx);
	return -ENOMEM;
}
/* Update scatter enable flags for filters pointing to our own RX queues */
void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	efx_oword_t filter;
	unsigned int filter_idx;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap) ||
			    table->spec[filter_idx].dmaq_id >=
			    efx->n_rx_channels)
				continue;

			if (efx->rx_scatter)
				table->spec[filter_idx].flags |=
					EFX_FILTER_FLAG_RX_SCATTER;
			else
				table->spec[filter_idx].flags &=
					~EFX_FILTER_FLAG_RX_SCATTER;

			if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
				/* Pushed by efx_farch_filter_push_rx_config() */
				continue;

			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	efx_farch_filter_push_rx_config(efx);

	spin_unlock_bh(&efx->filter_lock);
}
#ifdef CONFIG_RFS_ACCEL

s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
				struct efx_filter_spec *gen_spec)
{
	return efx_farch_filter_insert(efx, gen_spec, true);
}

bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
				     unsigned int index)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table =
		&state->table[EFX_FARCH_FILTER_TABLE_RX_IP];

	if (test_bit(index, table->used_bitmap) &&
	    table->spec[index].priority == EFX_FILTER_PRI_HINT &&
	    rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
				flow_id, index)) {
		efx_farch_filter_table_clear_entry(efx, table, index);
		return true;
	}

	return false;
}

#endif /* CONFIG_RFS_ACCEL */