// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "sriov.h"
#include "siena_sriov.h"
#include "io.h"
#include "workarounds.h"

/* Falcon-architecture (SFC9000-family) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason. In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding). This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
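
/* The *_ORDER values encode the entry counts above as 8 << ORDER; the
 * relationship is checked by BUILD_BUG_ON()s in efx_farch_init_common().
 */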

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4
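
/* The hardware can only have EFX_RX_FLUSH_COUNT (four) RX flushes
 * outstanding at once; see the batching logic in efx_farch_do_flush().
 */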

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)
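
/* A magic value thus carries the event code in its upper bits and the
 * originating channel or queue index in its low 8 bits, so the decode
 * side can recover both with a shift and a mask.
 */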

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status, MAC stats, etc.
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_farch_test_registers(struct efx_nic *efx,
			     const struct efx_farch_register_test *regs,
			     size_t n_regs)
{
	unsigned address = 0;
	int i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_WARN_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range. It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
#ifdef CONFIG_SFC_SRIOV
	struct siena_nic_data *nic_data = efx->nic_data;
#endif
	len = ALIGN(len, EFX_BUF_SIZE);

	if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_siena_sriov_enabled(efx) &&
	       nic_data->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	efx_nic_free_buffer(efx, &buffer->buf);
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
					  const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	tx_queue->xmit_more_available = false;
	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
		return;

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		EFX_WARN_ON_ONCE_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_farch_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_farch_notify_tx_desc(tx_queue);
	}
}

unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
				    dma_addr_t dma_addr, unsigned int len)
{
	/* Don't cross 4K boundaries with descriptors. */
	unsigned int limit = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

	len = min(limit, len);

	return len;
}

/* Allocate hardware resources for a TX queue */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{
	int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
	EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, !csum);

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	EFX_POPULATE_OWORD_1(reg,
			     FRF_BZ_TX_PACE,
			     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
			     FFE_BZ_TX_PACE_OFF :
			     FFE_BZ_TX_PACE_RESERVED);
	efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue);
}

static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* This creates an entry in the RX descriptor queue */
static inline void
efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_farch_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool jumbo_en;

	/* For kernel-mode queues in Siena, the JUMBO flag enables scatter. */
	jumbo_en = efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, true,
			      FRF_AZ_RX_ISCSI_HDIG_EN, true,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* efx_farch_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_farch_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_farch_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->active_queues) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
	bool i = true;
	efx_oword_t txd_ptr_tbl;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment active_queues as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				efx_farch_magic_event(channel,
						      EFX_CHANNEL_MAGIC_TX_DRAIN(
							      tx_queue));
			}
		}
	}

	return i;
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
static int efx_farch_do_flush(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_farch_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->active_queues) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion). If that fails, fall back to the old scheme.
		 */
		if (efx_siena_sriov_enabled(efx)) {
			rc = efx_mcdi_flush_rxqs(efx);
			if (!rc)
				goto wait;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_farch_flush_rx_queue(rx_queue);
				}
			}
		}

	wait:
		timeout = wait_event_timeout(efx->flush_wq,
					     efx_farch_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->active_queues) &&
	    !efx_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->active_queues),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->active_queues, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	return rc;
}

int efx_farch_fini_dmaq(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc = 0;

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		/* Only perform flush if DMA is enabled */
		if (efx->pci_dev->is_busmaster) {
			efx->type->prepare_flush(efx);
			rc = efx_farch_do_flush(efx);
			efx->type->finish_flush(efx);
		}

		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_farch_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_farch_tx_fini(tx_queue);
		}
	}

	return rc;
}

/* Reset queue and flush accounting after FLR
 *
 * One possible cause of FLR recovery is that DMA may be failing (eg. if bus
 * mastering was disabled), in which case we don't receive (RXQ) flush
 * completion events.  This means that efx->rxq_flush_outstanding remained at 4
 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
 * events were received, and we didn't go through efx_check_tx_flush_complete())
 * If we don't fix this up, on the next call to efx_realloc_channels() we won't
 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
 * for batched flush requests; and the efx->active_queues gets messed up because
 * we keep incrementing for the newly initialised queues, but it never went to
 * zero previously.  Then we get a timeout every time we try to restart the
 * queues, as it doesn't go back to zero when we should be flushing the queues.
 */
void efx_farch_finish_flr(struct efx_nic *efx)
{
	atomic_set(&efx->rxq_flush_pending, 0);
	atomic_set(&efx->rxq_flush_outstanding, 0);
	atomic_set(&efx->active_queues, 0);
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_farch_ev_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	efx_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}

/* Use HW to insert a SW defined event */
void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
			      efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_farch_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static void
efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(READ_ONCE(efx->reset_pending)))
		return;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_farch_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}
}

/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				      const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats. Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}

	if (efx->net_dev->features & NETIF_F_RXALL)
		/* don't discard frame for CRC error */
		rx_ev_eth_crc_err = false;

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
		return true;
	}

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	return false;
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
	u16 flags;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(READ_ONCE(efx->reset_pending)))
		return;

	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
	rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
			rx_queue->ptr_mask);

	/* Check for partial drops and other errors */
	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
		if (rx_ev_desc_ptr != expected_ptr &&
		    !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
			return;

		/* Discard all pending fragments */
		if (rx_queue->scatter_n) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
			rx_queue->removed_count += rx_queue->scatter_n;
			rx_queue->scatter_n = 0;
		}

		/* Return if there is no new fragment */
		if (rx_ev_desc_ptr != expected_ptr)
			return;

		/* Discard new fragment if not SOP */
		if (!rx_ev_sop) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				1, 0, EFX_RX_PKT_DISCARD);
			++rx_queue->removed_count;
			return;
		}
	}

	++rx_queue->scatter_n;
	if (rx_ev_cont)
		return;

	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK then we can rely on the
		 * hardware checksum and classification.
		 */
		flags = 0;
		switch (rx_ev_hdr_type) {
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
			flags |= EFX_RX_PKT_TCP;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
			flags |= EFX_RX_PKT_CSUMMED;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
			break;
		}
	} else {
		flags = efx_farch_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue,
		      rx_queue->removed_count & rx_queue->ptr_mask,
		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
}

/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_tx_queue *tx_queue;
	int qid;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EFX_TXQ_TYPES * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
					    qid % EFX_TXQ_TYPES);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
			efx_farch_magic_event(tx_queue->channel,
					      EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
		}
	}
}

/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = efx_get_channel(efx, qid);
	if (!efx_channel_has_rx_queue(channel))
		return;
	rx_queue = efx_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
				      EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
efx_farch_handle_drain_event(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->active_queues) == 0);
	atomic_dec(&efx->active_queues);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void efx_farch_handle_generated_event(struct efx_channel *channel,
					     efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue =
		efx_channel_has_rx_queue(channel) ?
		efx_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EFX_CHANNEL_MAGIC_CODE(magic);

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue. Refill it here */
		efx_fast_push_rx_descriptors(rx_queue, true);
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		efx_farch_handle_drain_event(channel);
	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
		efx_farch_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
	}
}

static void
efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_tx_flush_done(efx, event);
#ifdef CONFIG_SFC_SRIOV
		efx_siena_sriov_tx_flush_done(efx, event);
#endif
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_rx_flush_done(efx, event);
#ifdef CONFIG_SFC_SRIOV
		efx_siena_sriov_rx_flush_done(efx, event);
#endif
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, rx_err, efx->net_dev,
				  "RX DMA Q %d reports descriptor fetch error."
				  " RX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		}
#ifdef CONFIG_SFC_SRIOV
		else
			efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
#endif
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX DMA Q %d reports descriptor fetch error."
				  " TX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		}
#ifdef CONFIG_SFC_SRIOV
		else
			efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
#endif
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
	}
}

int efx_farch_ev_process(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int spent = 0;

	if (budget <= 0)
		return spent;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_farch_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			efx_farch_handle_tx_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_farch_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_farch_handle_driver_event(channel, &event);
			break;
#ifdef CONFIG_SFC_SRIOV
		case FSE_CZ_EV_CODE_USER_EV:
			efx_siena_sriov_event(channel, &event);
			break;
#endif
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}

/* Allocate buffer table entries for event queue */
int efx_farch_ev_probe(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

int efx_farch_ev_init(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	EFX_POPULATE_OWORD_3(reg,
			     FRF_CZ_TIMER_Q_EN, 1,
			     FRF_CZ_HOST_NOTIFY_MODE, 0,
			     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	return 0;
}

void efx_farch_ev_fini(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_farch_ev_remove(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}

void efx_farch_ev_test_generate(struct efx_channel *channel)
{
	efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}

void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
{
	efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
			      EFX_CHANNEL_MAGIC_FILL(rx_queue));
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_farch_interrupts(struct efx_nic *efx,
					bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_farch_irq_enable_master(struct efx_nic *efx)
{
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	efx_farch_interrupts(efx, true, false);
}

void efx_farch_irq_disable_master(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_farch_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
int efx_farch_irq_test_generate(struct efx_nic *efx)
{
	efx_farch_interrupts(efx, true, true);
	return 0;
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
{
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	efx_farch_irq_disable_master(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen."
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}

/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedule event queue processing.
 */
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Legacy interrupts are disabled too late by the EEH kernel
	 * code. Disable them earlier.
	 * If an EEH error occurred, the read will have returned all ones.
	 */
	if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
	    !efx->eeh_disabled_legacy_irq) {
		disable_irq_nosync(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = true;
	}

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level) && soft_enabled) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		if (likely(soft_enabled)) {
			efx_for_each_channel(channel, efx) {
				if (queues & 1)
					efx_schedule_channel_irq(channel);
				queues >>= 1;
			}
		}
		result = IRQ_HANDLED;

	} else {
		efx_qword_t *event;

		/* Legacy ISR read can return zero once (SF bug 15783) */

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		if (likely(soft_enabled)) {
			efx_for_each_channel(channel, efx) {
				event = efx_event(channel,
						  channel->eventq_read_ptr);
				if (efx_event_present(event))
					efx_schedule_channel_irq(channel);
				else
					efx_farch_ev_read_ack(channel);
			}
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return result;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Handle non-event-queue sources */
	if (context->index == efx->irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	efx_schedule_channel_irq(efx->channel[context->index]);

	return IRQ_HANDLED;
}

/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_farch_rx_push_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rss_context.rx_indir_table[i]);
		efx_writed(efx, &dword,
			   FR_BZ_RX_INDIRECTION_TBL +
			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
	}
}

void efx_farch_rx_pull_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		efx_readd(efx, &dword,
			  FR_BZ_RX_INDIRECTION_TBL +
			  FR_BZ_RX_INDIRECTION_TBL_STEP * i);
		efx->rss_context.rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE);
	}
}

/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 * 0                          buftbl entries for channels
 * efx->vf_buftbl_base        buftbl entries for SR-IOV
 * efx->rx_dc_base            RX descriptor caches
 * efx->tx_dc_base            TX descriptor caches
 */
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
	unsigned vi_count, buftbl_min, total_tx_channels;

#ifdef CONFIG_SFC_SRIOV
	struct siena_nic_data *nic_data = efx->nic_data;
#endif

	total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels;
	/* Account for the buffer table entries backing the datapath channels
	 * and the descriptor caches for those channels.
	 */
	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
		       total_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
		       efx->n_channels * EFX_MAX_EVQ_SIZE)
		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
	vi_count = max(efx->n_channels, total_tx_channels * EFX_TXQ_TYPES);

#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx)) {
			unsigned vi_dc_entries, buftbl_free;
			unsigned entries_per_vf, vf_limit;

			nic_data->vf_buftbl_base = buftbl_min;

			vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
			vi_count = max(vi_count, EFX_VI_BASE);
			buftbl_free = (sram_lim_qw - buftbl_min -
				       vi_count * vi_dc_entries);

			entries_per_vf = ((vi_dc_entries +
					   EFX_VF_BUFTBL_PER_VI) *
					  efx_vf_size(efx));
			vf_limit = min(buftbl_free / entries_per_vf,
				       (1024U - EFX_VI_BASE) >> efx->vi_scale);

			if (efx->vf_count > vf_limit) {
				netif_err(efx, probe, efx->net_dev,
					  "Reducing VF count from %d to %d\n",
					  efx->vf_count, vf_limit);
				efx->vf_count = vf_limit;
			}
			vi_count += efx->vf_count * efx_vf_size(efx);
		}
	}
#endif

	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}

u32 efx_farch_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}

void efx_farch_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	EFX_POPULATE_OWORD_4(temp,
			     /* Default values */
			     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
			     FRF_BZ_TX_PACE_SB_AF, 0xb,
			     FRF_BZ_TX_PACE_FB_BASE, 0,
			     /* Allow large pace values in the fast bin. */
			     FRF_BZ_TX_PACE_BIN_TH,
			     FFE_BZ_TX_PACE_RESERVED);
	efx_writeo(efx, &temp, FR_BZ_TX_PACE);
}

/**************************************************************************
 *
 * Filter tables
 *
 **************************************************************************
 */

/* "Fudge factors" - difference between programmed value and actual depth.
 * Due to pipelined implementation we need to program H/W with a value that
 * is larger than the hop limit we want.
 */
#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1

/* Hard maximum search limit.  Hardware will time-out beyond 200-something.
 * We also need to avoid infinite loops in efx_farch_filter_search() when the
 * table is full.
 */
#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200

/* Don't try very hard to find space for performance hints, as this is
 * counter-productive. */
#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5

enum efx_farch_filter_type {
	EFX_FARCH_FILTER_TCP_FULL = 0,
	EFX_FARCH_FILTER_TCP_WILD,
	EFX_FARCH_FILTER_UDP_FULL,
	EFX_FARCH_FILTER_UDP_WILD,
	EFX_FARCH_FILTER_MAC_FULL = 4,
	EFX_FARCH_FILTER_MAC_WILD,
	EFX_FARCH_FILTER_UC_DEF = 8,
	EFX_FARCH_FILTER_MC_DEF,
	EFX_FARCH_FILTER_TYPE_COUNT,	/* number of specific types */
};

enum efx_farch_filter_table_id {
	EFX_FARCH_FILTER_TABLE_RX_IP = 0,
	EFX_FARCH_FILTER_TABLE_RX_MAC,
	EFX_FARCH_FILTER_TABLE_RX_DEF,
	EFX_FARCH_FILTER_TABLE_TX_MAC,
	EFX_FARCH_FILTER_TABLE_COUNT,
};

enum efx_farch_filter_index {
	EFX_FARCH_FILTER_INDEX_UC_DEF,
	EFX_FARCH_FILTER_INDEX_MC_DEF,
	EFX_FARCH_FILTER_SIZE_RX_DEF,
};
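
/* Filter types are grouped four per hardware table; this is what lets
 * efx_farch_filter_spec_table_id() derive the table from type >> 2, as
 * the BUILD_BUG_ON()s in that function verify.
 */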

struct efx_farch_filter_spec {
	u8	type:4;
	u8	priority:4;
	u8	flags;
	u16	dmaq_id;
	u32	data[3];
};
{
1866 struct efx_farch_filter_table
{
1867 enum efx_farch_filter_table_id id
;
1868 u32 offset
; /* address of table relative to BAR */
1869 unsigned size
; /* number of entries */
1870 unsigned step
; /* step between entries */
1871 unsigned used
; /* number currently used */
1872 unsigned long *used_bitmap
;
1873 struct efx_farch_filter_spec
*spec
;
1874 unsigned search_limit
[EFX_FARCH_FILTER_TYPE_COUNT
];
1877 struct efx_farch_filter_state
{
1878 struct rw_semaphore lock
; /* Protects table contents */
1879 struct efx_farch_filter_table table
[EFX_FARCH_FILTER_TABLE_COUNT
];

static void
efx_farch_filter_table_clear_entry(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx);

/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
 * key derived from the n-tuple.  The initial LFSR state is 0xffff. */
static u16 efx_farch_filter_hash(u32 key)
{
	u16 tmp;

	/* First 16 rounds */
	tmp = 0x1fff ^ key >> 16;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	tmp = tmp ^ tmp >> 9;
	/* Last 16 rounds */
	tmp = tmp ^ tmp << 13 ^ key;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	return tmp ^ tmp >> 9;
}

/* To allow for hash collisions, filter search continues at these
 * increments from the first possible entry selected by the hash. */
static u16 efx_farch_filter_increment(u32 key)
{
	return key * 2 - 1;
}

static enum efx_farch_filter_table_id
efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
{
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_TCP_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_TCP_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_UDP_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_UDP_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
		     (EFX_FARCH_FILTER_MAC_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
		     (EFX_FARCH_FILTER_MAC_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
		     EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
	return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
}

static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	efx_oword_t filter_ctl;

	efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
			table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
			table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	}

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
			table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
			!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
			   EFX_FILTER_FLAG_RX_RSS));
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
			table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
			!!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
			   EFX_FILTER_FLAG_RX_RSS));

		/* There is a single bit to enable RX scatter for all
		 * unmatched packets.  Only set it if scatter is
		 * enabled in both filter specs.
		 */
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
			!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
			   table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
			   EFX_FILTER_FLAG_RX_SCATTER));
	} else {
		/* We don't expose 'default' filters because unmatched
		 * packets always go to the queue number found in the
		 * RSS table.  But we still need to set the RX scatter
		 * bit here.
		 */
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
			efx->rx_scatter);
	}

	efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
}

static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	efx_oword_t tx_cfg;

	efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);

	table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
			table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
		EFX_SET_OWORD_FIELD(
			tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
			table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	}

	efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
}

static int
efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
			       const struct efx_filter_spec *gen_spec)
{
	bool is_full = false;

	if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && gen_spec->rss_context)
		return -EINVAL;

	spec->priority = gen_spec->priority;
	spec->flags = gen_spec->flags;
	spec->dmaq_id = gen_spec->dmaq_id;

	switch (gen_spec->match_flags) {
	case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
	      EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
	      EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
		is_full = true;
		fallthrough;
	case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
	      EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
		__be32 rhost, host1, host2;
		__be16 rport, port1, port2;

		EFX_WARN_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));

		if (gen_spec->ether_type != htons(ETH_P_IP))
			return -EPROTONOSUPPORT;
		if (gen_spec->loc_port == 0 ||
		    (is_full && gen_spec->rem_port == 0))
			return -EADDRNOTAVAIL;
		switch (gen_spec->ip_proto) {
		case IPPROTO_TCP:
			spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
				      EFX_FARCH_FILTER_TCP_WILD);
			break;
		case IPPROTO_UDP:
			spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
				      EFX_FARCH_FILTER_UDP_WILD);
			break;
		default:
			return -EPROTONOSUPPORT;
		}

		/* Filter is constructed in terms of source and destination,
		 * with the odd wrinkle that the ports are swapped in a UDP
		 * wildcard filter.  We need to convert from local and remote
		 * (= zero for wildcard) addresses.
		 */
		rhost = is_full ? gen_spec->rem_host[0] : 0;
		rport = is_full ? gen_spec->rem_port : 0;
		host1 = rhost;
		host2 = gen_spec->loc_host[0];
		if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
			port1 = gen_spec->loc_port;
			port2 = rport;
		} else {
			port1 = rport;
			port2 = gen_spec->loc_port;
		}
		spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
		spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
		spec->data[2] = ntohl(host2);

		break;
	}

	case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
		is_full = true;
		fallthrough;
	case EFX_FILTER_MATCH_LOC_MAC:
		spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
			      EFX_FARCH_FILTER_MAC_WILD);
		spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
		spec->data[1] = (gen_spec->loc_mac[2] << 24 |
				 gen_spec->loc_mac[3] << 16 |
				 gen_spec->loc_mac[4] << 8 |
				 gen_spec->loc_mac[5]);
		spec->data[2] = (gen_spec->loc_mac[0] << 8 |
				 gen_spec->loc_mac[1]);
		break;

	case EFX_FILTER_MATCH_LOC_MAC_IG:
		spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
			      EFX_FARCH_FILTER_MC_DEF :
			      EFX_FARCH_FILTER_UC_DEF);
		memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
		break;

	default:
		return -EPROTONOSUPPORT;
	}

	return 0;
}
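
/* Layout note for the IP cases above: spec->data[] packs the 96-bit
 * n-tuple with the destination (local) address in data[2] and the
 * source address and both ports split across data[1]:data[0], matching
 * the EFX_DWORD_0..2 fields written by efx_farch_filter_build().
 */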

static void
efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
			     const struct efx_farch_filter_spec *spec)
{
	bool is_full = false;

	/* *gen_spec should be completely initialised, to be consistent
	 * with efx_filter_init_{rx,tx}() and in case we want to copy
	 * it back to userland.
	 */
	memset(gen_spec, 0, sizeof(*gen_spec));

	gen_spec->priority = spec->priority;
	gen_spec->flags = spec->flags;
	gen_spec->dmaq_id = spec->dmaq_id;

	switch (spec->type) {
	case EFX_FARCH_FILTER_TCP_FULL:
	case EFX_FARCH_FILTER_UDP_FULL:
		is_full = true;
		fallthrough;
	case EFX_FARCH_FILTER_TCP_WILD:
	case EFX_FARCH_FILTER_UDP_WILD: {
		__be32 host1, host2;
		__be16 port1, port2;

		gen_spec->match_flags =
			EFX_FILTER_MATCH_ETHER_TYPE |
			EFX_FILTER_MATCH_IP_PROTO |
			EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
		if (is_full)
			gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
						  EFX_FILTER_MATCH_REM_PORT);
		gen_spec->ether_type = htons(ETH_P_IP);
		gen_spec->ip_proto =
			(spec->type == EFX_FARCH_FILTER_TCP_FULL ||
			 spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
			IPPROTO_TCP : IPPROTO_UDP;

		host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
		port1 = htons(spec->data[0]);
		host2 = htonl(spec->data[2]);
		port2 = htons(spec->data[1] >> 16);
		if (spec->flags & EFX_FILTER_FLAG_TX) {
			gen_spec->loc_host[0] = host1;
			gen_spec->rem_host[0] = host2;
		} else {
			gen_spec->loc_host[0] = host2;
			gen_spec->rem_host[0] = host1;
		}
		if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
		    (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
			gen_spec->loc_port = port1;
			gen_spec->rem_port = port2;
		} else {
			gen_spec->loc_port = port2;
			gen_spec->rem_port = port1;
		}

		break;
	}

	case EFX_FARCH_FILTER_MAC_FULL:
		is_full = true;
		fallthrough;
	case EFX_FARCH_FILTER_MAC_WILD:
		gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
		if (is_full)
			gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
		gen_spec->loc_mac[0] = spec->data[2] >> 8;
		gen_spec->loc_mac[1] = spec->data[2];
		gen_spec->loc_mac[2] = spec->data[1] >> 24;
		gen_spec->loc_mac[3] = spec->data[1] >> 16;
		gen_spec->loc_mac[4] = spec->data[1] >> 8;
		gen_spec->loc_mac[5] = spec->data[1];
		gen_spec->outer_vid = htons(spec->data[0]);
		break;

	case EFX_FARCH_FILTER_UC_DEF:
	case EFX_FARCH_FILTER_MC_DEF:
		gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
		gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
		break;

	default:
		/* This can happen only if hardware and driver state is
		 * inconsistent */
		WARN_ON(1);
		break;
	}
}

static void
efx_farch_filter_init_rx_auto(struct efx_nic *efx,
			      struct efx_farch_filter_spec *spec)
{
	/* If there's only one channel then disable RSS for non VF
	 * traffic, thereby allowing VFs to use RSS when the PF can't.
	 */
	spec->priority = EFX_FILTER_PRI_AUTO;
	spec->flags = (EFX_FILTER_FLAG_RX |
		       (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) |
		       (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
	spec->dmaq_id = 0;
}

/* Build a filter entry and return its n-tuple key. */
static u32 efx_farch_filter_build(efx_oword_t *filter,
				  struct efx_farch_filter_spec *spec)
{
	u32 data3;

	switch (efx_farch_filter_spec_table_id(spec)) {
	case EFX_FARCH_FILTER_TABLE_RX_IP: {
		bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
			       spec->type == EFX_FARCH_FILTER_UDP_WILD);
		EFX_POPULATE_OWORD_7(
			*filter,
			FRF_BZ_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_BZ_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_BZ_TCP_UDP, is_udp,
			FRF_BZ_RXQ_ID, spec->dmaq_id,
			EFX_DWORD_2, spec->data[2],
			EFX_DWORD_1, spec->data[1],
			EFX_DWORD_0, spec->data[0]);
		data3 = is_udp;
		break;
	}

	case EFX_FARCH_FILTER_TABLE_RX_MAC: {
		bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
		EFX_POPULATE_OWORD_7(
			*filter,
			FRF_CZ_RMFT_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_CZ_RMFT_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
			FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
			FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
			FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
			FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild;
		break;
	}

	case EFX_FARCH_FILTER_TABLE_TX_MAC: {
		bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
		EFX_POPULATE_OWORD_5(*filter,
				     FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
				     FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
				     FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
				     FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
				     FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild | spec->dmaq_id << 1;
		break;
	}

	default:
		BUG();
	}

	return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
}
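
/* The return value above is the 32-bit key fed to efx_farch_filter_hash()
 * and efx_farch_filter_increment(); data3 folds in the per-table control
 * bits so that specs differing only in those bits hash differently.
 */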

static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
				   const struct efx_farch_filter_spec *right)
{
	if (left->type != right->type ||
	    memcmp(left->data, right->data, sizeof(left->data)))
		return false;

	if (left->flags & EFX_FILTER_FLAG_TX &&
	    left->dmaq_id != right->dmaq_id)
		return false;

	return true;
}

/*
 * Construct/deconstruct external filter IDs.  At least the RX filter
 * IDs must be ordered by matching priority, for RX NFC semantics.
 *
 * Deconstruction needs to be robust against invalid IDs so that
 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
 * accept user-provided IDs.
 */

#define EFX_FARCH_FILTER_MATCH_PRI_COUNT	5

static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
	[EFX_FARCH_FILTER_TCP_FULL]	= 0,
	[EFX_FARCH_FILTER_UDP_FULL]	= 0,
	[EFX_FARCH_FILTER_TCP_WILD]	= 1,
	[EFX_FARCH_FILTER_UDP_WILD]	= 1,
	[EFX_FARCH_FILTER_MAC_FULL]	= 2,
	[EFX_FARCH_FILTER_MAC_WILD]	= 3,
	[EFX_FARCH_FILTER_UC_DEF]	= 4,
	[EFX_FARCH_FILTER_MC_DEF]	= 4,
};

static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
	EFX_FARCH_FILTER_TABLE_RX_IP,	/* RX match pri 0 */
	EFX_FARCH_FILTER_TABLE_RX_IP,
	EFX_FARCH_FILTER_TABLE_RX_MAC,
	EFX_FARCH_FILTER_TABLE_RX_MAC,
	EFX_FARCH_FILTER_TABLE_RX_DEF,	/* RX match pri 4 */
	EFX_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 0 */
	EFX_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 1 */
};

#define EFX_FARCH_FILTER_INDEX_WIDTH 13
#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)

static inline u32
efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
			 unsigned int index)
{
	unsigned int range;

	range = efx_farch_filter_type_match_pri[spec->type];
	if (!(spec->flags & EFX_FILTER_FLAG_RX))
		range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;

	return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
}

static inline enum efx_farch_filter_table_id
efx_farch_filter_id_table_id(u32 id)
{
	unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;

	if (range < ARRAY_SIZE(efx_farch_filter_range_table))
		return efx_farch_filter_range_table[range];
	else
		return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
}

static inline unsigned int efx_farch_filter_id_index(u32 id)
{
	return id & EFX_FARCH_FILTER_INDEX_MASK;
}

u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
	enum efx_farch_filter_table_id table_id;

	do {
		table_id = efx_farch_filter_range_table[range];
		if (state->table[table_id].size != 0)
			return range << EFX_FARCH_FILTER_INDEX_WIDTH |
				state->table[table_id].size;
	} while (range--);

	return 0;
}

s32 efx_farch_filter_insert(struct efx_nic *efx,
			    struct efx_filter_spec *gen_spec,
			    bool replace_equal)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	struct efx_farch_filter_spec spec;
	efx_oword_t filter;
	int rep_index, ins_index;
	unsigned int depth = 0;
	int rc;

	rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
	if (rc)
		return rc;

	down_write(&state->lock);

	table = &state->table[efx_farch_filter_spec_table_id(&spec)];
	if (table->size == 0) {
		rc = -EINVAL;
		goto out_unlock;
	}

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: type %d search_limit=%d", __func__, spec.type,
		   table->search_limit[spec.type]);

	if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
		/* One filter spec per type */
		BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
		BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
			     EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
		rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
		ins_index = rep_index;
	} else {
		/* Search concurrently for
		 * (1) a filter to be replaced (rep_index): any filter
		 *     with the same match values, up to the current
		 *     search depth for this type, and
		 * (2) the insertion point (ins_index): (1) or any
		 *     free slot before it or up to the maximum search
		 *     depth for this priority
		 * We fail if we cannot find (2).
		 *
		 * We can stop once either
		 * (a) we find (1), in which case we have definitely
		 *     found (2) as well; or
		 * (b) we have searched exhaustively for (1), and have
		 *     either found (2) or searched exhaustively for it
		 */
		u32 key = efx_farch_filter_build(&filter, &spec);
		unsigned int hash = efx_farch_filter_hash(key);
		unsigned int incr = efx_farch_filter_increment(key);
		unsigned int max_rep_depth = table->search_limit[spec.type];
		unsigned int max_ins_depth =
			spec.priority <= EFX_FILTER_PRI_HINT ?
			EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
			EFX_FARCH_FILTER_CTL_SRCH_MAX;
		unsigned int i = hash & (table->size - 1);

		ins_index = -1;
		depth = 1;

		for (;;) {
			if (!test_bit(i, table->used_bitmap)) {
				if (ins_index < 0)
					ins_index = i;
			} else if (efx_farch_filter_equal(&spec,
							  &table->spec[i])) {
				/* Case (a) */
				if (ins_index < 0)
					ins_index = i;
				rep_index = i;
				break;
			}

			if (depth >= max_rep_depth &&
			    (ins_index >= 0 || depth >= max_ins_depth)) {
				/* Case (b) */
				if (ins_index < 0) {
					rc = -EBUSY;
					goto out_unlock;
				}
				rep_index = -1;
				break;
			}

			i = (i + incr) & (table->size - 1);
			++depth;
		}
	}

	/* If we found a filter to be replaced, check whether we
	 * should do so
	 */
	if (rep_index >= 0) {
		struct efx_farch_filter_spec *saved_spec =
			&table->spec[rep_index];

		if (spec.priority == saved_spec->priority && !replace_equal) {
			rc = -EEXIST;
			goto out_unlock;
		}
		if (spec.priority < saved_spec->priority) {
			rc = -EPERM;
			goto out_unlock;
		}
		if (saved_spec->priority == EFX_FILTER_PRI_AUTO ||
		    saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO)
			spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
	}

	/* Insert the filter */
	if (ins_index != rep_index) {
		__set_bit(ins_index, table->used_bitmap);
		++table->used;
	}
	table->spec[ins_index] = spec;

	if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
		efx_farch_filter_push_rx_config(efx);
	} else {
		if (table->search_limit[spec.type] < depth) {
			table->search_limit[spec.type] = depth;
			if (spec.flags & EFX_FILTER_FLAG_TX)
				efx_farch_filter_push_tx_limits(efx);
			else
				efx_farch_filter_push_rx_config(efx);
		}

		efx_writeo(efx, &filter,
			   table->offset + table->step * ins_index);

		/* If we were able to replace a filter by inserting
		 * at a lower depth, clear the replaced filter
		 */
		if (ins_index != rep_index && rep_index >= 0)
			efx_farch_filter_table_clear_entry(efx, table,
							   rep_index);
	}

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: filter type %d index %d rxq %u set",
		   __func__, spec.type, ins_index, spec.dmaq_id);
	rc = efx_farch_filter_make_id(&spec, ins_index);

out_unlock:
	up_write(&state->lock);
	return rc;
}

static void
efx_farch_filter_table_clear_entry(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx)
{
	static efx_oword_t filter;

	EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
	BUG_ON(table->offset == 0); /* can't clear MAC default filters */

	__clear_bit(filter_idx, table->used_bitmap);
	--table->used;
	memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));

	efx_writeo(efx, &filter, table->offset + table->step * filter_idx);

	/* If this filter required a greater search depth than
	 * any other, the search limit for its type can now be
	 * decreased.  However, it is hard to determine that
	 * unless the table has become completely empty - in
	 * which case, all its search limits can be set to 0.
	 */
	if (unlikely(table->used == 0)) {
		memset(table->search_limit, 0, sizeof(table->search_limit));
		if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
			efx_farch_filter_push_tx_limits(efx);
		else
			efx_farch_filter_push_rx_config(efx);
	}
}

static int efx_farch_filter_remove(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx,
				   enum efx_filter_priority priority)
{
	struct efx_farch_filter_spec *spec = &table->spec[filter_idx];

	if (!test_bit(filter_idx, table->used_bitmap) ||
	    spec->priority != priority)
		return -ENOENT;

	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
		efx_farch_filter_init_rx_auto(efx, spec);
		efx_farch_filter_push_rx_config(efx);
	} else {
		efx_farch_filter_table_clear_entry(efx, table, filter_idx);
	}

	return 0;
}

int efx_farch_filter_remove_safe(struct efx_nic *efx,
				 enum efx_filter_priority priority,
				 u32 filter_id)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	struct efx_farch_filter_spec *spec;
	int rc;

	table_id = efx_farch_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
		return -ENOENT;
	table = &state->table[table_id];

	filter_idx = efx_farch_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		return -ENOENT;
	down_write(&state->lock);
	spec = &table->spec[filter_idx];

	rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
	up_write(&state->lock);

	return rc;
}

int efx_farch_filter_get_safe(struct efx_nic *efx,
			      enum efx_filter_priority priority,
			      u32 filter_id, struct efx_filter_spec *spec_buf)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	struct efx_farch_filter_spec *spec;
	unsigned int filter_idx;
	int rc = -ENOENT;

	down_read(&state->lock);

	table_id = efx_farch_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
		goto out_unlock;
	table = &state->table[table_id];

	filter_idx = efx_farch_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		goto out_unlock;
	spec = &table->spec[filter_idx];

	if (test_bit(filter_idx, table->used_bitmap) &&
	    spec->priority == priority) {
		efx_farch_filter_to_gen_spec(spec_buf, spec);
		rc = 0;
	}

out_unlock:
	up_read(&state->lock);
	return rc;
}

static void
efx_farch_filter_table_clear(struct efx_nic *efx,
			     enum efx_farch_filter_table_id table_id,
			     enum efx_filter_priority priority)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table = &state->table[table_id];
	unsigned int filter_idx;

	down_write(&state->lock);
	for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
		if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
			efx_farch_filter_remove(efx, table,
						filter_idx, priority);
	}
	up_write(&state->lock);
}

int efx_farch_filter_clear_rx(struct efx_nic *efx,
			      enum efx_filter_priority priority)
{
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
				     priority);
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
				     priority);
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
				     priority);
	return 0;
}

u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
				   enum efx_filter_priority priority)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	u32 count = 0;

	down_read(&state->lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority)
				count++;
		}
	}

	up_read(&state->lock);

	return count;
}

s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
				enum efx_filter_priority priority,
				u32 *buf, u32 size)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	s32 count = 0;

	down_read(&state->lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority) {
				if (count == size) {
					count = -EMSGSIZE;
					goto out_unlock;
				}
				buf[count++] = efx_farch_filter_make_id(
					&table->spec[filter_idx], filter_idx);
			}
		}
	}
out_unlock:
	up_read(&state->lock);

	return count;
}

/* Restore filter state after reset */
void efx_farch_filter_table_restore(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	efx_oword_t filter;
	unsigned int filter_idx;

	down_write(&state->lock);

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];

		/* Check whether this is a regular register table */
		if (table->step == 0)
			continue;

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap))
				continue;
			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	efx_farch_filter_push_rx_config(efx);
	efx_farch_filter_push_tx_limits(efx);

	up_write(&state->lock);
}

void efx_farch_filter_table_remove(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		kfree(state->table[table_id].used_bitmap);
		vfree(state->table[table_id].spec);
	}
	kfree(state);
}

int efx_farch_filter_table_probe(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state;
	struct efx_farch_filter_table *table;
	unsigned table_id;

	state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;
	efx->filter_state = state;
	init_rwsem(&state->lock);

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
	table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
	table->offset = FR_BZ_RX_FILTER_TBL0;
	table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
	table->step = FR_BZ_RX_FILTER_TBL0_STEP;

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
	table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
	table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
	table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
	table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
	table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
	table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;

	table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
	table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
	table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
	table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
	table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];
		if (table->size == 0)
			continue;
		table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
					     sizeof(unsigned long),
					     GFP_KERNEL);
		if (!table->used_bitmap)
			goto fail;
		table->spec = vzalloc(array_size(sizeof(*table->spec),
						 table->size));
		if (!table->spec)
			goto fail;
	}

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
	if (table->size) {
		/* RX default filters must always exist */
		struct efx_farch_filter_spec *spec;
		unsigned i;

		for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
			spec = &table->spec[i];
			spec->type = EFX_FARCH_FILTER_UC_DEF + i;
			efx_farch_filter_init_rx_auto(efx, spec);
			__set_bit(i, table->used_bitmap);
		}
	}

	efx_farch_filter_push_rx_config(efx);

	return 0;

fail:
	efx_farch_filter_table_remove(efx);
	return -ENOMEM;
}

/* Update scatter enable flags for filters pointing to our own RX queues */
void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	efx_oword_t filter;
	unsigned int filter_idx;

	down_write(&state->lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap) ||
			    table->spec[filter_idx].dmaq_id >=
			    efx->n_rx_channels)
				continue;

			if (efx->rx_scatter)
				table->spec[filter_idx].flags |=
					EFX_FILTER_FLAG_RX_SCATTER;
			else
				table->spec[filter_idx].flags &=
					~EFX_FILTER_FLAG_RX_SCATTER;

			if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
				/* Pushed by efx_farch_filter_push_rx_config() */
				continue;

			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	efx_farch_filter_push_rx_config(efx);

	up_write(&state->lock);
}

#ifdef CONFIG_RFS_ACCEL

bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
				     unsigned int index)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	bool ret = false, force = false;
	u16 arfs_id;

	down_write(&state->lock);
	spin_lock_bh(&efx->rps_hash_lock);
	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
	if (test_bit(index, table->used_bitmap) &&
	    table->spec[index].priority == EFX_FILTER_PRI_HINT) {
		struct efx_arfs_rule *rule = NULL;
		struct efx_filter_spec spec;

		efx_farch_filter_to_gen_spec(&spec, &table->spec[index]);
		if (!efx->rps_hash_table) {
			/* In the absence of the table, we always returned 0 to
			 * ARFS, so use the same to query it.
			 */
			arfs_id = 0;
		} else {
			rule = efx_rps_hash_find(efx, &spec);
			if (!rule) {
				/* ARFS table doesn't know of this filter, remove it */
				force = true;
			} else {
				arfs_id = rule->arfs_id;
				if (!efx_rps_check_rule(rule, index, &force))
					goto out_unlock;
			}
		}
		if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id,
						 flow_id, arfs_id)) {
			if (rule)
				rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
			efx_rps_hash_del(efx, &spec);
			efx_farch_filter_table_clear_entry(efx, table, index);
			ret = true;
		}
	}
out_unlock:
	spin_unlock_bh(&efx->rps_hash_lock);
	up_write(&state->lock);
	return ret;
}

#endif /* CONFIG_RFS_ACCEL */

void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct netdev_hw_addr *ha;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	u32 crc;
	int bit;

	if (!efx_dev_registered(efx))
		return;

	netif_addr_lock_bh(net_dev);

	efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);

	/* Build multicast hash table */
	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		netdev_for_each_mc_addr(ha, net_dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			__set_bit_le(bit, mc_hash);
		}

		/* Broadcast packets go through the multicast hash filter.
		 * ether_crc_le() of the broadcast address is 0xbe2612ff
		 * so we always add bit 0xff to the mask.
		 */
		__set_bit_le(0xff, mc_hash);
	}

	netif_addr_unlock_bh(net_dev);
}