/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"
/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */
/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5
/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
 * in total for the flushes to complete.
 */
#define EFX_FLUSH_INTERVAL 10
#define EFX_FLUSH_POLL_COUNT 100
/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Generated event code for efx_generate_test_event() */
#define EFX_CHANNEL_MAGIC_TEST(_channel)	\
	(0x00010100 + (_channel)->channel)

/* Generated event code for efx_generate_fill_event() */
#define EFX_CHANNEL_MAGIC_FILL(_channel)	\
	(0x00010200 + (_channel)->channel)
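/* Illustrative note (not from the original source): these magic codes
 * encode the event type in bits 8-15 and the channel number in bits 0-7
 * (channel numbers are assumed to fit in a byte).  So a test event on
 * channel 3 carries magic 0x00010103, and a fill event on the same
 * channel carries 0x00010203, letting the handler match a generated
 * event back to exactly one channel and purpose.
 */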
/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/
static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}
/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) +
		(index & channel->eventq_mask);
}
/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
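/* Worked example (illustrative, not from the original source): a cleared
 * entry holds all ones in both dwords.  If the NIC DMAs a new event into
 * it one dword at a time, a racing reader may see the new low dword paired
 * with the old all-ones high dword; the per-dword test above still reports
 * "no event" for that torn state, whereas a single 64-bit compare against
 * all-ones would wrongly accept the half-written event.
 */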
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}
int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}
/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/
/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}
/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}
/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}
static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}
/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}
/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/
/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}
/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}
/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}
static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}
/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}
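/* Worked example (illustrative, not from the original source): a TX queue
 * with ptr_mask 511 has 512 descriptors of sizeof(efx_qword_t) == 8 bytes
 * each, i.e. 4 KB in total, so the special buffer allocation above is a
 * single page (assuming EFX_BUF_SIZE is 4096).
 */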
void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	tx_queue->flushed = FLUSH_NONE;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}
static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	tx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}
void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(tx_queue->flushed != FLUSH_DONE);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}
/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}
/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/
/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}
/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}
/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor queue.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}
int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}
void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = FLUSH_NONE;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}
static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	rx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}
void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(rx_queue->flushed != FLUSH_DONE);

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}
/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}
/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/
/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}
/* Use HW to insert a SW defined event */
static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}
/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		channel->irq_mod_score += tx_packets;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}
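/* Worked example (illustrative, not from the original source): with a
 * 512-entry ring (ptr_mask = 511), read_count = 500 and a completion event
 * carrying desc_ptr = 10 gives (10 - 500) & 511 = 22 descriptors completed
 * by this single event; the masked subtraction handles ring wrap-around
 * for free.
 */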
/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}
/* Handle receive events that are not in-order. */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}
/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok;
	u16 flags;
	struct efx_rx_queue *rx_queue;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
			EFX_RX_PKT_CSUMMED : 0;
	} else {
		flags = efx_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
}
static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned code;

	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
		; /* ignore */
	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue. Refill it here */
		efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
	else
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
}
static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "RX DMA Q %d reports descriptor fetch error."
			  " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		netif_err(efx, tx_err, efx->net_dev,
			  "TX DMA Q %d reports descriptor fetch error."
			  " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}
int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_handle_tx_event(channel, &event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}
/* Check whether an event is present in the eventq at the current
 * read pointer.  Only useful for self-test.
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}
/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}
void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}
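/* Worked example (illustrative, not from the original source): ring sizes
 * are powers of two, so for a 4096-entry event queue
 * __ffs(channel->eventq.entries) == 12 and FRF_AZ_EVQ_SIZE is programmed
 * with log2 of the entry count; the TX and RX descriptor queue sizes above
 * are encoded the same way.
 */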
void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}
/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}
void efx_nic_generate_test_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}

void efx_nic_generate_fill_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}
/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/
static void efx_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = efx_get_channel(efx, 0);
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;

	do {
		efx_qword_t *event = efx_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!efx_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event,
					      FSF_AZ_DRIVER_EV_SUBCODE);
		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
				tx_queue = efx_get_tx_queue(
					efx, ev_queue / EFX_TXQ_TYPES,
					ev_queue % EFX_TXQ_TYPES);
				tx_queue->flushed = FLUSH_DONE;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_channels) {
				rx_queue = efx_get_rx_queue(efx, ev_queue);
				rx_queue->flushed =
					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
			}
		}

		/* We're about to destroy the queue anyway, so
		 * it's ok to throw away every non-flush event */
		EFX_SET_QWORD(*event);

		++read_ptr;
	} while (read_ptr != end_ptr);

	channel->eventq_read_ptr = read_ptr;
}
/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i, tx_pending, rx_pending;

	/* If necessary prepare the hardware for flushing */
	efx->type->prepare_flush(efx);

	/* Flush all tx queues in parallel */
	efx_for_each_channel(channel, efx) {
		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
			if (tx_queue->initialised)
				efx_flush_tx_queue(tx_queue);
		}
	}

	/* The hardware supports four concurrent rx flushes, each of which may
	 * need to be retried if there is an outstanding descriptor fetch */
	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
		rx_pending = tx_pending = 0;
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_queue->flushed == FLUSH_PENDING)
					++rx_pending;
			}
		}
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_pending == EFX_RX_FLUSH_COUNT)
					break;
				if (rx_queue->flushed == FLUSH_FAILED ||
				    rx_queue->flushed == FLUSH_NONE) {
					efx_flush_rx_queue(rx_queue);
					++rx_pending;
				}
			}
			efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
				if (tx_queue->initialised &&
				    tx_queue->flushed != FLUSH_DONE)
					++tx_pending;
			}
		}

		if (rx_pending == 0 && tx_pending == 0)
			return 0;

		msleep(EFX_FLUSH_INTERVAL);
		efx_poll_flush_events(efx);
	}

	/* Mark the queues as all flushed. We're going to return failure
	 * leading to a reset, or fake up success anyway */
	efx_for_each_channel(channel, efx) {
		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
			if (tx_queue->initialised &&
			    tx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "tx queue %d flush command timed out\n",
					  tx_queue->queue);
			tx_queue->flushed = FLUSH_DONE;
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "rx queue %d flush command timed out\n",
					  efx_rx_queue_index(rx_queue));
			rx_queue->flushed = FLUSH_DONE;
		}
	}

	return -ETIMEDOUT;
}
1294 * Hardware interrupts
1295 * The hardware interrupt handler does very little work; all the event
1296 * queue processing is carried out by per-channel tasklets.
1298 **************************************************************************/
/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
				      bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}
void efx_nic_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Enable interrupts */
	efx_nic_interrupts(efx, true, false);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	   date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}
void efx_nic_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_nic_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_generate_interrupt(struct efx_nic *efx)
{
	efx_nic_interrupts(efx, true, true);
}
/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_nic_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen. "
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}
/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedules event queue processing.
 */
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Could this be ours?  If interrupts are disabled then the
	 * channel state may not be valid.
	 */
	if (!efx->legacy_irq_enabled)
		return result;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level)) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		if (EFX_WORKAROUND_15783(efx))
			efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel_irq(channel);
			queues >>= 1;
		}
		result = IRQ_HANDLED;

	} else if (EFX_WORKAROUND_15783(efx)) {
		efx_qword_t *event;

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		efx_for_each_channel(channel, efx) {
			event = efx_event(channel, channel->eventq_read_ptr);
			if (efx_event_present(event))
				efx_schedule_channel_irq(channel);
			else
				efx_nic_eventq_read_ack(channel);
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return result;
}
/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = *(struct efx_channel **)dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Handle non-event-queue sources */
	if (channel->channel == efx->irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	efx_schedule_channel_irq(channel);

	return IRQ_HANDLED;
}
/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
	}
}
/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
			handler = efx_legacy_interrupt;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->channel_name[channel->channel],
				 &efx->channel[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

 fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, &efx->channel[channel->channel]);
 fail1:
	return rc;
}
void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, &efx->channel[channel->channel]);
	}

	/* ACK legacy interrupt */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}
u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}
void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
			     efx->type->tx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
			     efx->type->rx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_nic_push_rx_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}
/* Register dump */

#define REGISTER_REVISION_A	1
#define REGISTER_REVISION_B	2
#define REGISTER_REVISION_C	3
#define REGISTER_REVISION_Z	3	/* latest revision */

struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
};

#define REGISTER(name, min_rev, max_rev) {				\
	FR_ ## min_rev ## max_rev ## _ ## name,				\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)
static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};
struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};
size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}
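/* Usage note (illustrative, not from the original source): the length
 * computed here sizes the buffer that is later filled by
 * efx_nic_get_regs() below, so the two functions must walk the register
 * and table lists with identical revision filters.  Each table row
 * contributes min(step, 16) bytes because reads capture at most 16 bytes
 * per row regardless of the row spacing.
 */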
void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit register or SRAM */
				efx_readd_table(efx, buf, table->offset, i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset,
						2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}