1 /****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2011 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11 #include <linux/bitops.h>
12 #include <linux/delay.h>
13 #include <linux/interrupt.h>
14 #include <linux/pci.h>
15 #include <linux/module.h>
16 #include <linux/seq_file.h>
17 #include "net_driver.h"
18 #include "bitfield.h"
19 #include "efx.h"
20 #include "nic.h"
21 #include "regs.h"
22 #include "io.h"
23 #include "workarounds.h"
24
25 /**************************************************************************
26 *
27 * Configurable values
28 *
29 **************************************************************************
30 */
31
32 /* This is set to 16 for a good reason. In summary, if larger than
33 * 16, the descriptor cache holds more than a default socket
34 * buffer's worth of packets (for UDP we can only have at most one
35 * socket buffer's worth outstanding). This combined with the fact
36 * that we only get 1 TX event per descriptor cache means the NIC
37 * goes idle.
38 */
39 #define TX_DC_ENTRIES 16
40 #define TX_DC_ENTRIES_ORDER 1
41
42 #define RX_DC_ENTRIES 64
43 #define RX_DC_ENTRIES_ORDER 3
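/* Illustrative note (not from the original source): the _ORDER values
 * encode the descriptor cache size as 8 << ORDER, so TX_DC_ENTRIES ==
 * 8 << 1 == 16 and RX_DC_ENTRIES == 8 << 3 == 64.  efx_nic_init_common()
 * later in this file checks exactly this with BUILD_BUG_ON() before
 * writing the ORDER values into FR_AZ_TX_DC_CFG and FR_AZ_RX_DC_CFG.
 */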
44
45 /* If EFX_MAX_INT_ERRORS internal errors occur within
46 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
47 * disable it.
48 */
49 #define EFX_INT_ERROR_EXPIRE 3600
50 #define EFX_MAX_INT_ERRORS 5
51
52 /* Depth of RX flush request fifo */
53 #define EFX_RX_FLUSH_COUNT 4
54
55 /* Driver generated events */
56 #define _EFX_CHANNEL_MAGIC_TEST 0x000101
57 #define _EFX_CHANNEL_MAGIC_FILL 0x000102
58 #define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
59 #define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
60
61 #define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
62 #define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
63
64 #define EFX_CHANNEL_MAGIC_TEST(_channel) \
65 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
66 #define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
67 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
68 efx_rx_queue_index(_rx_queue))
69 #define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
70 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
71 efx_rx_queue_index(_rx_queue))
72 #define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
73 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
74 (_tx_queue)->queue)
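/* Worked example (illustrative, not part of the driver): for channel 3,
 * EFX_CHANNEL_MAGIC_TEST() evaluates to (0x000101 << 8) | 3 == 0x00010103,
 * and _EFX_CHANNEL_MAGIC_CODE(0x00010103) recovers 0x000101.  The code
 * selects the kind of driver-generated event, while the data field
 * carries the channel or queue index it refers to.
 */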
75
76 static void efx_magic_event(struct efx_channel *channel, u32 magic);
77
78 /**************************************************************************
79 *
80 * Solarstorm hardware access
81 *
82 **************************************************************************/
83
84 static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
85 unsigned int index)
86 {
87 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
88 value, index);
89 }
90
91 /* Read the current event from the event queue */
92 static inline efx_qword_t *efx_event(struct efx_channel *channel,
93 unsigned int index)
94 {
95 return ((efx_qword_t *) (channel->eventq.addr)) +
96 (index & channel->eventq_mask);
97 }
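/* Illustrative note: eventq_mask is the number of event queue entries
 * minus one (see efx_nic_probe_eventq(), which allocates eventq_mask + 1
 * entries), so the AND above implements cheap power-of-two wrap-around.
 * E.g. with an 8192-entry queue (mask 8191) a read pointer of 8195 maps
 * back to slot 3.
 */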
98
99 /* See if an event is present
100 *
101 * We check both the high and low dword of the event for all ones. We
102 * wrote all ones when we cleared the event, and no valid event can
103 * have all ones in either its high or low dwords. This approach is
104 * robust against reordering.
105 *
106 * Note that using a single 64-bit comparison is incorrect; even
107 * though the CPU read will be atomic, the DMA write may not be.
108 */
109 static inline int efx_event_present(efx_qword_t *event)
110 {
111 return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
112 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
113 }
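/* Illustrative note: efx_nic_process_eventq() below marks each consumed
 * event as all ones with EFX_SET_QWORD(), so the next call to
 * efx_event_present() on that slot returns false until the NIC DMAs a
 * fresh event into it.
 */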
114
115 static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
116 const efx_oword_t *mask)
117 {
118 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
119 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
120 }
121
122 int efx_nic_test_registers(struct efx_nic *efx,
123 const struct efx_nic_register_test *regs,
124 size_t n_regs)
125 {
126 unsigned address = 0, i, j;
127 efx_oword_t mask, imask, original, reg, buf;
128
129 for (i = 0; i < n_regs; ++i) {
130 address = regs[i].address;
131 mask = imask = regs[i].mask;
132 EFX_INVERT_OWORD(imask);
133
134 efx_reado(efx, &original, address);
135
136 /* bit sweep on and off */
137 for (j = 0; j < 128; j++) {
138 if (!EFX_EXTRACT_OWORD32(mask, j, j))
139 continue;
140
141 /* Test that this testable bit can be set in isolation */
142 EFX_AND_OWORD(reg, original, mask);
143 EFX_SET_OWORD32(reg, j, j, 1);
144
145 efx_writeo(efx, &reg, address);
146 efx_reado(efx, &buf, address);
147
148 if (efx_masked_compare_oword(&reg, &buf, &mask))
149 goto fail;
150
151 /* Test that this testable bit can be cleared in isolation */
152 EFX_OR_OWORD(reg, original, mask);
153 EFX_SET_OWORD32(reg, j, j, 0);
154
155 efx_writeo(efx, &reg, address);
156 efx_reado(efx, &buf, address);
157
158 if (efx_masked_compare_oword(&reg, &buf, &mask))
159 goto fail;
160 }
161
162 efx_writeo(efx, &original, address);
163 }
164
165 return 0;
166
167 fail:
168 netif_err(efx, hw, efx->net_dev,
169 "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
170 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
171 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
172 return -EIO;
173 }
174
175 /**************************************************************************
176 *
177 * Special buffer handling
178 * Special buffers are used for event queues and the TX and RX
179 * descriptor rings.
180 *
181 *************************************************************************/
182
183 /*
184 * Initialise a special buffer
185 *
186 * This will define a buffer (previously allocated via
187 * efx_alloc_special_buffer()) in the buffer table, allowing
188 * it to be used for event queues, descriptor rings etc.
189 */
190 static void
191 efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
192 {
193 efx_qword_t buf_desc;
194 unsigned int index;
195 dma_addr_t dma_addr;
196 int i;
197
198 EFX_BUG_ON_PARANOID(!buffer->addr);
199
200 /* Write buffer descriptors to NIC */
201 for (i = 0; i < buffer->entries; i++) {
202 index = buffer->index + i;
203 dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
204 netif_dbg(efx, probe, efx->net_dev,
205 "mapping special buffer %d at %llx\n",
206 index, (unsigned long long)dma_addr);
207 EFX_POPULATE_QWORD_3(buf_desc,
208 FRF_AZ_BUF_ADR_REGION, 0,
209 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
210 FRF_AZ_BUF_OWNER_ID_FBUF, 0);
211 efx_write_buf_tbl(efx, &buf_desc, index);
212 }
213 }
214
215 /* Unmaps a buffer and clears the buffer table entries */
216 static void
217 efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
218 {
219 efx_oword_t buf_tbl_upd;
220 unsigned int start = buffer->index;
221 unsigned int end = (buffer->index + buffer->entries - 1);
222
223 if (!buffer->entries)
224 return;
225
226 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
227 buffer->index, buffer->index + buffer->entries - 1);
228
229 EFX_POPULATE_OWORD_4(buf_tbl_upd,
230 FRF_AZ_BUF_UPD_CMD, 0,
231 FRF_AZ_BUF_CLR_CMD, 1,
232 FRF_AZ_BUF_CLR_END_ID, end,
233 FRF_AZ_BUF_CLR_START_ID, start);
234 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
235 }
236
237 /*
238 * Allocate a new special buffer
239 *
240 * This allocates memory for a new buffer, clears it and allocates a
241 * new buffer ID range. It does not write into the buffer table.
242 *
243 * This call will allocate 4KB buffers, since 8KB buffers can't be
244 * used for event queues and descriptor rings.
245 */
246 static int efx_alloc_special_buffer(struct efx_nic *efx,
247 struct efx_special_buffer *buffer,
248 unsigned int len)
249 {
250 len = ALIGN(len, EFX_BUF_SIZE);
251
252 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
253 &buffer->dma_addr, GFP_KERNEL);
254 if (!buffer->addr)
255 return -ENOMEM;
256 buffer->len = len;
257 buffer->entries = len / EFX_BUF_SIZE;
258 BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
259
260 /* Select new buffer ID */
261 buffer->index = efx->next_buffer_table;
262 efx->next_buffer_table += buffer->entries;
263 #ifdef CONFIG_SFC_SRIOV
264 BUG_ON(efx_sriov_enabled(efx) &&
265 efx->vf_buftbl_base < efx->next_buffer_table);
266 #endif
267
268 netif_dbg(efx, probe, efx->net_dev,
269 "allocating special buffers %d-%d at %llx+%x "
270 "(virt %p phys %llx)\n", buffer->index,
271 buffer->index + buffer->entries - 1,
272 (u64)buffer->dma_addr, len,
273 buffer->addr, (u64)virt_to_phys(buffer->addr));
274
275 return 0;
276 }
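/* Worked example (illustrative): a 512-entry descriptor ring needs
 * 512 * sizeof(efx_qword_t) == 4096 bytes, which ALIGN() leaves as one
 * EFX_BUF_SIZE (4KB) buffer and therefore one buffer table entry; a
 * 4096-entry ring needs 32KB and consumes eight consecutive entries
 * starting at buffer->index.
 */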
277
278 static void
279 efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
280 {
281 if (!buffer->addr)
282 return;
283
284 netif_dbg(efx, hw, efx->net_dev,
285 "deallocating special buffers %d-%d at %llx+%x "
286 "(virt %p phys %llx)\n", buffer->index,
287 buffer->index + buffer->entries - 1,
288 (u64)buffer->dma_addr, buffer->len,
289 buffer->addr, (u64)virt_to_phys(buffer->addr));
290
291 dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
292 buffer->dma_addr);
293 buffer->addr = NULL;
294 buffer->entries = 0;
295 }
296
297 /**************************************************************************
298 *
299 * Generic buffer handling
300 * These buffers are used for interrupt status, MAC stats, etc.
301 *
302 **************************************************************************/
303
304 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
305 unsigned int len)
306 {
307 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
308 &buffer->dma_addr, GFP_ATOMIC);
309 if (!buffer->addr)
310 return -ENOMEM;
311 buffer->len = len;
312 memset(buffer->addr, 0, len);
313 return 0;
314 }
315
316 void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
317 {
318 if (buffer->addr) {
319 dma_free_coherent(&efx->pci_dev->dev, buffer->len,
320 buffer->addr, buffer->dma_addr);
321 buffer->addr = NULL;
322 }
323 }
324
325 /**************************************************************************
326 *
327 * TX path
328 *
329 **************************************************************************/
330
331 /* Returns a pointer to the specified transmit descriptor in the TX
332 * descriptor queue belonging to the specified channel.
333 */
334 static inline efx_qword_t *
335 efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
336 {
337 return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
338 }
339
340 /* This writes to TX_DESC_WPTR, the write pointer for the TX descriptor ring */
341 static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
342 {
343 unsigned write_ptr;
344 efx_dword_t reg;
345
346 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
347 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
348 efx_writed_page(tx_queue->efx, &reg,
349 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
350 }
351
352 /* Write pointer and first descriptor for TX descriptor ring */
353 static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
354 const efx_qword_t *txd)
355 {
356 unsigned write_ptr;
357 efx_oword_t reg;
358
359 BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
360 BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
361
362 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
363 EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
364 FRF_AZ_TX_DESC_WPTR, write_ptr);
365 reg.qword[0] = *txd;
366 efx_writeo_page(tx_queue->efx, &reg,
367 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
368 }
369
370 static inline bool
371 efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
372 {
373 unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
374
375 if (empty_read_count == 0)
376 return false;
377
378 tx_queue->empty_read_count = 0;
379 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
380 }
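/* Illustrative note: empty_read_count is non-zero only while the
 * completion path has marked the queue as empty (EFX_EMPTY_COUNT_VALID
 * plus the count recorded at that point, set elsewhere in the driver).
 * If that count still matches write_count once the VALID flag is masked
 * off, no descriptors have been queued since the ring drained, so the
 * caller may push the first new descriptor directly via
 * efx_push_tx_desc() instead of only ringing the doorbell.
 */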
381
382 /* For each entry inserted into the software descriptor ring, create a
383 * descriptor in the hardware TX descriptor ring (in host memory), and
384 * write a doorbell.
385 */
386 void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
387 {
388
389 struct efx_tx_buffer *buffer;
390 efx_qword_t *txd;
391 unsigned write_ptr;
392 unsigned old_write_count = tx_queue->write_count;
393
394 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
395
396 do {
397 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
398 buffer = &tx_queue->buffer[write_ptr];
399 txd = efx_tx_desc(tx_queue, write_ptr);
400 ++tx_queue->write_count;
401
402 /* Create TX descriptor ring entry */
403 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
404 EFX_POPULATE_QWORD_4(*txd,
405 FSF_AZ_TX_KER_CONT,
406 buffer->flags & EFX_TX_BUF_CONT,
407 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
408 FSF_AZ_TX_KER_BUF_REGION, 0,
409 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
410 } while (tx_queue->write_count != tx_queue->insert_count);
411
412 wmb(); /* Ensure descriptors are written before they are fetched */
413
414 if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
415 txd = efx_tx_desc(tx_queue,
416 old_write_count & tx_queue->ptr_mask);
417 efx_push_tx_desc(tx_queue, txd);
418 ++tx_queue->pushes;
419 } else {
420 efx_notify_tx_desc(tx_queue);
421 }
422 }
423
424 /* Allocate hardware resources for a TX queue */
425 int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
426 {
427 struct efx_nic *efx = tx_queue->efx;
428 unsigned entries;
429
430 entries = tx_queue->ptr_mask + 1;
431 return efx_alloc_special_buffer(efx, &tx_queue->txd,
432 entries * sizeof(efx_qword_t));
433 }
434
435 void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
436 {
437 struct efx_nic *efx = tx_queue->efx;
438 efx_oword_t reg;
439
440 /* Pin TX descriptor ring */
441 efx_init_special_buffer(efx, &tx_queue->txd);
442
443 /* Push TX descriptor ring to card */
444 EFX_POPULATE_OWORD_10(reg,
445 FRF_AZ_TX_DESCQ_EN, 1,
446 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
447 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
448 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
449 FRF_AZ_TX_DESCQ_EVQ_ID,
450 tx_queue->channel->channel,
451 FRF_AZ_TX_DESCQ_OWNER_ID, 0,
452 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
453 FRF_AZ_TX_DESCQ_SIZE,
454 __ffs(tx_queue->txd.entries),
455 FRF_AZ_TX_DESCQ_TYPE, 0,
456 FRF_BZ_TX_NON_IP_DROP_DIS, 1);
457
458 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
459 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
460 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
461 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
462 !csum);
463 }
464
465 efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
466 tx_queue->queue);
467
468 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
469 /* Only 128 bits in this register */
470 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
471
472 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
473 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
474 __clear_bit_le(tx_queue->queue, &reg);
475 else
476 __set_bit_le(tx_queue->queue, &reg);
477 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
478 }
479
480 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
481 EFX_POPULATE_OWORD_1(reg,
482 FRF_BZ_TX_PACE,
483 (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
484 FFE_BZ_TX_PACE_OFF :
485 FFE_BZ_TX_PACE_RESERVED);
486 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
487 tx_queue->queue);
488 }
489 }
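/* Illustrative note: txd.entries counts 4KB buffer table pages (see
 * efx_alloc_special_buffer()), not descriptors, so FRF_AZ_TX_DESCQ_SIZE
 * is written as __ffs() of that page count.  E.g. a 512-descriptor ring
 * occupies one page and encodes as 0, while a 4096-descriptor ring
 * occupies eight pages and encodes as 3.
 */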
490
491 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
492 {
493 struct efx_nic *efx = tx_queue->efx;
494 efx_oword_t tx_flush_descq;
495
496 WARN_ON(atomic_read(&tx_queue->flush_outstanding));
497 atomic_set(&tx_queue->flush_outstanding, 1);
498
499 EFX_POPULATE_OWORD_2(tx_flush_descq,
500 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
501 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
502 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
503 }
504
505 void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
506 {
507 struct efx_nic *efx = tx_queue->efx;
508 efx_oword_t tx_desc_ptr;
509
510 /* Remove TX descriptor ring from card */
511 EFX_ZERO_OWORD(tx_desc_ptr);
512 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
513 tx_queue->queue);
514
515 /* Unpin TX descriptor ring */
516 efx_fini_special_buffer(efx, &tx_queue->txd);
517 }
518
519 /* Free buffers backing TX queue */
520 void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
521 {
522 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
523 }
524
525 /**************************************************************************
526 *
527 * RX path
528 *
529 **************************************************************************/
530
531 /* Returns a pointer to the specified descriptor in the RX descriptor queue */
532 static inline efx_qword_t *
533 efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
534 {
535 return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
536 }
537
538 /* This creates an entry in the RX descriptor queue */
539 static inline void
540 efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
541 {
542 struct efx_rx_buffer *rx_buf;
543 efx_qword_t *rxd;
544
545 rxd = efx_rx_desc(rx_queue, index);
546 rx_buf = efx_rx_buffer(rx_queue, index);
547 EFX_POPULATE_QWORD_3(*rxd,
548 FSF_AZ_RX_KER_BUF_SIZE,
549 rx_buf->len -
550 rx_queue->efx->type->rx_buffer_padding,
551 FSF_AZ_RX_KER_BUF_REGION, 0,
552 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
553 }
554
555 /* This writes to the RX_DESC_WPTR register for the specified receive
556 * descriptor ring.
557 */
558 void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
559 {
560 struct efx_nic *efx = rx_queue->efx;
561 efx_dword_t reg;
562 unsigned write_ptr;
563
564 while (rx_queue->notified_count != rx_queue->added_count) {
565 efx_build_rx_desc(
566 rx_queue,
567 rx_queue->notified_count & rx_queue->ptr_mask);
568 ++rx_queue->notified_count;
569 }
570
571 wmb();
572 write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
573 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
574 efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
575 efx_rx_queue_index(rx_queue));
576 }
577
578 int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
579 {
580 struct efx_nic *efx = rx_queue->efx;
581 unsigned entries;
582
583 entries = rx_queue->ptr_mask + 1;
584 return efx_alloc_special_buffer(efx, &rx_queue->rxd,
585 entries * sizeof(efx_qword_t));
586 }
587
588 void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
589 {
590 efx_oword_t rx_desc_ptr;
591 struct efx_nic *efx = rx_queue->efx;
592 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
593 bool iscsi_digest_en = is_b0;
594
595 netif_dbg(efx, hw, efx->net_dev,
596 "RX queue %d ring in special buffers %d-%d\n",
597 efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
598 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
599
600 /* Pin RX descriptor ring */
601 efx_init_special_buffer(efx, &rx_queue->rxd);
602
603 /* Push RX descriptor ring to card */
604 EFX_POPULATE_OWORD_10(rx_desc_ptr,
605 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
606 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
607 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
608 FRF_AZ_RX_DESCQ_EVQ_ID,
609 efx_rx_queue_channel(rx_queue)->channel,
610 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
611 FRF_AZ_RX_DESCQ_LABEL,
612 efx_rx_queue_index(rx_queue),
613 FRF_AZ_RX_DESCQ_SIZE,
614 __ffs(rx_queue->rxd.entries),
615 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
616 /* For >=B0 this is scatter so disable */
617 FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
618 FRF_AZ_RX_DESCQ_EN, 1);
619 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
620 efx_rx_queue_index(rx_queue));
621 }
622
623 static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
624 {
625 struct efx_nic *efx = rx_queue->efx;
626 efx_oword_t rx_flush_descq;
627
628 EFX_POPULATE_OWORD_2(rx_flush_descq,
629 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
630 FRF_AZ_RX_FLUSH_DESCQ,
631 efx_rx_queue_index(rx_queue));
632 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
633 }
634
635 void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
636 {
637 efx_oword_t rx_desc_ptr;
638 struct efx_nic *efx = rx_queue->efx;
639
640 /* Remove RX descriptor ring from card */
641 EFX_ZERO_OWORD(rx_desc_ptr);
642 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
643 efx_rx_queue_index(rx_queue));
644
645 /* Unpin RX descriptor ring */
646 efx_fini_special_buffer(efx, &rx_queue->rxd);
647 }
648
649 /* Free buffers backing RX queue */
650 void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
651 {
652 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
653 }
654
655 /**************************************************************************
656 *
657 * Flush handling
658 *
659 **************************************************************************/
660
661 /* efx_nic_flush_queues() must be woken up when all flushes are completed,
662 * or when more RX flushes can be kicked off.
663 */
664 static bool efx_flush_wake(struct efx_nic *efx)
665 {
666 /* Ensure that all updates are visible to efx_nic_flush_queues() */
667 smp_mb();
668
669 return (atomic_read(&efx->drain_pending) == 0 ||
670 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
671 && atomic_read(&efx->rxq_flush_pending) > 0));
672 }
673
674 static bool efx_check_tx_flush_complete(struct efx_nic *efx)
675 {
676 bool i = true;
677 efx_oword_t txd_ptr_tbl;
678 struct efx_channel *channel;
679 struct efx_tx_queue *tx_queue;
680
681 efx_for_each_channel(channel, efx) {
682 efx_for_each_channel_tx_queue(tx_queue, channel) {
683 efx_reado_table(efx, &txd_ptr_tbl,
684 FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
685 if (EFX_OWORD_FIELD(txd_ptr_tbl,
686 FRF_AZ_TX_DESCQ_FLUSH) ||
687 EFX_OWORD_FIELD(txd_ptr_tbl,
688 FRF_AZ_TX_DESCQ_EN)) {
689 netif_dbg(efx, hw, efx->net_dev,
690 "flush did not complete on TXQ %d\n",
691 tx_queue->queue);
692 i = false;
693 } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
694 1, 0)) {
695 /* The flush is complete, but we didn't
696 * receive a flush completion event
697 */
698 netif_dbg(efx, hw, efx->net_dev,
699 "flush complete on TXQ %d, so drain "
700 "the queue\n", tx_queue->queue);
701 /* Don't need to increment drain_pending as it
702 * has already been incremented for the queues
703 * which did not drain
704 */
705 efx_magic_event(channel,
706 EFX_CHANNEL_MAGIC_TX_DRAIN(
707 tx_queue));
708 }
709 }
710 }
711
712 return i;
713 }
714
715 /* Flush all the transmit queues, and continue flushing receive queues until
716 * they're all flushed. Wait for the DRAIN events to be received so that there
717 * are no more RX and TX events left on any channel. */
718 int efx_nic_flush_queues(struct efx_nic *efx)
719 {
720 unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
721 struct efx_channel *channel;
722 struct efx_rx_queue *rx_queue;
723 struct efx_tx_queue *tx_queue;
724 int rc = 0;
725
726 efx->type->prepare_flush(efx);
727
728 efx_for_each_channel(channel, efx) {
729 efx_for_each_channel_tx_queue(tx_queue, channel) {
730 atomic_inc(&efx->drain_pending);
731 efx_flush_tx_queue(tx_queue);
732 }
733 efx_for_each_channel_rx_queue(rx_queue, channel) {
734 atomic_inc(&efx->drain_pending);
735 rx_queue->flush_pending = true;
736 atomic_inc(&efx->rxq_flush_pending);
737 }
738 }
739
740 while (timeout && atomic_read(&efx->drain_pending) > 0) {
741 /* If SRIOV is enabled, then offload receive queue flushing to
742 * the firmware (though we will still have to poll for
743 * completion). If that fails, fall back to the old scheme.
744 */
745 if (efx_sriov_enabled(efx)) {
746 rc = efx_mcdi_flush_rxqs(efx);
747 if (!rc)
748 goto wait;
749 }
750
751 /* The hardware supports four concurrent rx flushes, each of
752 * which may need to be retried if there is an outstanding
753 * descriptor fetch
754 */
755 efx_for_each_channel(channel, efx) {
756 efx_for_each_channel_rx_queue(rx_queue, channel) {
757 if (atomic_read(&efx->rxq_flush_outstanding) >=
758 EFX_RX_FLUSH_COUNT)
759 break;
760
761 if (rx_queue->flush_pending) {
762 rx_queue->flush_pending = false;
763 atomic_dec(&efx->rxq_flush_pending);
764 atomic_inc(&efx->rxq_flush_outstanding);
765 efx_flush_rx_queue(rx_queue);
766 }
767 }
768 }
769
770 wait:
771 timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
772 timeout);
773 }
774
775 if (atomic_read(&efx->drain_pending) &&
776 !efx_check_tx_flush_complete(efx)) {
777 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
778 "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
779 atomic_read(&efx->rxq_flush_outstanding),
780 atomic_read(&efx->rxq_flush_pending));
781 rc = -ETIMEDOUT;
782
783 atomic_set(&efx->drain_pending, 0);
784 atomic_set(&efx->rxq_flush_pending, 0);
785 atomic_set(&efx->rxq_flush_outstanding, 0);
786 }
787
788 efx->type->finish_flush(efx);
789
790 return rc;
791 }
792
793 /**************************************************************************
794 *
795 * Event queue processing
796 * Event queues are processed by per-channel tasklets.
797 *
798 **************************************************************************/
799
800 /* Update a channel's event queue's read pointer (RPTR) register
801 *
802 * This writes the EVQ_RPTR_REG register for the specified channel's
803 * event queue.
804 */
805 void efx_nic_eventq_read_ack(struct efx_channel *channel)
806 {
807 efx_dword_t reg;
808 struct efx_nic *efx = channel->efx;
809
810 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
811 channel->eventq_read_ptr & channel->eventq_mask);
812
813 /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
814 * of 4 bytes, but it is really 16 bytes just like later revisions.
815 */
816 efx_writed(efx, &reg,
817 efx->type->evq_rptr_tbl_base +
818 FR_BZ_EVQ_RPTR_STEP * channel->channel);
819 }
820
821 /* Use HW to insert a SW defined event */
822 void efx_generate_event(struct efx_nic *efx, unsigned int evq,
823 efx_qword_t *event)
824 {
825 efx_oword_t drv_ev_reg;
826
827 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
828 FRF_AZ_DRV_EV_DATA_WIDTH != 64);
829 drv_ev_reg.u32[0] = event->u32[0];
830 drv_ev_reg.u32[1] = event->u32[1];
831 drv_ev_reg.u32[2] = 0;
832 drv_ev_reg.u32[3] = 0;
833 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
834 efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
835 }
836
837 static void efx_magic_event(struct efx_channel *channel, u32 magic)
838 {
839 efx_qword_t event;
840
841 EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
842 FSE_AZ_EV_CODE_DRV_GEN_EV,
843 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
844 efx_generate_event(channel->efx, channel->channel, &event);
845 }
846
847 /* Handle a transmit completion event
848 *
849 * The NIC batches TX completion events; the message we receive is of
850 * the form "complete all TX events up to this index".
851 */
852 static int
853 efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
854 {
855 unsigned int tx_ev_desc_ptr;
856 unsigned int tx_ev_q_label;
857 struct efx_tx_queue *tx_queue;
858 struct efx_nic *efx = channel->efx;
859 int tx_packets = 0;
860
861 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
862 return 0;
863
864 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
865 /* Transmit completion */
866 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
867 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
868 tx_queue = efx_channel_get_tx_queue(
869 channel, tx_ev_q_label % EFX_TXQ_TYPES);
870 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
871 tx_queue->ptr_mask);
872 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
873 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
874 /* Rewrite the FIFO write pointer */
875 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
876 tx_queue = efx_channel_get_tx_queue(
877 channel, tx_ev_q_label % EFX_TXQ_TYPES);
878
879 netif_tx_lock(efx->net_dev);
880 efx_notify_tx_desc(tx_queue);
881 netif_tx_unlock(efx->net_dev);
882 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
883 EFX_WORKAROUND_10727(efx)) {
884 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
885 } else {
886 netif_err(efx, tx_err, efx->net_dev,
887 "channel %d unexpected TX event "
888 EFX_QWORD_FMT"\n", channel->channel,
889 EFX_QWORD_VAL(*event));
890 }
891
892 return tx_packets;
893 }
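/* Worked example (illustrative): with ptr_mask == 1023, read_count ==
 * 1020 and an event carrying FSF_AZ_TX_EV_DESC_PTR == 5, the masked
 * subtraction above gives tx_packets == (5 - 1020) & 1023 == 9, so a
 * batched completion that crosses the ring wrap is counted correctly
 * without any special casing.
 */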
894
895 /* Detect errors signalled when the rx_ev_pkt_ok bit is clear. */
896 static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
897 const efx_qword_t *event)
898 {
899 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
900 struct efx_nic *efx = rx_queue->efx;
901 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
902 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
903 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
904 bool rx_ev_other_err, rx_ev_pause_frm;
905 bool rx_ev_hdr_type, rx_ev_mcast_pkt;
906 unsigned rx_ev_pkt_type;
907
908 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
909 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
910 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
911 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
912 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
913 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
914 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
915 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
916 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
917 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
918 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
919 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
920 rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
921 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
922 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
923
924 /* Every error apart from tobe_disc and pause_frm */
925 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
926 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
927 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
928
929 /* Count errors that are not in MAC stats. Ignore expected
930 * checksum errors during self-test. */
931 if (rx_ev_frm_trunc)
932 ++channel->n_rx_frm_trunc;
933 else if (rx_ev_tobe_disc)
934 ++channel->n_rx_tobe_disc;
935 else if (!efx->loopback_selftest) {
936 if (rx_ev_ip_hdr_chksum_err)
937 ++channel->n_rx_ip_hdr_chksum_err;
938 else if (rx_ev_tcp_udp_chksum_err)
939 ++channel->n_rx_tcp_udp_chksum_err;
940 }
941
942 /* TOBE_DISC is expected on unicast mismatches; don't print out an
943 * error message. FRM_TRUNC indicates RXDP dropped the packet due
944 * to a FIFO overflow.
945 */
946 #ifdef DEBUG
947 if (rx_ev_other_err && net_ratelimit()) {
948 netif_dbg(efx, rx_err, efx->net_dev,
949 " RX queue %d unexpected RX event "
950 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
951 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
952 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
953 rx_ev_ip_hdr_chksum_err ?
954 " [IP_HDR_CHKSUM_ERR]" : "",
955 rx_ev_tcp_udp_chksum_err ?
956 " [TCP_UDP_CHKSUM_ERR]" : "",
957 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
958 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
959 rx_ev_drib_nib ? " [DRIB_NIB]" : "",
960 rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
961 rx_ev_pause_frm ? " [PAUSE]" : "");
962 }
963 #endif
964
965 /* The frame must be discarded if any of these are true. */
966 return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
967 rx_ev_tobe_disc | rx_ev_pause_frm) ?
968 EFX_RX_PKT_DISCARD : 0;
969 }
970
971 /* Handle receive events that are not in-order. */
972 static void
973 efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
974 {
975 struct efx_nic *efx = rx_queue->efx;
976 unsigned expected, dropped;
977
978 expected = rx_queue->removed_count & rx_queue->ptr_mask;
979 dropped = (index - expected) & rx_queue->ptr_mask;
980 netif_info(efx, rx_err, efx->net_dev,
981 "dropped %d events (index=%d expected=%d)\n",
982 dropped, index, expected);
983
984 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
985 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
986 }
987
988 /* Handle a packet received event
989 *
990 * The NIC gives a "discard" flag if it's a unicast packet with the
991 * wrong destination address
992 * Also "is multicast" and "matches multicast filter" flags can be used to
993 * discard non-matching multicast packets.
994 */
995 static void
996 efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
997 {
998 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
999 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
1000 unsigned expected_ptr;
1001 bool rx_ev_pkt_ok;
1002 u16 flags;
1003 struct efx_rx_queue *rx_queue;
1004 struct efx_nic *efx = channel->efx;
1005
1006 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
1007 return;
1008
1009 /* Basic packet information */
1010 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
1011 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
1012 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
1013 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
1014 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
1015 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
1016 channel->channel);
1017
1018 rx_queue = efx_channel_get_rx_queue(channel);
1019
1020 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
1021 expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
1022 if (unlikely(rx_ev_desc_ptr != expected_ptr))
1023 efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
1024
1025 if (likely(rx_ev_pkt_ok)) {
1026 /* If packet is marked as OK and packet type is TCP/IP or
1027 * UDP/IP, then we can rely on the hardware checksum.
1028 */
1029 flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
1030 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
1031 EFX_RX_PKT_CSUMMED : 0;
1032 } else {
1033 flags = efx_handle_rx_not_ok(rx_queue, event);
1034 }
1035
1036 /* Detect multicast packets that didn't match the filter */
1037 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
1038 if (rx_ev_mcast_pkt) {
1039 unsigned int rx_ev_mcast_hash_match =
1040 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
1041
1042 if (unlikely(!rx_ev_mcast_hash_match)) {
1043 ++channel->n_rx_mcast_mismatch;
1044 flags |= EFX_RX_PKT_DISCARD;
1045 }
1046 }
1047
1048 channel->irq_mod_score += 2;
1049
1050 /* Handle received packet */
1051 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
1052 }
1053
1054 /* If this flush done event corresponds to a &struct efx_tx_queue, then
1055 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
1056 * of all transmit completions.
1057 */
1058 static void
1059 efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1060 {
1061 struct efx_tx_queue *tx_queue;
1062 int qid;
1063
1064 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1065 if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
1066 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
1067 qid % EFX_TXQ_TYPES);
1068 if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
1069 efx_magic_event(tx_queue->channel,
1070 EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
1071 }
1072 }
1073 }
1074
1075 /* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
1076 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
1077 * the RX queue back to the mask of RX queues in need of flushing.
1078 */
1079 static void
1080 efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1081 {
1082 struct efx_channel *channel;
1083 struct efx_rx_queue *rx_queue;
1084 int qid;
1085 bool failed;
1086
1087 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1088 failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1089 if (qid >= efx->n_channels)
1090 return;
1091 channel = efx_get_channel(efx, qid);
1092 if (!efx_channel_has_rx_queue(channel))
1093 return;
1094 rx_queue = efx_channel_get_rx_queue(channel);
1095
1096 if (failed) {
1097 netif_info(efx, hw, efx->net_dev,
1098 "RXQ %d flush retry\n", qid);
1099 rx_queue->flush_pending = true;
1100 atomic_inc(&efx->rxq_flush_pending);
1101 } else {
1102 efx_magic_event(efx_rx_queue_channel(rx_queue),
1103 EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
1104 }
1105 atomic_dec(&efx->rxq_flush_outstanding);
1106 if (efx_flush_wake(efx))
1107 wake_up(&efx->flush_wq);
1108 }
1109
1110 static void
1111 efx_handle_drain_event(struct efx_channel *channel)
1112 {
1113 struct efx_nic *efx = channel->efx;
1114
1115 WARN_ON(atomic_read(&efx->drain_pending) == 0);
1116 atomic_dec(&efx->drain_pending);
1117 if (efx_flush_wake(efx))
1118 wake_up(&efx->flush_wq);
1119 }
1120
1121 static void
1122 efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
1123 {
1124 struct efx_nic *efx = channel->efx;
1125 struct efx_rx_queue *rx_queue =
1126 efx_channel_has_rx_queue(channel) ?
1127 efx_channel_get_rx_queue(channel) : NULL;
1128 unsigned magic, code;
1129
1130 magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
1131 code = _EFX_CHANNEL_MAGIC_CODE(magic);
1132
1133 if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
1134 channel->event_test_cpu = raw_smp_processor_id();
1135 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
1136 /* The queue must be empty, so we won't receive any RX
1137 * events and efx_process_channel() won't refill the
1138 * queue. Refill it here. */
1139 efx_fast_push_rx_descriptors(rx_queue);
1140 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
1141 rx_queue->enabled = false;
1142 efx_handle_drain_event(channel);
1143 } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
1144 efx_handle_drain_event(channel);
1145 } else {
1146 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
1147 "generated event "EFX_QWORD_FMT"\n",
1148 channel->channel, EFX_QWORD_VAL(*event));
1149 }
1150 }
1151
1152 static void
1153 efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
1154 {
1155 struct efx_nic *efx = channel->efx;
1156 unsigned int ev_sub_code;
1157 unsigned int ev_sub_data;
1158
1159 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
1160 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1161
1162 switch (ev_sub_code) {
1163 case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
1164 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
1165 channel->channel, ev_sub_data);
1166 efx_handle_tx_flush_done(efx, event);
1167 efx_sriov_tx_flush_done(efx, event);
1168 break;
1169 case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
1170 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
1171 channel->channel, ev_sub_data);
1172 efx_handle_rx_flush_done(efx, event);
1173 efx_sriov_rx_flush_done(efx, event);
1174 break;
1175 case FSE_AZ_EVQ_INIT_DONE_EV:
1176 netif_dbg(efx, hw, efx->net_dev,
1177 "channel %d EVQ %d initialised\n",
1178 channel->channel, ev_sub_data);
1179 break;
1180 case FSE_AZ_SRM_UPD_DONE_EV:
1181 netif_vdbg(efx, hw, efx->net_dev,
1182 "channel %d SRAM update done\n", channel->channel);
1183 break;
1184 case FSE_AZ_WAKE_UP_EV:
1185 netif_vdbg(efx, hw, efx->net_dev,
1186 "channel %d RXQ %d wakeup event\n",
1187 channel->channel, ev_sub_data);
1188 break;
1189 case FSE_AZ_TIMER_EV:
1190 netif_vdbg(efx, hw, efx->net_dev,
1191 "channel %d RX queue %d timer expired\n",
1192 channel->channel, ev_sub_data);
1193 break;
1194 case FSE_AA_RX_RECOVER_EV:
1195 netif_err(efx, rx_err, efx->net_dev,
1196 "channel %d seen DRIVER RX_RESET event. "
1197 "Resetting.\n", channel->channel);
1198 atomic_inc(&efx->rx_reset);
1199 efx_schedule_reset(efx,
1200 EFX_WORKAROUND_6555(efx) ?
1201 RESET_TYPE_RX_RECOVERY :
1202 RESET_TYPE_DISABLE);
1203 break;
1204 case FSE_BZ_RX_DSC_ERROR_EV:
1205 if (ev_sub_data < EFX_VI_BASE) {
1206 netif_err(efx, rx_err, efx->net_dev,
1207 "RX DMA Q %d reports descriptor fetch error."
1208 " RX Q %d is disabled.\n", ev_sub_data,
1209 ev_sub_data);
1210 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
1211 } else
1212 efx_sriov_desc_fetch_err(efx, ev_sub_data);
1213 break;
1214 case FSE_BZ_TX_DSC_ERROR_EV:
1215 if (ev_sub_data < EFX_VI_BASE) {
1216 netif_err(efx, tx_err, efx->net_dev,
1217 "TX DMA Q %d reports descriptor fetch error."
1218 " TX Q %d is disabled.\n", ev_sub_data,
1219 ev_sub_data);
1220 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
1221 } else
1222 efx_sriov_desc_fetch_err(efx, ev_sub_data);
1223 break;
1224 default:
1225 netif_vdbg(efx, hw, efx->net_dev,
1226 "channel %d unknown driver event code %d "
1227 "data %04x\n", channel->channel, ev_sub_code,
1228 ev_sub_data);
1229 break;
1230 }
1231 }
1232
1233 int efx_nic_process_eventq(struct efx_channel *channel, int budget)
1234 {
1235 struct efx_nic *efx = channel->efx;
1236 unsigned int read_ptr;
1237 efx_qword_t event, *p_event;
1238 int ev_code;
1239 int tx_packets = 0;
1240 int spent = 0;
1241
1242 read_ptr = channel->eventq_read_ptr;
1243
1244 for (;;) {
1245 p_event = efx_event(channel, read_ptr);
1246 event = *p_event;
1247
1248 if (!efx_event_present(&event))
1249 /* End of events */
1250 break;
1251
1252 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
1253 "channel %d event is "EFX_QWORD_FMT"\n",
1254 channel->channel, EFX_QWORD_VAL(event));
1255
1256 /* Clear this event by marking it all ones */
1257 EFX_SET_QWORD(*p_event);
1258
1259 ++read_ptr;
1260
1261 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1262
1263 switch (ev_code) {
1264 case FSE_AZ_EV_CODE_RX_EV:
1265 efx_handle_rx_event(channel, &event);
1266 if (++spent == budget)
1267 goto out;
1268 break;
1269 case FSE_AZ_EV_CODE_TX_EV:
1270 tx_packets += efx_handle_tx_event(channel, &event);
1271 if (tx_packets > efx->txq_entries) {
1272 spent = budget;
1273 goto out;
1274 }
1275 break;
1276 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1277 efx_handle_generated_event(channel, &event);
1278 break;
1279 case FSE_AZ_EV_CODE_DRIVER_EV:
1280 efx_handle_driver_event(channel, &event);
1281 break;
1282 case FSE_CZ_EV_CODE_USER_EV:
1283 efx_sriov_event(channel, &event);
1284 break;
1285 case FSE_CZ_EV_CODE_MCDI_EV:
1286 efx_mcdi_process_event(channel, &event);
1287 break;
1288 case FSE_AZ_EV_CODE_GLOBAL_EV:
1289 if (efx->type->handle_global_event &&
1290 efx->type->handle_global_event(channel, &event))
1291 break;
1292 /* else fall through */
1293 default:
1294 netif_err(channel->efx, hw, channel->efx->net_dev,
1295 "channel %d unknown event type %d (data "
1296 EFX_QWORD_FMT ")\n", channel->channel,
1297 ev_code, EFX_QWORD_VAL(event));
1298 }
1299 }
1300
1301 out:
1302 channel->eventq_read_ptr = read_ptr;
1303 return spent;
1304 }
1305
1306 /* Check whether an event is present in the eventq at the current
1307 * read pointer. Only useful for self-test.
1308 */
1309 bool efx_nic_event_present(struct efx_channel *channel)
1310 {
1311 return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
1312 }
1313
1314 /* Allocate buffer table entries for event queue */
1315 int efx_nic_probe_eventq(struct efx_channel *channel)
1316 {
1317 struct efx_nic *efx = channel->efx;
1318 unsigned entries;
1319
1320 entries = channel->eventq_mask + 1;
1321 return efx_alloc_special_buffer(efx, &channel->eventq,
1322 entries * sizeof(efx_qword_t));
1323 }
1324
1325 void efx_nic_init_eventq(struct efx_channel *channel)
1326 {
1327 efx_oword_t reg;
1328 struct efx_nic *efx = channel->efx;
1329
1330 netif_dbg(efx, hw, efx->net_dev,
1331 "channel %d event queue in special buffers %d-%d\n",
1332 channel->channel, channel->eventq.index,
1333 channel->eventq.index + channel->eventq.entries - 1);
1334
1335 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1336 EFX_POPULATE_OWORD_3(reg,
1337 FRF_CZ_TIMER_Q_EN, 1,
1338 FRF_CZ_HOST_NOTIFY_MODE, 0,
1339 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1340 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1341 }
1342
1343 /* Pin event queue buffer */
1344 efx_init_special_buffer(efx, &channel->eventq);
1345
1346 /* Fill event queue with all ones (i.e. empty events) */
1347 memset(channel->eventq.addr, 0xff, channel->eventq.len);
1348
1349 /* Push event queue to card */
1350 EFX_POPULATE_OWORD_3(reg,
1351 FRF_AZ_EVQ_EN, 1,
1352 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1353 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1354 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1355 channel->channel);
1356
1357 efx->type->push_irq_moderation(channel);
1358 }
1359
1360 void efx_nic_fini_eventq(struct efx_channel *channel)
1361 {
1362 efx_oword_t reg;
1363 struct efx_nic *efx = channel->efx;
1364
1365 /* Remove event queue from card */
1366 EFX_ZERO_OWORD(reg);
1367 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1368 channel->channel);
1369 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1370 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1371
1372 /* Unpin event queue */
1373 efx_fini_special_buffer(efx, &channel->eventq);
1374 }
1375
1376 /* Free buffers backing event queue */
1377 void efx_nic_remove_eventq(struct efx_channel *channel)
1378 {
1379 efx_free_special_buffer(channel->efx, &channel->eventq);
1380 }
1381
1382
1383 void efx_nic_event_test_start(struct efx_channel *channel)
1384 {
1385 channel->event_test_cpu = -1;
1386 smp_wmb();
1387 efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
1388 }
1389
1390 void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
1391 {
1392 efx_magic_event(efx_rx_queue_channel(rx_queue),
1393 EFX_CHANNEL_MAGIC_FILL(rx_queue));
1394 }
1395
1396 /**************************************************************************
1397 *
1398 * Hardware interrupts
1399 * The hardware interrupt handler does very little work; all the event
1400 * queue processing is carried out by per-channel tasklets.
1401 *
1402 **************************************************************************/
1403
1404 /* Enable/disable/generate interrupts */
1405 static inline void efx_nic_interrupts(struct efx_nic *efx,
1406 bool enabled, bool force)
1407 {
1408 efx_oword_t int_en_reg_ker;
1409
1410 EFX_POPULATE_OWORD_3(int_en_reg_ker,
1411 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
1412 FRF_AZ_KER_INT_KER, force,
1413 FRF_AZ_DRV_INT_EN_KER, enabled);
1414 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1415 }
1416
1417 void efx_nic_enable_interrupts(struct efx_nic *efx)
1418 {
1419 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1420 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1421
1422 efx_nic_interrupts(efx, true, false);
1423 }
1424
1425 void efx_nic_disable_interrupts(struct efx_nic *efx)
1426 {
1427 /* Disable interrupts */
1428 efx_nic_interrupts(efx, false, false);
1429 }
1430
1431 /* Generate a test interrupt
1432 * Interrupts must already have been enabled, otherwise nasty things
1433 * may happen.
1434 */
1435 void efx_nic_irq_test_start(struct efx_nic *efx)
1436 {
1437 efx->last_irq_cpu = -1;
1438 smp_wmb();
1439 efx_nic_interrupts(efx, true, true);
1440 }
1441
1442 /* Process a fatal interrupt
1443 * Disable bus mastering ASAP and schedule a reset
1444 */
1445 irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1446 {
1447 struct falcon_nic_data *nic_data = efx->nic_data;
1448 efx_oword_t *int_ker = efx->irq_status.addr;
1449 efx_oword_t fatal_intr;
1450 int error, mem_perr;
1451
1452 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1453 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1454
1455 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
1456 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1457 EFX_OWORD_VAL(fatal_intr),
1458 error ? "disabling bus mastering" : "no recognised error");
1459
1460 /* If this is a memory parity error, dump which blocks are offending */
1461 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1462 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1463 if (mem_perr) {
1464 efx_oword_t reg;
1465 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1466 netif_err(efx, hw, efx->net_dev,
1467 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
1468 EFX_OWORD_VAL(reg));
1469 }
1470
1471 /* Disable both devices */
1472 pci_clear_master(efx->pci_dev);
1473 if (efx_nic_is_dual_func(efx))
1474 pci_clear_master(nic_data->pci_dev2);
1475 efx_nic_disable_interrupts(efx);
1476
1477 /* Count errors and reset or disable the NIC accordingly */
1478 if (efx->int_error_count == 0 ||
1479 time_after(jiffies, efx->int_error_expire)) {
1480 efx->int_error_count = 0;
1481 efx->int_error_expire =
1482 jiffies + EFX_INT_ERROR_EXPIRE * HZ;
1483 }
1484 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
1485 netif_err(efx, hw, efx->net_dev,
1486 "SYSTEM ERROR - reset scheduled\n");
1487 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1488 } else {
1489 netif_err(efx, hw, efx->net_dev,
1490 "SYSTEM ERROR - max number of errors seen."
1491 "NIC will be disabled\n");
1492 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1493 }
1494
1495 return IRQ_HANDLED;
1496 }
1497
1498 /* Handle a legacy interrupt
1499 * Acknowledges the interrupt and schedules event queue processing.
1500 */
1501 static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1502 {
1503 struct efx_nic *efx = dev_id;
1504 efx_oword_t *int_ker = efx->irq_status.addr;
1505 irqreturn_t result = IRQ_NONE;
1506 struct efx_channel *channel;
1507 efx_dword_t reg;
1508 u32 queues;
1509 int syserr;
1510
1511 /* Could this be ours? If interrupts are disabled then the
1512 * channel state may not be valid.
1513 */
1514 if (!efx->legacy_irq_enabled)
1515 return result;
1516
1517 /* Read the ISR which also ACKs the interrupts */
1518 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1519 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1520
1521 /* Handle non-event-queue sources */
1522 if (queues & (1U << efx->irq_level)) {
1523 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1524 if (unlikely(syserr))
1525 return efx_nic_fatal_interrupt(efx);
1526 efx->last_irq_cpu = raw_smp_processor_id();
1527 }
1528
1529 if (queues != 0) {
1530 if (EFX_WORKAROUND_15783(efx))
1531 efx->irq_zero_count = 0;
1532
1533 /* Schedule processing of any interrupting queues */
1534 efx_for_each_channel(channel, efx) {
1535 if (queues & 1)
1536 efx_schedule_channel_irq(channel);
1537 queues >>= 1;
1538 }
1539 result = IRQ_HANDLED;
1540
1541 } else if (EFX_WORKAROUND_15783(efx)) {
1542 efx_qword_t *event;
1543
1544 /* We can't return IRQ_HANDLED more than once on seeing ISR=0
1545 * because this might be a shared interrupt. */
1546 if (efx->irq_zero_count++ == 0)
1547 result = IRQ_HANDLED;
1548
1549 /* Ensure we schedule or rearm all event queues */
1550 efx_for_each_channel(channel, efx) {
1551 event = efx_event(channel, channel->eventq_read_ptr);
1552 if (efx_event_present(event))
1553 efx_schedule_channel_irq(channel);
1554 else
1555 efx_nic_eventq_read_ack(channel);
1556 }
1557 }
1558
1559 if (result == IRQ_HANDLED)
1560 netif_vdbg(efx, intr, efx->net_dev,
1561 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1562 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1563
1564 return result;
1565 }
1566
1567 /* Handle an MSI interrupt
1568 *
1569 * Handle an MSI hardware interrupt. This routine schedules event
1570 * queue processing. No interrupt acknowledgement cycle is necessary.
1571 * Also, we never need to check that the interrupt is for us, since
1572 * MSI interrupts cannot be shared.
1573 */
1574 static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1575 {
1576 struct efx_channel *channel = *(struct efx_channel **)dev_id;
1577 struct efx_nic *efx = channel->efx;
1578 efx_oword_t *int_ker = efx->irq_status.addr;
1579 int syserr;
1580
1581 netif_vdbg(efx, intr, efx->net_dev,
1582 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1583 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1584
1585 /* Handle non-event-queue sources */
1586 if (channel->channel == efx->irq_level) {
1587 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1588 if (unlikely(syserr))
1589 return efx_nic_fatal_interrupt(efx);
1590 efx->last_irq_cpu = raw_smp_processor_id();
1591 }
1592
1593 /* Schedule processing of the channel */
1594 efx_schedule_channel_irq(channel);
1595
1596 return IRQ_HANDLED;
1597 }
1598
1599
1600 /* Setup RSS indirection table.
1601 * This maps from the hash value of the packet to an RXQ.
1602 */
1603 void efx_nic_push_rx_indir_table(struct efx_nic *efx)
1604 {
1605 size_t i = 0;
1606 efx_dword_t dword;
1607
1608 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1609 return;
1610
1611 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1612 FR_BZ_RX_INDIRECTION_TBL_ROWS);
1613
1614 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1615 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1616 efx->rx_indir_table[i]);
1617 efx_writed(efx, &dword,
1618 FR_BZ_RX_INDIRECTION_TBL +
1619 FR_BZ_RX_INDIRECTION_TBL_STEP * i);
1620 }
1621 }
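/* Illustrative note: the table has FR_BZ_RX_INDIRECTION_TBL_ROWS entries,
 * each holding an RX queue number in FRF_BZ_IT_QUEUE; the hardware reduces
 * a packet's RSS hash to a row index and delivers it to the queue stored
 * there.  Filling rx_indir_table[i] with i modulo the number of RX
 * channels (the usual default, set up elsewhere in the driver) spreads
 * flows roughly evenly across the channels.
 */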
1622
1623 /* Hook interrupt handler(s)
1624 * Try MSI and then legacy interrupts.
1625 */
1626 int efx_nic_init_interrupt(struct efx_nic *efx)
1627 {
1628 struct efx_channel *channel;
1629 int rc;
1630
1631 if (!EFX_INT_MODE_USE_MSI(efx)) {
1632 irq_handler_t handler;
1633 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1634 handler = efx_legacy_interrupt;
1635 else
1636 handler = falcon_legacy_interrupt_a1;
1637
1638 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
1639 efx->name, efx);
1640 if (rc) {
1641 netif_err(efx, drv, efx->net_dev,
1642 "failed to hook legacy IRQ %d\n",
1643 efx->pci_dev->irq);
1644 goto fail1;
1645 }
1646 return 0;
1647 }
1648
1649 /* Hook MSI or MSI-X interrupt */
1650 efx_for_each_channel(channel, efx) {
1651 rc = request_irq(channel->irq, efx_msi_interrupt,
1652 IRQF_PROBE_SHARED, /* Not shared */
1653 efx->channel_name[channel->channel],
1654 &efx->channel[channel->channel]);
1655 if (rc) {
1656 netif_err(efx, drv, efx->net_dev,
1657 "failed to hook IRQ %d\n", channel->irq);
1658 goto fail2;
1659 }
1660 }
1661
1662 return 0;
1663
1664 fail2:
1665 efx_for_each_channel(channel, efx)
1666 free_irq(channel->irq, &efx->channel[channel->channel]);
1667 fail1:
1668 return rc;
1669 }
1670
1671 void efx_nic_fini_interrupt(struct efx_nic *efx)
1672 {
1673 struct efx_channel *channel;
1674 efx_oword_t reg;
1675
1676 /* Disable MSI/MSI-X interrupts */
1677 efx_for_each_channel(channel, efx) {
1678 if (channel->irq)
1679 free_irq(channel->irq, &efx->channel[channel->channel]);
1680 }
1681
1682 /* ACK legacy interrupt */
1683 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1684 efx_reado(efx, &reg, FR_BZ_INT_ISR0);
1685 else
1686 falcon_irq_ack_a1(efx);
1687
1688 /* Disable legacy interrupt */
1689 if (efx->legacy_irq)
1690 free_irq(efx->legacy_irq, efx);
1691 }
1692
1693 /* Looks at available SRAM resources and works out how many queues we
1694 * can support, and where things like descriptor caches should live.
1695 *
1696 * SRAM is split up as follows:
1697 * 0 buftbl entries for channels
1698 * efx->vf_buftbl_base buftbl entries for SR-IOV
1699 * efx->rx_dc_base RX descriptor caches
1700 * efx->tx_dc_base TX descriptor caches
1701 */
1702 void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
1703 {
1704 unsigned vi_count, buftbl_min;
1705
1706 /* Account for the buffer table entries backing the datapath channels
1707 * and the descriptor caches for those channels.
1708 */
1709 buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
1710 efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
1711 efx->n_channels * EFX_MAX_EVQ_SIZE)
1712 * sizeof(efx_qword_t) / EFX_BUF_SIZE);
1713 vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
1714
1715 #ifdef CONFIG_SFC_SRIOV
1716 if (efx_sriov_wanted(efx)) {
1717 unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
1718
1719 efx->vf_buftbl_base = buftbl_min;
1720
1721 vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
1722 vi_count = max(vi_count, EFX_VI_BASE);
1723 buftbl_free = (sram_lim_qw - buftbl_min -
1724 vi_count * vi_dc_entries);
1725
1726 entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
1727 efx_vf_size(efx));
1728 vf_limit = min(buftbl_free / entries_per_vf,
1729 (1024U - EFX_VI_BASE) >> efx->vi_scale);
1730
1731 if (efx->vf_count > vf_limit) {
1732 netif_err(efx, probe, efx->net_dev,
1733 "Reducing VF count from from %d to %d\n",
1734 efx->vf_count, vf_limit);
1735 efx->vf_count = vf_limit;
1736 }
1737 vi_count += efx->vf_count * efx_vf_size(efx);
1738 }
1739 #endif
1740
1741 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
1742 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
1743 }
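/* Illustrative note: the carve-up works downwards from sram_lim_qw: the
 * top vi_count * TX_DC_ENTRIES qwords hold the TX descriptor caches, the
 * vi_count * RX_DC_ENTRIES qwords below them hold the RX descriptor
 * caches, and everything beneath rx_dc_base is left for buffer table
 * entries (channel queues first, then any SR-IOV VFs), matching the SRAM
 * layout described in the comment above.
 */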
1744
1745 u32 efx_nic_fpga_ver(struct efx_nic *efx)
1746 {
1747 efx_oword_t altera_build;
1748 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1749 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1750 }
1751
1752 void efx_nic_init_common(struct efx_nic *efx)
1753 {
1754 efx_oword_t temp;
1755
1756 /* Set positions of descriptor caches in SRAM. */
1757 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
1758 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1759 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
1760 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1761
1762 /* Set TX descriptor cache size. */
1763 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1764 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1765 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1766
1767 /* Set RX descriptor cache size. Set low watermark to size-8, as
1768 * this allows most efficient prefetching.
1769 */
1770 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1771 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1772 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1773 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1774 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1775
1776 /* Program INT_KER address */
1777 EFX_POPULATE_OWORD_2(temp,
1778 FRF_AZ_NORM_INT_VEC_DIS_KER,
1779 EFX_INT_MODE_USE_MSI(efx),
1780 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1781 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1782
1783 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1784 /* Use an interrupt level unused by event queues */
1785 efx->irq_level = 0x1f;
1786 else
1787 /* Use a valid MSI-X vector */
1788 efx->irq_level = 0;
1789
1790 /* Enable all the genuinely fatal interrupts. (They are still
1791 * masked by the overall interrupt mask, controlled by
1792 * falcon_interrupts()).
1793 *
1794 * Note: All other fatal interrupts are enabled
1795 */
1796 EFX_POPULATE_OWORD_3(temp,
1797 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1798 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1799 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1800 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1801 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
1802 EFX_INVERT_OWORD(temp);
1803 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1804
1805 efx_nic_push_rx_indir_table(efx);
1806
1807 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1808 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1809 */
1810 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1811 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1812 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1813 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1814 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1815 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1816 /* Enable SW_EV to inherit in char driver - assume harmless here */
1817 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1818 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
1819 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1820 /* Disable hardware watchdog which can misfire */
1821 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1822 /* Squash TX of packets of 16 bytes or less */
1823 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1824 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1825 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1826
1827 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1828 EFX_POPULATE_OWORD_4(temp,
1829 /* Default values */
1830 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1831 FRF_BZ_TX_PACE_SB_AF, 0xb,
1832 FRF_BZ_TX_PACE_FB_BASE, 0,
1833 /* Allow large pace values in the
1834 * fast bin. */
1835 FRF_BZ_TX_PACE_BIN_TH,
1836 FFE_BZ_TX_PACE_RESERVED);
1837 efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1838 }
1839 }
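/* For illustration only: the FR_AZ_TX_RESERVED update above follows the
 * usual read-modify-write idiom for these 128-bit registers,
 *
 *	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
 *	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
 *	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
 *
 * whereas the EFX_POPULATE_OWORD_*() calls build a value from scratch,
 * leaving any field that is not listed at zero.
 */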
1840
1841 /* Register dump */
1842
1843 #define REGISTER_REVISION_A 1
1844 #define REGISTER_REVISION_B 2
1845 #define REGISTER_REVISION_C 3
1846 #define REGISTER_REVISION_Z 3 /* latest revision */
1847
1848 struct efx_nic_reg {
1849 u32 offset:24;
1850 u32 min_revision:2, max_revision:2;
1851 };
1852
1853 #define REGISTER(name, min_rev, max_rev) { \
1854 FR_ ## min_rev ## max_rev ## _ ## name, \
1855 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \
1856 }
1857 #define REGISTER_AA(name) REGISTER(name, A, A)
1858 #define REGISTER_AB(name) REGISTER(name, A, B)
1859 #define REGISTER_AZ(name) REGISTER(name, A, Z)
1860 #define REGISTER_BB(name) REGISTER(name, B, B)
1861 #define REGISTER_BZ(name) REGISTER(name, B, Z)
1862 #define REGISTER_CZ(name) REGISTER(name, C, Z)
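/* For illustration only: with the helpers above, an entry such as
 * REGISTER_AB(NIC_STAT) expands (by token-pasting the revision letters)
 * to roughly
 *
 *	{ FR_AB_NIC_STAT, REGISTER_REVISION_A, REGISTER_REVISION_B }
 *
 * i.e. the register's offset plus the range of NIC revisions on which it
 * exists, which efx_nic_get_regs_len()/efx_nic_get_regs() below compare
 * against efx->type->revision.
 */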
1863
1864 static const struct efx_nic_reg efx_nic_regs[] = {
1865 REGISTER_AZ(ADR_REGION),
1866 REGISTER_AZ(INT_EN_KER),
1867 REGISTER_BZ(INT_EN_CHAR),
1868 REGISTER_AZ(INT_ADR_KER),
1869 REGISTER_BZ(INT_ADR_CHAR),
1870 /* INT_ACK_KER is WO */
1871 /* INT_ISR0 is RC */
1872 REGISTER_AZ(HW_INIT),
1873 REGISTER_CZ(USR_EV_CFG),
1874 REGISTER_AB(EE_SPI_HCMD),
1875 REGISTER_AB(EE_SPI_HADR),
1876 REGISTER_AB(EE_SPI_HDATA),
1877 REGISTER_AB(EE_BASE_PAGE),
1878 REGISTER_AB(EE_VPD_CFG0),
1879 /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
1880 /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
1881 /* PCIE_CORE_INDIRECT is indirect */
1882 REGISTER_AB(NIC_STAT),
1883 REGISTER_AB(GPIO_CTL),
1884 REGISTER_AB(GLB_CTL),
1885 /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
1886 REGISTER_BZ(DP_CTRL),
1887 REGISTER_AZ(MEM_STAT),
1888 REGISTER_AZ(CS_DEBUG),
1889 REGISTER_AZ(ALTERA_BUILD),
1890 REGISTER_AZ(CSR_SPARE),
1891 REGISTER_AB(PCIE_SD_CTL0123),
1892 REGISTER_AB(PCIE_SD_CTL45),
1893 REGISTER_AB(PCIE_PCS_CTL_STAT),
1894 /* DEBUG_DATA_OUT is not used */
1895 /* DRV_EV is WO */
1896 REGISTER_AZ(EVQ_CTL),
1897 REGISTER_AZ(EVQ_CNT1),
1898 REGISTER_AZ(EVQ_CNT2),
1899 REGISTER_AZ(BUF_TBL_CFG),
1900 REGISTER_AZ(SRM_RX_DC_CFG),
1901 REGISTER_AZ(SRM_TX_DC_CFG),
1902 REGISTER_AZ(SRM_CFG),
1903 /* BUF_TBL_UPD is WO */
1904 REGISTER_AZ(SRM_UPD_EVQ),
1905 REGISTER_AZ(SRAM_PARITY),
1906 REGISTER_AZ(RX_CFG),
1907 REGISTER_BZ(RX_FILTER_CTL),
1908 /* RX_FLUSH_DESCQ is WO */
1909 REGISTER_AZ(RX_DC_CFG),
1910 REGISTER_AZ(RX_DC_PF_WM),
1911 REGISTER_BZ(RX_RSS_TKEY),
1912 /* RX_NODESC_DROP is RC */
1913 REGISTER_AA(RX_SELF_RST),
1914 /* RX_DEBUG, RX_PUSH_DROP are not used */
1915 REGISTER_CZ(RX_RSS_IPV6_REG1),
1916 REGISTER_CZ(RX_RSS_IPV6_REG2),
1917 REGISTER_CZ(RX_RSS_IPV6_REG3),
1918 /* TX_FLUSH_DESCQ is WO */
1919 REGISTER_AZ(TX_DC_CFG),
1920 REGISTER_AA(TX_CHKSM_CFG),
1921 REGISTER_AZ(TX_CFG),
1922 /* TX_PUSH_DROP is not used */
1923 REGISTER_AZ(TX_RESERVED),
1924 REGISTER_BZ(TX_PACE),
1925 /* TX_PACE_DROP_QID is RC */
1926 REGISTER_BB(TX_VLAN),
1927 REGISTER_BZ(TX_IPFIL_PORTEN),
1928 REGISTER_AB(MD_TXD),
1929 REGISTER_AB(MD_RXD),
1930 REGISTER_AB(MD_CS),
1931 REGISTER_AB(MD_PHY_ADR),
1932 REGISTER_AB(MD_ID),
1933 /* MD_STAT is RC */
1934 REGISTER_AB(MAC_STAT_DMA),
1935 REGISTER_AB(MAC_CTRL),
1936 REGISTER_BB(GEN_MODE),
1937 REGISTER_AB(MAC_MC_HASH_REG0),
1938 REGISTER_AB(MAC_MC_HASH_REG1),
1939 REGISTER_AB(GM_CFG1),
1940 REGISTER_AB(GM_CFG2),
1941 /* GM_IPG and GM_HD are not used */
1942 REGISTER_AB(GM_MAX_FLEN),
1943 /* GM_TEST is not used */
1944 REGISTER_AB(GM_ADR1),
1945 REGISTER_AB(GM_ADR2),
1946 REGISTER_AB(GMF_CFG0),
1947 REGISTER_AB(GMF_CFG1),
1948 REGISTER_AB(GMF_CFG2),
1949 REGISTER_AB(GMF_CFG3),
1950 REGISTER_AB(GMF_CFG4),
1951 REGISTER_AB(GMF_CFG5),
1952 REGISTER_BB(TX_SRC_MAC_CTL),
1953 REGISTER_AB(XM_ADR_LO),
1954 REGISTER_AB(XM_ADR_HI),
1955 REGISTER_AB(XM_GLB_CFG),
1956 REGISTER_AB(XM_TX_CFG),
1957 REGISTER_AB(XM_RX_CFG),
1958 REGISTER_AB(XM_MGT_INT_MASK),
1959 REGISTER_AB(XM_FC),
1960 REGISTER_AB(XM_PAUSE_TIME),
1961 REGISTER_AB(XM_TX_PARAM),
1962 REGISTER_AB(XM_RX_PARAM),
1963 /* XM_MGT_INT_MSK (note no 'A') is RC */
1964 REGISTER_AB(XX_PWR_RST),
1965 REGISTER_AB(XX_SD_CTL),
1966 REGISTER_AB(XX_TXDRV_CTL),
1967 /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
1968 /* XX_CORE_STAT is partly RC */
1969 };
1970
1971 struct efx_nic_reg_table {
1972 u32 offset:24;
1973 u32 min_revision:2, max_revision:2;
1974 u32 step:6, rows:21;
1975 };
1976
1977 #define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
1978 offset, \
1979 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \
1980 step, rows \
1981 }
1982 #define REGISTER_TABLE(name, min_rev, max_rev) \
1983 REGISTER_TABLE_DIMENSIONS( \
1984 name, FR_ ## min_rev ## max_rev ## _ ## name, \
1985 min_rev, max_rev, \
1986 FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
1987 FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
1988 #define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
1989 #define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
1990 #define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
1991 #define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
1992 #define REGISTER_TABLE_BB_CZ(name) \
1993 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \
1994 FR_BZ_ ## name ## _STEP, \
1995 FR_BB_ ## name ## _ROWS), \
1996 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \
1997 FR_BZ_ ## name ## _STEP, \
1998 FR_CZ_ ## name ## _ROWS)
1999 #define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
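/* For illustration only: REGISTER_TABLE_BZ(RX_INDIRECTION_TBL) expands via
 * REGISTER_TABLE_DIMENSIONS() to roughly
 *
 *	{ FR_BZ_RX_INDIRECTION_TBL,
 *	  REGISTER_REVISION_B, REGISTER_REVISION_Z,
 *	  FR_BZ_RX_INDIRECTION_TBL_STEP, FR_BZ_RX_INDIRECTION_TBL_ROWS }
 *
 * The BB_CZ variant emits two entries sharing the FR_BZ_ offset and step,
 * because only the row count differs between the B and C/Z revisions.
 */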
2000
2001 static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
2002 /* DRIVER is not used */
2003 /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
2004 REGISTER_TABLE_BB(TX_IPFIL_TBL),
2005 REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
2006 REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
2007 REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
2008 REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
2009 REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
2010 REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
2011 REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
2012 /* We can't reasonably read all of the buffer table (up to 8MB!).
2013 * However this driver will only use a few entries. Reading
2014 * 1K entries allows for some expansion of queue count and
2015 * size before we need to change the version. */
2016 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
2017 A, A, 8, 1024),
2018 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
2019 B, Z, 8, 1024),
2020 REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
2021 REGISTER_TABLE_BB_CZ(TIMER_TBL),
2022 REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
2023 REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
2024 /* TX_FILTER_TBL0 is huge and not used by this driver */
2025 REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
2026 REGISTER_TABLE_CZ(MC_TREG_SMEM),
2027 /* MSIX_PBA_TABLE is not mapped */
2028 /* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
2029 REGISTER_TABLE_BZ(RX_FILTER_TBL0),
2030 };
2031
2032 size_t efx_nic_get_regs_len(struct efx_nic *efx)
2033 {
2034 const struct efx_nic_reg *reg;
2035 const struct efx_nic_reg_table *table;
2036 size_t len = 0;
2037
2038 for (reg = efx_nic_regs;
2039 reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
2040 reg++)
2041 if (efx->type->revision >= reg->min_revision &&
2042 efx->type->revision <= reg->max_revision)
2043 len += sizeof(efx_oword_t);
2044
2045 for (table = efx_nic_reg_tables;
2046 table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
2047 table++)
2048 if (efx->type->revision >= table->min_revision &&
2049 efx->type->revision <= table->max_revision)
2050 len += table->rows * min_t(size_t, table->step, 16);
2051
2052 return len;
2053 }
2054
2055 void efx_nic_get_regs(struct efx_nic *efx, void *buf)
2056 {
2057 const struct efx_nic_reg *reg;
2058 const struct efx_nic_reg_table *table;
2059
2060 for (reg = efx_nic_regs;
2061 reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
2062 reg++) {
2063 if (efx->type->revision >= reg->min_revision &&
2064 efx->type->revision <= reg->max_revision) {
2065 efx_reado(efx, (efx_oword_t *)buf, reg->offset);
2066 buf += sizeof(efx_oword_t);
2067 }
2068 }
2069
2070 for (table = efx_nic_reg_tables;
2071 table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
2072 table++) {
2073 size_t size, i;
2074
2075 if (!(efx->type->revision >= table->min_revision &&
2076 efx->type->revision <= table->max_revision))
2077 continue;
2078
2079 size = min_t(size_t, table->step, 16);
2080
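	/* Each row contributes at most one 16-byte register image:
	 * 4- and 8-byte strides are copied whole, while for the
	 * 32-byte interleaved layout only the first 16 bytes of
	 * each row are captured.
	 */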
2081 for (i = 0; i < table->rows; i++) {
2082 switch (table->step) {
2083 case 4: /* 32-bit SRAM */
2084 efx_readd(efx, buf, table->offset + 4 * i);
2085 break;
2086 case 8: /* 64-bit SRAM */
2087 efx_sram_readq(efx,
2088 efx->membase + table->offset,
2089 buf, i);
2090 break;
2091 case 16: /* 128-bit-readable register */
2092 efx_reado_table(efx, buf, table->offset, i);
2093 break;
2094 case 32: /* 128-bit register, interleaved */
2095 efx_reado_table(efx, buf, table->offset, 2 * i);
2096 break;
2097 default:
2098 WARN_ON(1);
2099 return;
2100 }
2101 buf += size;
2102 }
2103 }
2104 }
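/* For illustration only (a hypothetical caller, not this driver's actual
 * ethtool code): the two functions above are meant to be used as a pair,
 * with the buffer sized by the first call:
 *
 *	size_t len = efx_nic_get_regs_len(efx);
 *	void *buf = kzalloc(len, GFP_KERNEL);
 *
 *	if (buf) {
 *		efx_nic_get_regs(efx, buf);
 *		... hand the register snapshot to the caller ...
 *		kfree(buf);
 *	}
 *
 * efx_nic_get_regs() assumes @buf holds at least efx_nic_get_regs_len()
 * bytes for the same NIC revision.
 */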