/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"

/* Falcon-architecture (SFC4000 and SFC9000-family) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
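
/* In both cases ENTRIES == 8 << ENTRIES_ORDER (8 << 1 == 16, 8 << 3 == 64);
 * the ORDER values are presumably what the hardware's descriptor-cache size
 * fields expect, with sizes expressed in units of 8 descriptors.
 */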

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)
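
/* For example, EFX_CHANNEL_MAGIC_TEST() on channel 2 encodes to
 * (0x000101 << 8) | 2 == 0x00010102, and _EFX_CHANNEL_MAGIC_CODE()
 * recovers the code 0x000101 by shifting the channel number back out.
 */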

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);

/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

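/* Return true if *a and *b differ in any bit position selected by *mask;
 * used below to check that written register bits read back correctly.
 */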
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

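/* Walk each register in @regs and check that every testable bit (per the
 * register's mask) can be set and cleared in isolation, restoring the
 * original value afterwards.  Returns 0 on success or -EIO on a mismatch.
 */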
int efx_farch_test_registers(struct efx_nic *efx,
			     const struct efx_farch_register_test *regs,
			     size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}
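
/* Note: each buffer-table entry above maps one EFX_BUF_SIZE (4KB, per the
 * allocation comment below) chunk; FRF_AZ_BUF_ADR_FBUF holds the chunk's
 * 4KB-aligned page number, hence the dma_addr >> 12.
 */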

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_sriov_enabled(efx) &&
	       efx->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	efx_nic_free_buffer(efx, &buffer->buf);
	buffer->entries = 0;
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
					  const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}
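
/* The push above issues the write pointer and the first new descriptor in a
 * single oword write: the descriptor field starts at bit 0 (which the first
 * BUILD_BUG_ON asserts), so copying *txd into reg.qword[0] places the
 * descriptor payload in the low 64 bits of the 128-bit write.
 */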

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_farch_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_farch_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			__clear_bit_le(tx_queue->queue, &reg);
		else
			__set_bit_le(tx_queue->queue, &reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}

static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* This creates an entry in the RX descriptor queue */
static inline void
efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_farch_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;
	bool jumbo_en;

	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
	 * DMA to continue after a PCIe page boundary (and scattering
	 * is not possible).  In Falcon B0 and Siena, it enables
	 * scatter.
	 */
	jumbo_en = !is_b0 || efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* efx_farch_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_farch_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_farch_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->drain_pending) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
	bool i = true;
	efx_oword_t txd_ptr_tbl;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment drain_pending as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				efx_farch_magic_event(channel,
						      EFX_CHANNEL_MAGIC_TX_DRAIN(
							      tx_queue));
			}
		}
	}

	return i;
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed.  Wait for the DRAIN events to be received so that
 * there are no more RX and TX events left on any channel.
 */
static int efx_farch_do_flush(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			efx_farch_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->drain_pending) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion).  If that fails, fall back to the old scheme.
		 */
		if (efx_sriov_enabled(efx)) {
			rc = efx_mcdi_flush_rxqs(efx);
			if (!rc)
				goto wait;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_farch_flush_rx_queue(rx_queue);
				}
			}
		}

	wait:
		timeout = wait_event_timeout(efx->flush_wq,
					     efx_farch_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->drain_pending) &&
	    !efx_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->drain_pending, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	return rc;
}

int efx_farch_fini_dmaq(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc = 0;

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		/* Only perform flush if DMA is enabled */
		if (efx->pci_dev->is_busmaster) {
			efx->type->prepare_flush(efx);
			rc = efx_farch_do_flush(efx);
			efx->type->finish_flush(efx);
		}

		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_farch_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_farch_tx_fini(tx_queue);
		}
	}

	return rc;
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_farch_ev_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	efx_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}

/* Use HW to insert a SW defined event */
void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
			      efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_farch_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_farch_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}
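
/* tx_packets above is the ring-buffer distance from read_count to the
 * completed index, i.e. (tx_ev_desc_ptr - read_count) & ptr_mask.  For
 * example, with a hypothetical ptr_mask of 0x3ff, read_count 0x3fe and
 * tx_ev_desc_ptr 0x002 give (0x002 - 0x3fe) & 0x3ff == 4 completions.
 */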

/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				      const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order.  Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
		return true;
	}

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
	return false;
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
	u16 flags;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return;

	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
	rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
			rx_queue->ptr_mask);

	/* Check for partial drops and other errors */
	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
		if (rx_ev_desc_ptr != expected_ptr &&
		    !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
			return;

		/* Discard all pending fragments */
		if (rx_queue->scatter_n) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
			rx_queue->removed_count += rx_queue->scatter_n;
			rx_queue->scatter_n = 0;
		}

		/* Return if there is no new fragment */
		if (rx_ev_desc_ptr != expected_ptr)
			return;

		/* Discard new fragment if not SOP */
		if (!rx_ev_sop) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				1, 0, EFX_RX_PKT_DISCARD);
			++rx_queue->removed_count;
			return;
		}
	}

	++rx_queue->scatter_n;
	if (rx_ev_cont)
		return;

	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK then we can rely on the
		 * hardware checksum and classification.
		 */
		flags = 0;
		switch (rx_ev_hdr_type) {
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
			flags |= EFX_RX_PKT_TCP;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
			flags |= EFX_RX_PKT_CSUMMED;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
			break;
		}
	} else {
		flags = efx_farch_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue,
		      rx_queue->removed_count & rx_queue->ptr_mask,
		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
}

/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_tx_queue *tx_queue;
	int qid;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
					    qid % EFX_TXQ_TYPES);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
			efx_farch_magic_event(tx_queue->channel,
					      EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
		}
	}
}

/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */

static void
efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = efx_get_channel(efx, qid);
	if (!efx_channel_has_rx_queue(channel))
		return;
	rx_queue = efx_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
				      EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
efx_farch_handle_drain_event(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->drain_pending) == 0);
	atomic_dec(&efx->drain_pending);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void efx_farch_handle_generated_event(struct efx_channel *channel,
					     efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue =
		efx_channel_has_rx_queue(channel) ?
		efx_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EFX_CHANNEL_MAGIC_CODE(magic);

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue.  Refill it here */
		efx_fast_push_rx_descriptors(rx_queue);
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		efx_farch_handle_drain_event(channel);
	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
		efx_farch_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
	}
}

static void
efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_tx_flush_done(efx, event);
		efx_sriov_tx_flush_done(efx, event);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_rx_flush_done(efx, event);
		efx_sriov_rx_flush_done(efx, event);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, rx_err, efx->net_dev,
				  "RX DMA Q %d reports descriptor fetch error."
				  " RX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX DMA Q %d reports descriptor fetch error."
				  " TX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}

int efx_farch_ev_process(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_farch_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_farch_handle_tx_event(channel,
								&event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_farch_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_farch_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_USER_EV:
			efx_sriov_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}
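
/* Note the empty-slot convention used above: event queue memory is
 * initialised to all ones by efx_farch_ev_init() below, and each consumed
 * event is overwritten with all ones by EFX_SET_QWORD(), so
 * efx_event_present() can spot a newly written event without consulting a
 * separate hardware write pointer.
 */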

/* Allocate buffer table entries for event queue */
int efx_farch_ev_probe(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

void efx_farch_ev_init(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}

void efx_farch_ev_fini(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_farch_ev_remove(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}

void efx_farch_ev_test_generate(struct efx_channel *channel)
{
	efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}

void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
{
	efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
			      EFX_CHANNEL_MAGIC_FILL(rx_queue));
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_farch_interrupts(struct efx_nic *efx,
					bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_farch_irq_enable_master(struct efx_nic *efx)
{
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	efx_farch_interrupts(efx, true, false);
}

void efx_farch_irq_disable_master(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_farch_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_farch_irq_test_generate(struct efx_nic *efx)
{
	efx_farch_interrupts(efx, true, true);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_farch_irq_disable_master(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
1487 | netif_err(efx, hw, efx->net_dev, | |
1488 | "SYSTEM ERROR - max number of errors seen." | |
1489 | "NIC will be disabled\n"); | |
1490 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | |
1491 | } | |
1492 | ||
1493 | return IRQ_HANDLED; | |
1494 | } | |
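
/* A note on the rate limiting above: the error counter resets once
 * EFX_INT_ERROR_EXPIRE seconds have passed since the first error in the
 * current window, so the NIC is only disabled outright (rather than
 * reset) when EFX_MAX_INT_ERRORS fatal interrupts land within one window.
 */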
1495 | ||
1496 | /* Handle a legacy interrupt | |
1497 | * Acknowledges the interrupt and schedules event queue processing. | |
1498 | */ | |
1499 | irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) | |
1500 | { | |
1501 | struct efx_nic *efx = dev_id; | |
1502 | bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); | |
1503 | efx_oword_t *int_ker = efx->irq_status.addr; | |
1504 | irqreturn_t result = IRQ_NONE; | |
1505 | struct efx_channel *channel; | |
1506 | efx_dword_t reg; | |
1507 | u32 queues; | |
1508 | int syserr; | |
1509 | ||
1510 | /* Read the ISR which also ACKs the interrupts */ | |
1511 | efx_readd(efx, ®, FR_BZ_INT_ISR0); | |
1512 | queues = EFX_EXTRACT_DWORD(reg, 0, 31); | |
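/* Each ISR bit maps to one event queue, so bit n of 'queues' flags
 * work for channel n; bit efx->irq_level additionally covers
 * non-event-queue sources, as checked below.
 */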
1513 | ||
1514 | /* Legacy interrupts are disabled too late by the EEH kernel | |
1515 | * code. Disable them earlier. | |
1516 | * If an EEH error occurred, the read will have returned all ones. | |
1517 | */ | |
1518 | if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && | |
1519 | !efx->eeh_disabled_legacy_irq) { | |
1520 | disable_irq_nosync(efx->legacy_irq); | |
1521 | efx->eeh_disabled_legacy_irq = true; | |
1522 | } | |
1523 | ||
1524 | /* Handle non-event-queue sources */ | |
1525 | if (queues & (1U << efx->irq_level) && soft_enabled) { | |
1526 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); | |
1527 | if (unlikely(syserr)) | |
1528 | return efx_farch_fatal_interrupt(efx); | |
1529 | efx->last_irq_cpu = raw_smp_processor_id(); | |
1530 | } | |
1531 | ||
1532 | if (queues != 0) { | |
1533 | efx->irq_zero_count = 0; | |
1534 | ||
1535 | /* Schedule processing of any interrupting queues */ | |
1536 | if (likely(soft_enabled)) { | |
1537 | efx_for_each_channel(channel, efx) { | |
1538 | if (queues & 1) | |
1539 | efx_schedule_channel_irq(channel); | |
1540 | queues >>= 1; | |
1541 | } | |
1542 | } | |
1543 | result = IRQ_HANDLED; | |
1544 | ||
1545 | } else { | |
1546 | efx_qword_t *event; | |
1547 | ||
1548 | /* Legacy ISR read can return zero once (SF bug 15783) */ | |
1549 | ||
1550 | /* We can't return IRQ_HANDLED more than once on seeing ISR=0 | |
1551 | * because this might be a shared interrupt. */ | |
1552 | if (efx->irq_zero_count++ == 0) | |
1553 | result = IRQ_HANDLED; | |
1554 | ||
1555 | /* Ensure we schedule or rearm all event queues */ | |
1556 | if (likely(soft_enabled)) { | |
1557 | efx_for_each_channel(channel, efx) { | |
1558 | event = efx_event(channel, | |
1559 | channel->eventq_read_ptr); | |
1560 | if (efx_event_present(event)) | |
1561 | efx_schedule_channel_irq(channel); | |
1562 | else | |
1563 | efx_farch_ev_read_ack(channel); | |
1564 | } | |
1565 | } | |
1566 | } | |
1567 | ||
1568 | if (result == IRQ_HANDLED) | |
1569 | netif_vdbg(efx, intr, efx->net_dev, | |
1570 | "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", | |
1571 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); | |
1572 | ||
1573 | return result; | |
1574 | } | |
1575 | ||
1576 | /* Handle an MSI interrupt | |
1577 | * | |
1578 | * Handle an MSI hardware interrupt. This routine schedules event | |
1579 | * queue processing. No interrupt acknowledgement cycle is necessary. | |
1580 | * Also, we never need to check that the interrupt is for us, since | |
1581 | * MSI interrupts cannot be shared. | |
1582 | */ | |
1583 | irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id) | |
1584 | { | |
1585 | struct efx_msi_context *context = dev_id; | |
1586 | struct efx_nic *efx = context->efx; | |
1587 | efx_oword_t *int_ker = efx->irq_status.addr; | |
1588 | int syserr; | |
1589 | ||
1590 | netif_vdbg(efx, intr, efx->net_dev, | |
1591 | "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", | |
1592 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | |
1593 | ||
1594 | if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) | |
1595 | return IRQ_HANDLED; | |
1596 | ||
1597 | /* Handle non-event-queue sources */ | |
1598 | if (context->index == efx->irq_level) { | |
1599 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); | |
1600 | if (unlikely(syserr)) | |
1601 | return efx_farch_fatal_interrupt(efx); | |
1602 | efx->last_irq_cpu = raw_smp_processor_id(); | |
1603 | } | |
1604 | ||
1605 | /* Schedule processing of the channel */ | |
1606 | efx_schedule_channel_irq(efx->channel[context->index]); | |
1607 | ||
1608 | return IRQ_HANDLED; | |
1609 | } | |
1610 | ||
1611 | ||
1612 | /* Set up the RSS indirection table. | |
1613 | * This maps the packet's RSS hash value to an RX queue. | |
1614 | */ | |
1615 | void efx_farch_rx_push_indir_table(struct efx_nic *efx) | |
1616 | { | |
1617 | size_t i = 0; | |
1618 | efx_dword_t dword; | |
1619 | ||
1620 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) | |
1621 | return; | |
1622 | ||
1623 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != | |
1624 | FR_BZ_RX_INDIRECTION_TBL_ROWS); | |
1625 | ||
1626 | for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { | |
1627 | EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, | |
1628 | efx->rx_indir_table[i]); | |
1629 | efx_writed(efx, &dword, | |
1630 | FR_BZ_RX_INDIRECTION_TBL + | |
1631 | FR_BZ_RX_INDIRECTION_TBL_STEP * i); | |
1632 | } | |
1633 | } | |
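
/* Illustrative only: assuming the caller fills the table round-robin
 * (as the core driver normally does), a 4-queue NIC pushes rows
 *   rx_indir_table[] = { 0, 1, 2, 3, 0, 1, 2, 3, ... }
 * so a packet whose RSS hash selects row i lands on RXQ (i % 4).
 * The round-robin fill is an assumption about the caller, not
 * something this function enforces.
 */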
1634 | ||
1635 | /* Looks at available SRAM resources and works out how many queues we | |
1636 | * can support, and where things like descriptor caches should live. | |
1637 | * | |
1638 | * SRAM is split up as follows: | |
1639 | * 0 buftbl entries for channels | |
1640 | * efx->vf_buftbl_base buftbl entries for SR-IOV | |
1641 | * efx->rx_dc_base RX descriptor caches | |
1642 | * efx->tx_dc_base TX descriptor caches | |
1643 | */ | |
1644 | void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) | |
1645 | { | |
1646 | unsigned vi_count, buftbl_min; | |
1647 | ||
1648 | /* Account for the buffer table entries backing the datapath channels | |
1649 | * and the descriptor caches for those channels. | |
1650 | */ | |
1651 | buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE + | |
1652 | efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE + | |
1653 | efx->n_channels * EFX_MAX_EVQ_SIZE) | |
1654 | * sizeof(efx_qword_t) / EFX_BUF_SIZE); | |
1655 | vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); | |
1656 | ||
1657 | #ifdef CONFIG_SFC_SRIOV | |
1658 | if (efx_sriov_wanted(efx)) { | |
1659 | unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit; | |
1660 | ||
1661 | efx->vf_buftbl_base = buftbl_min; | |
1662 | ||
1663 | vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; | |
1664 | vi_count = max(vi_count, EFX_VI_BASE); | |
1665 | buftbl_free = (sram_lim_qw - buftbl_min - | |
1666 | vi_count * vi_dc_entries); | |
1667 | ||
1668 | entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) * | |
1669 | efx_vf_size(efx)); | |
1670 | vf_limit = min(buftbl_free / entries_per_vf, | |
1671 | (1024U - EFX_VI_BASE) >> efx->vi_scale); | |
1672 | ||
1673 | if (efx->vf_count > vf_limit) { | |
1674 | netif_err(efx, probe, efx->net_dev, | |
1675 | "Reducing VF count from from %d to %d\n", | |
1676 | efx->vf_count, vf_limit); | |
1677 | efx->vf_count = vf_limit; | |
1678 | } | |
1679 | vi_count += efx->vf_count * efx_vf_size(efx); | |
1680 | } | |
1681 | #endif | |
1682 | ||
1683 | efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; | |
1684 | efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; | |
1685 | } | |
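
/* Worked example with hypothetical numbers: for vi_count == 32,
 * TX_DC_ENTRIES == 16 and RX_DC_ENTRIES == 64 the top of SRAM becomes
 *   tx_dc_base = sram_lim_qw - 32 * 16   (512 qwords of TX caches)
 *   rx_dc_base = tx_dc_base  - 32 * 64   (2048 qwords of RX caches)
 * leaving everything below rx_dc_base for buffer table entries.
 */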
1686 | ||
1687 | u32 efx_farch_fpga_ver(struct efx_nic *efx) | |
1688 | { | |
1689 | efx_oword_t altera_build; | |
1690 | efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); | |
1691 | return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); | |
1692 | } | |
1693 | ||
1694 | void efx_farch_init_common(struct efx_nic *efx) | |
1695 | { | |
1696 | efx_oword_t temp; | |
1697 | ||
1698 | /* Set positions of descriptor caches in SRAM. */ | |
1699 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); | |
1700 | efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); | |
1701 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); | |
1702 | efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); | |
1703 | ||
1704 | /* Set TX descriptor cache size. */ | |
1705 | BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); | |
1706 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); | |
1707 | efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); | |
1708 | ||
1709 | /* Set RX descriptor cache size. Set low watermark to size-8, as | |
1710 | * this allows most efficient prefetching. | |
1711 | */ | |
1712 | BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); | |
1713 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); | |
1714 | efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); | |
1715 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); | |
1716 | efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); | |
1717 | ||
1718 | /* Program INT_KER address */ | |
1719 | EFX_POPULATE_OWORD_2(temp, | |
1720 | FRF_AZ_NORM_INT_VEC_DIS_KER, | |
1721 | EFX_INT_MODE_USE_MSI(efx), | |
1722 | FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); | |
1723 | efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); | |
1724 | ||
1725 | if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) | |
1726 | /* Use an interrupt level unused by event queues */ | |
1727 | efx->irq_level = 0x1f; | |
1728 | else | |
1729 | /* Use a valid MSI-X vector */ | |
1730 | efx->irq_level = 0; | |
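/* Whichever value is chosen, efx->irq_level is what the handlers above
 * compare against (the 'queues' bit in the legacy handler,
 * context->index in the MSI handler) to spot non-event-queue sources.
 */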
1731 | ||
1732 | /* Enable all the genuinely fatal interrupts. (They are still | |
1733 | * masked by the overall interrupt mask, controlled by | |
1734 | * falcon_interrupts()). | |
1735 | * | |
1736 | * Note: All other fatal interrupts are enabled | |
1737 | */ | |
1738 | EFX_POPULATE_OWORD_3(temp, | |
1739 | FRF_AZ_ILL_ADR_INT_KER_EN, 1, | |
1740 | FRF_AZ_RBUF_OWN_INT_KER_EN, 1, | |
1741 | FRF_AZ_TBUF_OWN_INT_KER_EN, 1); | |
1742 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) | |
1743 | EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); | |
1744 | EFX_INVERT_OWORD(temp); | |
1745 | efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); | |
1746 | ||
1747 | efx_farch_rx_push_indir_table(efx); | |
1748 | ||
1749 | /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be | |
1750 | * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. | |
1751 | */ | |
1752 | efx_reado(efx, &temp, FR_AZ_TX_RESERVED); | |
1753 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); | |
1754 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); | |
1755 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); | |
1756 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); | |
1757 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); | |
1758 | /* Enable SW_EV to inherit in char driver - assume harmless here */ | |
1759 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); | |
1760 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ | |
1761 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); | |
1762 | /* Disable hardware watchdog which can misfire */ | |
1763 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); | |
1764 | /* Squash TX of packets of 16 bytes or less */ | |
1765 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) | |
1766 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); | |
1767 | efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); | |
1768 | ||
1769 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | |
1770 | EFX_POPULATE_OWORD_4(temp, | |
1771 | /* Default values */ | |
1772 | FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, | |
1773 | FRF_BZ_TX_PACE_SB_AF, 0xb, | |
1774 | FRF_BZ_TX_PACE_FB_BASE, 0, | |
1775 | /* Allow large pace values in the | |
1776 | * fast bin. */ | |
1777 | FRF_BZ_TX_PACE_BIN_TH, | |
1778 | FFE_BZ_TX_PACE_RESERVED); | |
1779 | efx_writeo(efx, &temp, FR_BZ_TX_PACE); | |
1780 | } | |
1781 | } | |
1782 | ||
1783 | /************************************************************************** | |
1784 | * | |
1785 | * Filter tables | |
1786 | * | |
1787 | ************************************************************************** | |
1788 | */ | |
1789 | ||
1790 | /* "Fudge factors" - difference between programmed value and actual depth. | |
1791 | * Due to pipelined implementation we need to program H/W with a value that | |
1792 | * is larger than the hop limit we want. | |
1793 | */ | |
1794 | #define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3 | |
1795 | #define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1 | |
1796 | ||
1797 | /* Hard maximum search limit. Hardware will time-out beyond 200-something. | |
1798 | * We also need to avoid infinite loops in efx_farch_filter_search() when the | |
1799 | * table is full. | |
1800 | */ | |
1801 | #define EFX_FARCH_FILTER_CTL_SRCH_MAX 200 | |
1802 | ||
1803 | /* Don't try very hard to find space for performance hints, as this is | |
1804 | * counter-productive. */ | |
1805 | #define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5 | |
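
/* Example of the fudge factors at work: if the deepest TCP_WILD filter
 * inserted so far sits at probe depth 1, efx_farch_filter_push_rx_config()
 * programs FRF_BZ_TCP_WILD_SRCH_LIMIT as 1 + 3 = 4 so that the pipelined
 * search does not stop short of the last occupied hop.
 */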
1806 | ||
1807 | enum efx_farch_filter_type { | |
1808 | EFX_FARCH_FILTER_TCP_FULL = 0, | |
1809 | EFX_FARCH_FILTER_TCP_WILD, | |
1810 | EFX_FARCH_FILTER_UDP_FULL, | |
1811 | EFX_FARCH_FILTER_UDP_WILD, | |
1812 | EFX_FARCH_FILTER_MAC_FULL = 4, | |
1813 | EFX_FARCH_FILTER_MAC_WILD, | |
1814 | EFX_FARCH_FILTER_UC_DEF = 8, | |
1815 | EFX_FARCH_FILTER_MC_DEF, | |
1816 | EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */ | |
1817 | }; | |
1818 | ||
1819 | enum efx_farch_filter_table_id { | |
1820 | EFX_FARCH_FILTER_TABLE_RX_IP = 0, | |
1821 | EFX_FARCH_FILTER_TABLE_RX_MAC, | |
1822 | EFX_FARCH_FILTER_TABLE_RX_DEF, | |
1823 | EFX_FARCH_FILTER_TABLE_TX_MAC, | |
1824 | EFX_FARCH_FILTER_TABLE_COUNT, | |
1825 | }; | |
1826 | ||
1827 | enum efx_farch_filter_index { | |
1828 | EFX_FARCH_FILTER_INDEX_UC_DEF, | |
1829 | EFX_FARCH_FILTER_INDEX_MC_DEF, | |
1830 | EFX_FARCH_FILTER_SIZE_RX_DEF, | |
1831 | }; | |
1832 | ||
1833 | struct efx_farch_filter_spec { | |
1834 | u8 type:4; | |
1835 | u8 priority:4; | |
1836 | u8 flags; | |
1837 | u16 dmaq_id; | |
1838 | u32 data[3]; | |
1839 | }; | |
1840 | ||
1841 | struct efx_farch_filter_table { | |
1842 | enum efx_farch_filter_table_id id; | |
1843 | u32 offset; /* address of table relative to BAR */ | |
1844 | unsigned size; /* number of entries */ | |
1845 | unsigned step; /* step between entries */ | |
1846 | unsigned used; /* number currently used */ | |
1847 | unsigned long *used_bitmap; | |
1848 | struct efx_farch_filter_spec *spec; | |
1849 | unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT]; | |
1850 | }; | |
1851 | ||
1852 | struct efx_farch_filter_state { | |
1853 | struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT]; | |
1854 | }; | |
1855 | ||
1856 | static void | |
1857 | efx_farch_filter_table_clear_entry(struct efx_nic *efx, | |
1858 | struct efx_farch_filter_table *table, | |
1859 | unsigned int filter_idx); | |
1860 | ||
1861 | /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit | |
1862 | * key derived from the n-tuple. The initial LFSR state is 0xffff. */ | |
1863 | static u16 efx_farch_filter_hash(u32 key) | |
1864 | { | |
1865 | u16 tmp; | |
1866 | ||
1867 | /* First 16 rounds */ | |
1868 | tmp = 0x1fff ^ key >> 16; | |
1869 | tmp = tmp ^ tmp >> 3 ^ tmp >> 6; | |
1870 | tmp = tmp ^ tmp >> 9; | |
1871 | /* Last 16 rounds */ | |
1872 | tmp = tmp ^ tmp << 13 ^ key; | |
1873 | tmp = tmp ^ tmp >> 3 ^ tmp >> 6; | |
1874 | return tmp ^ tmp >> 9; | |
1875 | } | |
1876 | ||
1877 | /* To allow for hash collisions, filter search continues at these | |
1878 | * increments from the first possible entry selected by the hash. */ | |
1879 | static u16 efx_farch_filter_increment(u32 key) | |
1880 | { | |
1881 | return key * 2 - 1; | |
1882 | } | |
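
/* Because (key * 2 - 1) is always odd, the increment is coprime to the
 * power-of-two table size, so repeated probing visits every slot instead
 * of cycling through a subset.
 */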
1883 | ||
1884 | static enum efx_farch_filter_table_id | |
1885 | efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec) | |
1886 | { | |
1887 | BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != | |
1888 | (EFX_FARCH_FILTER_TCP_FULL >> 2)); | |
1889 | BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != | |
1890 | (EFX_FARCH_FILTER_TCP_WILD >> 2)); | |
1891 | BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != | |
1892 | (EFX_FARCH_FILTER_UDP_FULL >> 2)); | |
1893 | BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != | |
1894 | (EFX_FARCH_FILTER_UDP_WILD >> 2)); | |
1895 | BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != | |
1896 | (EFX_FARCH_FILTER_MAC_FULL >> 2)); | |
1897 | BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != | |
1898 | (EFX_FARCH_FILTER_MAC_WILD >> 2)); | |
1899 | BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC != | |
1900 | EFX_FARCH_FILTER_TABLE_RX_MAC + 2); | |
1901 | return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0); | |
1902 | } | |
1903 | ||
1904 | static void efx_farch_filter_push_rx_config(struct efx_nic *efx) | |
1905 | { | |
1906 | struct efx_farch_filter_state *state = efx->filter_state; | |
1907 | struct efx_farch_filter_table *table; | |
1908 | efx_oword_t filter_ctl; | |
1909 | ||
1910 | efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); | |
1911 | ||
1912 | table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; | |
1913 | EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, | |
1914 | table->search_limit[EFX_FARCH_FILTER_TCP_FULL] + | |
1915 | EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); | |
1916 | EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, | |
1917 | table->search_limit[EFX_FARCH_FILTER_TCP_WILD] + | |
1918 | EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); | |
1919 | EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, | |
1920 | table->search_limit[EFX_FARCH_FILTER_UDP_FULL] + | |
1921 | EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); | |
1922 | EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, | |
1923 | table->search_limit[EFX_FARCH_FILTER_UDP_WILD] + | |
1924 | EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); | |
1925 | ||
1926 | table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; | |
1927 | if (table->size) { | |
1928 | EFX_SET_OWORD_FIELD( | |
1929 | filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, | |
1930 | table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + | |
1931 | EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); | |
1932 | EFX_SET_OWORD_FIELD( | |
1933 | filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, | |
1934 | table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + | |
1935 | EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); | |
1936 | } | |
1937 | ||
1938 | table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; | |
1939 | if (table->size) { | |
1940 | EFX_SET_OWORD_FIELD( | |
1941 | filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID, | |
1942 | table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id); | |
1943 | EFX_SET_OWORD_FIELD( | |
1944 | filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED, | |
1945 | !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & | |
1946 | EFX_FILTER_FLAG_RX_RSS)); | |
1947 | EFX_SET_OWORD_FIELD( | |
1948 | filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID, | |
1949 | table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id); | |
1950 | EFX_SET_OWORD_FIELD( | |
1951 | filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, | |
1952 | !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & | |
1953 | EFX_FILTER_FLAG_RX_RSS)); | |
1954 | ||
1955 | /* There is a single bit to enable RX scatter for all | |
1956 | * unmatched packets. Only set it if scatter is | |
1957 | * enabled in both filter specs. | |
1958 | */ | |
1959 | EFX_SET_OWORD_FIELD( | |
1960 | filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, | |
1961 | !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & | |
1962 | table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & | |
1963 | EFX_FILTER_FLAG_RX_SCATTER)); | |
1964 | } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | |
1965 | /* We don't expose 'default' filters because unmatched | |
1966 | * packets always go to the queue number found in the | |
1967 | * RSS table. But we still need to set the RX scatter | |
1968 | * bit here. | |
1969 | */ | |
1970 | EFX_SET_OWORD_FIELD( | |
1971 | filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, | |
1972 | efx->rx_scatter); | |
1973 | } | |
1974 | ||
1975 | efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); | |
1976 | } | |
1977 | ||
1978 | static void efx_farch_filter_push_tx_limits(struct efx_nic *efx) | |
1979 | { | |
1980 | struct efx_farch_filter_state *state = efx->filter_state; | |
1981 | struct efx_farch_filter_table *table; | |
1982 | efx_oword_t tx_cfg; | |
1983 | ||
1984 | efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG); | |
1985 | ||
1986 | table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; | |
1987 | if (table->size) { | |
1988 | EFX_SET_OWORD_FIELD( | |
1989 | tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE, | |
1990 | table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + | |
1991 | EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); | |
1992 | EFX_SET_OWORD_FIELD( | |
1993 | tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE, | |
1994 | table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + | |
1995 | EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); | |
1996 | } | |
1997 | ||
1998 | efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG); | |
1999 | } | |
2000 | ||
2001 | static int | |
2002 | efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec, | |
2003 | const struct efx_filter_spec *gen_spec) | |
2004 | { | |
2005 | bool is_full = false; | |
2006 | ||
2007 | if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && | |
2008 | gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT) | |
2009 | return -EINVAL; | |
2010 | ||
2011 | spec->priority = gen_spec->priority; | |
2012 | spec->flags = gen_spec->flags; | |
2013 | spec->dmaq_id = gen_spec->dmaq_id; | |
2014 | ||
2015 | switch (gen_spec->match_flags) { | |
2016 | case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | | |
2017 | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | | |
2018 | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT): | |
2019 | is_full = true; | |
2020 | /* fall through */ | |
2021 | case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | | |
2022 | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): { | |
2023 | __be32 rhost, host1, host2; | |
2024 | __be16 rport, port1, port2; | |
2025 | ||
2026 | EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX)); | |
2027 | ||
2028 | if (gen_spec->ether_type != htons(ETH_P_IP)) | |
2029 | return -EPROTONOSUPPORT; | |
2030 | if (gen_spec->loc_port == 0 || | |
2031 | (is_full && gen_spec->rem_port == 0)) | |
2032 | return -EADDRNOTAVAIL; | |
2033 | switch (gen_spec->ip_proto) { | |
2034 | case IPPROTO_TCP: | |
2035 | spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL : | |
2036 | EFX_FARCH_FILTER_TCP_WILD); | |
2037 | break; | |
2038 | case IPPROTO_UDP: | |
2039 | spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL : | |
2040 | EFX_FARCH_FILTER_UDP_WILD); | |
2041 | break; | |
2042 | default: | |
2043 | return -EPROTONOSUPPORT; | |
2044 | } | |
2045 | ||
2046 | /* Filter is constructed in terms of source and destination, | |
2047 | * with the odd wrinkle that the ports are swapped in a UDP | |
2048 | * wildcard filter. We need to convert from local and remote | |
2049 | * (= zero for wildcard) addresses. | |
2050 | */ | |
2051 | rhost = is_full ? gen_spec->rem_host[0] : 0; | |
2052 | rport = is_full ? gen_spec->rem_port : 0; | |
2053 | host1 = rhost; | |
2054 | host2 = gen_spec->loc_host[0]; | |
2055 | if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) { | |
2056 | port1 = gen_spec->loc_port; | |
2057 | port2 = rport; | |
2058 | } else { | |
2059 | port1 = rport; | |
2060 | port2 = gen_spec->loc_port; | |
2061 | } | |
2062 | spec->data[0] = ntohl(host1) << 16 | ntohs(port1); | |
2063 | spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16; | |
2064 | spec->data[2] = ntohl(host2); | |
2065 | ||
2066 | break; | |
2067 | } | |
2068 | ||
2069 | case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID: | |
2070 | is_full = true; | |
2071 | /* fall through */ | |
2072 | case EFX_FILTER_MATCH_LOC_MAC: | |
2073 | spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL : | |
2074 | EFX_FARCH_FILTER_MAC_WILD); | |
2075 | spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0; | |
2076 | spec->data[1] = (gen_spec->loc_mac[2] << 24 | | |
2077 | gen_spec->loc_mac[3] << 16 | | |
2078 | gen_spec->loc_mac[4] << 8 | | |
2079 | gen_spec->loc_mac[5]); | |
2080 | spec->data[2] = (gen_spec->loc_mac[0] << 8 | | |
2081 | gen_spec->loc_mac[1]); | |
2082 | break; | |
2083 | ||
2084 | case EFX_FILTER_MATCH_LOC_MAC_IG: | |
2085 | spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ? | |
2086 | EFX_FARCH_FILTER_MC_DEF : | |
2087 | EFX_FARCH_FILTER_UC_DEF); | |
2088 | memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */ | |
2089 | break; | |
2090 | ||
2091 | default: | |
2092 | return -EPROTONOSUPPORT; | |
2093 | } | |
2094 | ||
2095 | return 0; | |
2096 | } | |
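
/* Sketch of the resulting IP filter layout, as encoded above for RX
 * filters: data[2] holds the local IP address, while data[0]/data[1]
 * pack the remote IP together with the two ports (remote port in
 * data[0], local port in data[1]) - except that a UDP wildcard filter
 * swaps the two ports.
 */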
2097 | ||
2098 | static void | |
2099 | efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec, | |
2100 | const struct efx_farch_filter_spec *spec) | |
2101 | { | |
2102 | bool is_full = false; | |
2103 | ||
2104 | /* *gen_spec should be completely initialised, to be consistent | |
2105 | * with efx_filter_init_{rx,tx}() and in case we want to copy | |
2106 | * it back to userland. | |
2107 | */ | |
2108 | memset(gen_spec, 0, sizeof(*gen_spec)); | |
2109 | ||
2110 | gen_spec->priority = spec->priority; | |
2111 | gen_spec->flags = spec->flags; | |
2112 | gen_spec->dmaq_id = spec->dmaq_id; | |
2113 | ||
2114 | switch (spec->type) { | |
2115 | case EFX_FARCH_FILTER_TCP_FULL: | |
2116 | case EFX_FARCH_FILTER_UDP_FULL: | |
2117 | is_full = true; | |
2118 | /* fall through */ | |
2119 | case EFX_FARCH_FILTER_TCP_WILD: | |
2120 | case EFX_FARCH_FILTER_UDP_WILD: { | |
2121 | __be32 host1, host2; | |
2122 | __be16 port1, port2; | |
2123 | ||
2124 | gen_spec->match_flags = | |
2125 | EFX_FILTER_MATCH_ETHER_TYPE | | |
2126 | EFX_FILTER_MATCH_IP_PROTO | | |
2127 | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; | |
2128 | if (is_full) | |
2129 | gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST | | |
2130 | EFX_FILTER_MATCH_REM_PORT); | |
2131 | gen_spec->ether_type = htons(ETH_P_IP); | |
2132 | gen_spec->ip_proto = | |
2133 | (spec->type == EFX_FARCH_FILTER_TCP_FULL || | |
2134 | spec->type == EFX_FARCH_FILTER_TCP_WILD) ? | |
2135 | IPPROTO_TCP : IPPROTO_UDP; | |
2136 | ||
2137 | host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16); | |
2138 | port1 = htons(spec->data[0]); | |
2139 | host2 = htonl(spec->data[2]); | |
2140 | port2 = htons(spec->data[1] >> 16); | |
2141 | if (spec->flags & EFX_FILTER_FLAG_TX) { | |
2142 | gen_spec->loc_host[0] = host1; | |
2143 | gen_spec->rem_host[0] = host2; | |
2144 | } else { | |
2145 | gen_spec->loc_host[0] = host2; | |
2146 | gen_spec->rem_host[0] = host1; | |
2147 | } | |
2148 | if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^ | |
2149 | (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) { | |
2150 | gen_spec->loc_port = port1; | |
2151 | gen_spec->rem_port = port2; | |
2152 | } else { | |
2153 | gen_spec->loc_port = port2; | |
2154 | gen_spec->rem_port = port1; | |
2155 | } | |
2156 | ||
2157 | break; | |
2158 | } | |
2159 | ||
2160 | case EFX_FARCH_FILTER_MAC_FULL: | |
2161 | is_full = true; | |
2162 | /* fall through */ | |
2163 | case EFX_FARCH_FILTER_MAC_WILD: | |
2164 | gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC; | |
2165 | if (is_full) | |
2166 | gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID; | |
2167 | gen_spec->loc_mac[0] = spec->data[2] >> 8; | |
2168 | gen_spec->loc_mac[1] = spec->data[2]; | |
2169 | gen_spec->loc_mac[2] = spec->data[1] >> 24; | |
2170 | gen_spec->loc_mac[3] = spec->data[1] >> 16; | |
2171 | gen_spec->loc_mac[4] = spec->data[1] >> 8; | |
2172 | gen_spec->loc_mac[5] = spec->data[1]; | |
2173 | gen_spec->outer_vid = htons(spec->data[0]); | |
2174 | break; | |
2175 | ||
2176 | case EFX_FARCH_FILTER_UC_DEF: | |
2177 | case EFX_FARCH_FILTER_MC_DEF: | |
2178 | gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG; | |
2179 | gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF; | |
2180 | break; | |
2181 | ||
2182 | default: | |
2183 | WARN_ON(1); | |
2184 | break; | |
2185 | } | |
2186 | } | |
2187 | ||
2188 | static void | |
2189 | efx_farch_filter_init_rx_for_stack(struct efx_nic *efx, | |
2190 | struct efx_farch_filter_spec *spec) | |
2191 | { | |
2192 | /* If there's only one channel then disable RSS for non-VF | |
2193 | * traffic, thereby allowing VFs to use RSS when the PF can't. | |
2194 | */ | |
2195 | spec->priority = EFX_FILTER_PRI_REQUIRED; | |
2196 | spec->flags = (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK | | |
2197 | (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) | | |
2198 | (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); | |
2199 | spec->dmaq_id = 0; | |
2200 | } | |
2201 | ||
2202 | /* Build a filter entry and return its n-tuple key. */ | |
2203 | static u32 efx_farch_filter_build(efx_oword_t *filter, | |
2204 | struct efx_farch_filter_spec *spec) | |
2205 | { | |
2206 | u32 data3; | |
2207 | ||
2208 | switch (efx_farch_filter_spec_table_id(spec)) { | |
2209 | case EFX_FARCH_FILTER_TABLE_RX_IP: { | |
2210 | bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL || | |
2211 | spec->type == EFX_FARCH_FILTER_UDP_WILD); | |
2212 | EFX_POPULATE_OWORD_7( | |
2213 | *filter, | |
2214 | FRF_BZ_RSS_EN, | |
2215 | !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), | |
2216 | FRF_BZ_SCATTER_EN, | |
2217 | !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), | |
2218 | FRF_BZ_TCP_UDP, is_udp, | |
2219 | FRF_BZ_RXQ_ID, spec->dmaq_id, | |
2220 | EFX_DWORD_2, spec->data[2], | |
2221 | EFX_DWORD_1, spec->data[1], | |
2222 | EFX_DWORD_0, spec->data[0]); | |
2223 | data3 = is_udp; | |
2224 | break; | |
2225 | } | |
2226 | ||
2227 | case EFX_FARCH_FILTER_TABLE_RX_MAC: { | |
2228 | bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; | |
2229 | EFX_POPULATE_OWORD_7( | |
2230 | *filter, | |
2231 | FRF_CZ_RMFT_RSS_EN, | |
2232 | !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), | |
2233 | FRF_CZ_RMFT_SCATTER_EN, | |
2234 | !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), | |
2235 | FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id, | |
2236 | FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, | |
2237 | FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2], | |
2238 | FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1], | |
2239 | FRF_CZ_RMFT_VLAN_ID, spec->data[0]); | |
2240 | data3 = is_wild; | |
2241 | break; | |
2242 | } | |
2243 | ||
2244 | case EFX_FARCH_FILTER_TABLE_TX_MAC: { | |
2245 | bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; | |
2246 | EFX_POPULATE_OWORD_5(*filter, | |
2247 | FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id, | |
2248 | FRF_CZ_TMFT_WILDCARD_MATCH, is_wild, | |
2249 | FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2], | |
2250 | FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1], | |
2251 | FRF_CZ_TMFT_VLAN_ID, spec->data[0]); | |
2252 | data3 = is_wild | spec->dmaq_id << 1; | |
2253 | break; | |
2254 | } | |
2255 | ||
2256 | default: | |
2257 | BUG(); | |
2258 | } | |
2259 | ||
2260 | return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3; | |
2261 | } | |
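
/* The value returned above is the 32-bit key consumed by
 * efx_farch_filter_hash() and efx_farch_filter_increment() during
 * insertion; folding in data3 helps keep entries that differ only in
 * TCP/UDP or full/wildcard type from following identical probe sequences.
 */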
2262 | ||
2263 | static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left, | |
2264 | const struct efx_farch_filter_spec *right) | |
2265 | { | |
2266 | if (left->type != right->type || | |
2267 | memcmp(left->data, right->data, sizeof(left->data))) | |
2268 | return false; | |
2269 | ||
2270 | if (left->flags & EFX_FILTER_FLAG_TX && | |
2271 | left->dmaq_id != right->dmaq_id) | |
2272 | return false; | |
2273 | ||
2274 | return true; | |
2275 | } | |
2276 | ||
2277 | /* | |
2278 | * Construct/deconstruct external filter IDs. At least the RX filter | |
2279 | * IDs must be ordered by matching priority, for RX NFC semantics. | |
2280 | * | |
2281 | * Deconstruction needs to be robust against invalid IDs so that | |
2282 | * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can | |
2283 | * accept user-provided IDs. | |
2284 | */ | |
2285 | ||
2286 | #define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5 | |
2287 | ||
2288 | static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = { | |
2289 | [EFX_FARCH_FILTER_TCP_FULL] = 0, | |
2290 | [EFX_FARCH_FILTER_UDP_FULL] = 0, | |
2291 | [EFX_FARCH_FILTER_TCP_WILD] = 1, | |
2292 | [EFX_FARCH_FILTER_UDP_WILD] = 1, | |
2293 | [EFX_FARCH_FILTER_MAC_FULL] = 2, | |
2294 | [EFX_FARCH_FILTER_MAC_WILD] = 3, | |
2295 | [EFX_FARCH_FILTER_UC_DEF] = 4, | |
2296 | [EFX_FARCH_FILTER_MC_DEF] = 4, | |
2297 | }; | |
2298 | ||
2299 | static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = { | |
2300 | EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */ | |
2301 | EFX_FARCH_FILTER_TABLE_RX_IP, | |
2302 | EFX_FARCH_FILTER_TABLE_RX_MAC, | |
2303 | EFX_FARCH_FILTER_TABLE_RX_MAC, | |
2304 | EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */ | |
2305 | EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */ | |
2306 | EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */ | |
2307 | }; | |
2308 | ||
2309 | #define EFX_FARCH_FILTER_INDEX_WIDTH 13 | |
2310 | #define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1) | |
2311 | ||
2312 | static inline u32 | |
2313 | efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec, | |
2314 | unsigned int index) | |
2315 | { | |
2316 | unsigned int range; | |
2317 | ||
2318 | range = efx_farch_filter_type_match_pri[spec->type]; | |
2319 | if (!(spec->flags & EFX_FILTER_FLAG_RX)) | |
2320 | range += EFX_FARCH_FILTER_MATCH_PRI_COUNT; | |
2321 | ||
2322 | return range << EFX_FARCH_FILTER_INDEX_WIDTH | index; | |
2323 | } | |
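
/* Worked example: an RX TCP wildcard filter (match priority 1) at table
 * index 42 yields the external ID (1 << 13) | 42 == 8234.  The helpers
 * below take such an ID back apart.
 */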
2324 | ||
2325 | static inline enum efx_farch_filter_table_id | |
2326 | efx_farch_filter_id_table_id(u32 id) | |
2327 | { | |
2328 | unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH; | |
2329 | ||
2330 | if (range < ARRAY_SIZE(efx_farch_filter_range_table)) | |
2331 | return efx_farch_filter_range_table[range]; | |
2332 | else | |
2333 | return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */ | |
2334 | } | |
2335 | ||
2336 | static inline unsigned int efx_farch_filter_id_index(u32 id) | |
2337 | { | |
2338 | return id & EFX_FARCH_FILTER_INDEX_MASK; | |
2339 | } | |
2340 | ||
2341 | u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx) | |
2342 | { | |
2343 | struct efx_farch_filter_state *state = efx->filter_state; | |
2344 | unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1; | |
2345 | enum efx_farch_filter_table_id table_id; | |
2346 | ||
2347 | do { | |
2348 | table_id = efx_farch_filter_range_table[range]; | |
2349 | if (state->table[table_id].size != 0) | |
2350 | return range << EFX_FARCH_FILTER_INDEX_WIDTH | | |
2351 | state->table[table_id].size; | |
2352 | } while (range--); | |
2353 | ||
2354 | return 0; | |
2355 | } | |
2356 | ||
2357 | s32 efx_farch_filter_insert(struct efx_nic *efx, | |
2358 | struct efx_filter_spec *gen_spec, | |
2359 | bool replace_equal) | |
2360 | { | |
2361 | struct efx_farch_filter_state *state = efx->filter_state; | |
2362 | struct efx_farch_filter_table *table; | |
2363 | struct efx_farch_filter_spec spec; | |
2364 | efx_oword_t filter; | |
2365 | int rep_index, ins_index; | |
2366 | unsigned int depth = 0; | |
2367 | int rc; | |
2368 | ||
2369 | rc = efx_farch_filter_from_gen_spec(&spec, gen_spec); | |
2370 | if (rc) | |
2371 | return rc; | |
2372 | ||
2373 | table = &state->table[efx_farch_filter_spec_table_id(&spec)]; | |
2374 | if (table->size == 0) | |
2375 | return -EINVAL; | |
2376 | ||
2377 | netif_vdbg(efx, hw, efx->net_dev, | |
2378 | "%s: type %d search_limit=%d", __func__, spec.type, | |
2379 | table->search_limit[spec.type]); | |
2380 | ||
2381 | if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { | |
2382 | /* One filter spec per type */ | |
2383 | BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0); | |
2384 | BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF != | |
2385 | EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF); | |
2386 | rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF; | |
2387 | ins_index = rep_index; | |
2388 | ||
2389 | spin_lock_bh(&efx->filter_lock); | |
2390 | } else { | |
2391 | /* Search concurrently for | |
2392 | * (1) a filter to be replaced (rep_index): any filter | |
2393 | * with the same match values, up to the current | |
2394 | * search depth for this type, and | |
2395 | * (2) the insertion point (ins_index): (1) or any | |
2396 | * free slot before it or up to the maximum search | |
2397 | * depth for this priority | |
2398 | * We fail if we cannot find (2). | |
2399 | * | |
2400 | * We can stop once either | |
2401 | * (a) we find (1), in which case we have definitely | |
2402 | * found (2) as well; or | |
2403 | * (b) we have searched exhaustively for (1), and have | |
2404 | * either found (2) or searched exhaustively for it | |
2405 | */ | |
2406 | u32 key = efx_farch_filter_build(&filter, &spec); | |
2407 | unsigned int hash = efx_farch_filter_hash(key); | |
2408 | unsigned int incr = efx_farch_filter_increment(key); | |
2409 | unsigned int max_rep_depth = table->search_limit[spec.type]; | |
2410 | unsigned int max_ins_depth = | |
2411 | spec.priority <= EFX_FILTER_PRI_HINT ? | |
2412 | EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX : | |
2413 | EFX_FARCH_FILTER_CTL_SRCH_MAX; | |
2414 | unsigned int i = hash & (table->size - 1); | |
2415 | ||
2416 | ins_index = -1; | |
2417 | depth = 1; | |
2418 | ||
2419 | spin_lock_bh(&efx->filter_lock); | |
2420 | ||
2421 | for (;;) { | |
2422 | if (!test_bit(i, table->used_bitmap)) { | |
2423 | if (ins_index < 0) | |
2424 | ins_index = i; | |
2425 | } else if (efx_farch_filter_equal(&spec, | |
2426 | &table->spec[i])) { | |
2427 | /* Case (a) */ | |
2428 | if (ins_index < 0) | |
2429 | ins_index = i; | |
2430 | rep_index = i; | |
2431 | break; | |
2432 | } | |
2433 | ||
2434 | if (depth >= max_rep_depth && | |
2435 | (ins_index >= 0 || depth >= max_ins_depth)) { | |
2436 | /* Case (b) */ | |
2437 | if (ins_index < 0) { | |
2438 | rc = -EBUSY; | |
2439 | goto out; | |
2440 | } | |
2441 | rep_index = -1; | |
2442 | break; | |
2443 | } | |
2444 | ||
2445 | i = (i + incr) & (table->size - 1); | |
2446 | ++depth; | |
2447 | } | |
2448 | } | |
2449 | ||
2450 | /* If we found a filter to be replaced, check whether we | |
2451 | * should do so | |
2452 | */ | |
2453 | if (rep_index >= 0) { | |
2454 | struct efx_farch_filter_spec *saved_spec = | |
2455 | &table->spec[rep_index]; | |
2456 | ||
2457 | if (spec.priority == saved_spec->priority && !replace_equal) { | |
2458 | rc = -EEXIST; | |
2459 | goto out; | |
2460 | } | |
2461 | if (spec.priority < saved_spec->priority && | |
2462 | !(saved_spec->priority == EFX_FILTER_PRI_REQUIRED && | |
2463 | saved_spec->flags & EFX_FILTER_FLAG_RX_STACK)) { | |
2464 | rc = -EPERM; | |
2465 | goto out; | |
2466 | } | |
2467 | if (spec.flags & EFX_FILTER_FLAG_RX_STACK) { | |
2468 | /* Just make sure it won't be removed */ | |
2469 | saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK; | |
2470 | rc = 0; | |
2471 | goto out; | |
2472 | } | |
2473 | /* Retain the RX_STACK flag */ | |
2474 | spec.flags |= saved_spec->flags & EFX_FILTER_FLAG_RX_STACK; | |
2475 | } | |
2476 | ||
2477 | /* Insert the filter */ | |
2478 | if (ins_index != rep_index) { | |
2479 | __set_bit(ins_index, table->used_bitmap); | |
2480 | ++table->used; | |
2481 | } | |
2482 | table->spec[ins_index] = spec; | |
2483 | ||
2484 | if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { | |
2485 | efx_farch_filter_push_rx_config(efx); | |
2486 | } else { | |
2487 | if (table->search_limit[spec.type] < depth) { | |
2488 | table->search_limit[spec.type] = depth; | |
2489 | if (spec.flags & EFX_FILTER_FLAG_TX) | |
2490 | efx_farch_filter_push_tx_limits(efx); | |
2491 | else | |
2492 | efx_farch_filter_push_rx_config(efx); | |
2493 | } | |
2494 | ||
2495 | efx_writeo(efx, &filter, | |
2496 | table->offset + table->step * ins_index); | |
2497 | ||
2498 | /* If we were able to replace a filter by inserting | |
2499 | * at a lower depth, clear the replaced filter | |
2500 | */ | |
2501 | if (ins_index != rep_index && rep_index >= 0) | |
2502 | efx_farch_filter_table_clear_entry(efx, table, | |
2503 | rep_index); | |
2504 | } | |
2505 | ||
2506 | netif_vdbg(efx, hw, efx->net_dev, | |
2507 | "%s: filter type %d index %d rxq %u set", | |
2508 | __func__, spec.type, ins_index, spec.dmaq_id); | |
2509 | rc = efx_farch_filter_make_id(&spec, ins_index); | |
2510 | ||
2511 | out: | |
2512 | spin_unlock_bh(&efx->filter_lock); | |
2513 | return rc; | |
2514 | } | |
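
/* Note on the return value above: on success, efx_farch_filter_insert()
 * returns the external filter ID built by efx_farch_filter_make_id(),
 * not the raw table index; a negative value is an errno.
 */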
2515 | ||
2516 | static void | |
2517 | efx_farch_filter_table_clear_entry(struct efx_nic *efx, | |
2518 | struct efx_farch_filter_table *table, | |
2519 | unsigned int filter_idx) | |
2520 | { | |
2521 | static efx_oword_t filter; | |
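/* Never written to, and static, so 'filter' stays all-zeroes; writing
 * it out below overwrites the hardware entry with an all-zero filter.
 */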
2522 | ||
2523 | EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap)); | |
2524 | BUG_ON(table->offset == 0); /* can't clear MAC default filters */ | |
2525 | ||
2526 | __clear_bit(filter_idx, table->used_bitmap); | |
2527 | --table->used; | |
2528 | memset(&table->spec[filter_idx], 0, sizeof(table->spec[0])); | |
2529 | ||
2530 | efx_writeo(efx, &filter, table->offset + table->step * filter_idx); | |
2531 | ||
2532 | /* If this filter required a greater search depth than | |
2533 | * any other, the search limit for its type can now be | |
2534 | * decreased. However, it is hard to determine that | |
2535 | * unless the table has become completely empty - in | |
2536 | * which case, all its search limits can be set to 0. | |
2537 | */ | |
2538 | if (unlikely(table->used == 0)) { | |
2539 | memset(table->search_limit, 0, sizeof(table->search_limit)); | |
2540 | if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC) | |
2541 | efx_farch_filter_push_tx_limits(efx); | |
2542 | else | |
2543 | efx_farch_filter_push_rx_config(efx); | |
2544 | } | |
2545 | } | |
2546 | ||
2547 | static int efx_farch_filter_remove(struct efx_nic *efx, | |
2548 | struct efx_farch_filter_table *table, | |
2549 | unsigned int filter_idx, | |
2550 | enum efx_filter_priority priority) | |
2551 | { | |
2552 | struct efx_farch_filter_spec *spec = &table->spec[filter_idx]; | |
2553 | ||
2554 | if (!test_bit(filter_idx, table->used_bitmap) || | |
2555 | spec->priority > priority) | |
2556 | return -ENOENT; | |
2557 | ||
2558 | if (spec->flags & EFX_FILTER_FLAG_RX_STACK) { | |
2559 | efx_farch_filter_init_rx_for_stack(efx, spec); | |
2560 | efx_farch_filter_push_rx_config(efx); | |
2561 | } else { | |
2562 | efx_farch_filter_table_clear_entry(efx, table, filter_idx); | |
2563 | } | |
2564 | ||
2565 | return 0; | |
2566 | } | |
2567 | ||
2568 | int efx_farch_filter_remove_safe(struct efx_nic *efx, | |
2569 | enum efx_filter_priority priority, | |
2570 | u32 filter_id) | |
2571 | { | |
2572 | struct efx_farch_filter_state *state = efx->filter_state; | |
2573 | enum efx_farch_filter_table_id table_id; | |
2574 | struct efx_farch_filter_table *table; | |
2575 | unsigned int filter_idx; | |
2576 | struct efx_farch_filter_spec *spec; | |
2577 | int rc; | |
2578 | ||
2579 | table_id = efx_farch_filter_id_table_id(filter_id); | |
2580 | if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) | |
2581 | return -ENOENT; | |
2582 | table = &state->table[table_id]; | |
2583 | ||
2584 | filter_idx = efx_farch_filter_id_index(filter_id); | |
2585 | if (filter_idx >= table->size) | |
2586 | return -ENOENT; | |
2587 | spec = &table->spec[filter_idx]; | |
2588 | ||
2589 | spin_lock_bh(&efx->filter_lock); | |
2590 | rc = efx_farch_filter_remove(efx, table, filter_idx, priority); | |
2591 | spin_unlock_bh(&efx->filter_lock); | |
2592 | ||
2593 | return rc; | |
2594 | } | |
2595 | ||
2596 | int efx_farch_filter_get_safe(struct efx_nic *efx, | |
2597 | enum efx_filter_priority priority, | |
2598 | u32 filter_id, struct efx_filter_spec *spec_buf) | |
2599 | { | |
2600 | struct efx_farch_filter_state *state = efx->filter_state; | |
2601 | enum efx_farch_filter_table_id table_id; | |
2602 | struct efx_farch_filter_table *table; | |
2603 | struct efx_farch_filter_spec *spec; | |
2604 | unsigned int filter_idx; | |
2605 | int rc; | |
2606 | ||
2607 | table_id = efx_farch_filter_id_table_id(filter_id); | |
2608 | if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) | |
2609 | return -ENOENT; | |
2610 | table = &state->table[table_id]; | |
2611 | ||
2612 | filter_idx = efx_farch_filter_id_index(filter_id); | |
2613 | if (filter_idx >= table->size) | |
2614 | return -ENOENT; | |
2615 | spec = &table->spec[filter_idx]; | |
2616 | ||
2617 | spin_lock_bh(&efx->filter_lock); | |
2618 | ||
2619 | if (test_bit(filter_idx, table->used_bitmap) && | |
2620 | spec->priority == priority) { | |
2621 | efx_farch_filter_to_gen_spec(spec_buf, spec); | |
2622 | rc = 0; | |
2623 | } else { | |
2624 | rc = -ENOENT; | |
2625 | } | |
2626 | ||
2627 | spin_unlock_bh(&efx->filter_lock); | |
2628 | ||
2629 | return rc; | |
2630 | } | |
2631 | ||
2632 | static void | |
2633 | efx_farch_filter_table_clear(struct efx_nic *efx, | |
2634 | enum efx_farch_filter_table_id table_id, | |
2635 | enum efx_filter_priority priority) | |
2636 | { | |
2637 | struct efx_farch_filter_state *state = efx->filter_state; | |
2638 | struct efx_farch_filter_table *table = &state->table[table_id]; | |
2639 | unsigned int filter_idx; | |
2640 | ||
2641 | spin_lock_bh(&efx->filter_lock); | |
2642 | for (filter_idx = 0; filter_idx < table->size; ++filter_idx) | |
2643 | efx_farch_filter_remove(efx, table, filter_idx, priority); | |
2644 | spin_unlock_bh(&efx->filter_lock); | |
2645 | } | |
2646 | ||
2647 | void efx_farch_filter_clear_rx(struct efx_nic *efx, | |
2648 | enum efx_filter_priority priority) | |
2649 | { | |
2650 | efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP, | |
2651 | priority); | |
2652 | efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC, | |
2653 | priority); | |
2654 | efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF, | |
2655 | priority); | |
2656 | } | |
2657 | ||
2658 | u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, | |
2659 | enum efx_filter_priority priority) | |
2660 | { | |
2661 | struct efx_farch_filter_state *state = efx->filter_state; | |
2662 | enum efx_farch_filter_table_id table_id; | |
2663 | struct efx_farch_filter_table *table; | |
2664 | unsigned int filter_idx; | |
2665 | u32 count = 0; | |
2666 | ||
2667 | spin_lock_bh(&efx->filter_lock); | |
2668 | ||
2669 | for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; | |
2670 | table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; | |
2671 | table_id++) { | |
2672 | table = &state->table[table_id]; | |
2673 | for (filter_idx = 0; filter_idx < table->size; filter_idx++) { | |
2674 | if (test_bit(filter_idx, table->used_bitmap) && | |
2675 | table->spec[filter_idx].priority == priority) | |
2676 | ++count; | |
2677 | } | |
2678 | } | |
2679 | ||
2680 | spin_unlock_bh(&efx->filter_lock); | |
2681 | ||
2682 | return count; | |
2683 | } | |
2684 | ||
2685 | s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, | |
2686 | enum efx_filter_priority priority, | |
2687 | u32 *buf, u32 size) | |
2688 | { | |
2689 | struct efx_farch_filter_state *state = efx->filter_state; | |
2690 | enum efx_farch_filter_table_id table_id; | |
2691 | struct efx_farch_filter_table *table; | |
2692 | unsigned int filter_idx; | |
2693 | s32 count = 0; | |
2694 | ||
2695 | spin_lock_bh(&efx->filter_lock); | |
2696 | ||
2697 | for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; | |
2698 | table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; | |
2699 | table_id++) { | |
2700 | table = &state->table[table_id]; | |
2701 | for (filter_idx = 0; filter_idx < table->size; filter_idx++) { | |
2702 | if (test_bit(filter_idx, table->used_bitmap) && | |
2703 | table->spec[filter_idx].priority == priority) { | |
2704 | if (count == size) { | |
2705 | count = -EMSGSIZE; | |
2706 | goto out; | |
2707 | } | |
2708 | buf[count++] = efx_farch_filter_make_id( | |
2709 | &table->spec[filter_idx], filter_idx); | |
2710 | } | |
2711 | } | |
2712 | } | |
2713 | out: | |
2714 | spin_unlock_bh(&efx->filter_lock); | |
2715 | ||
2716 | return count; | |
2717 | } | |
2718 | ||
2719 | /* Restore filter state after reset */ | |
2720 | void efx_farch_filter_table_restore(struct efx_nic *efx) | |
2721 | { | |
2722 | struct efx_farch_filter_state *state = efx->filter_state; | |
2723 | enum efx_farch_filter_table_id table_id; | |
2724 | struct efx_farch_filter_table *table; | |
2725 | efx_oword_t filter; | |
2726 | unsigned int filter_idx; | |
2727 | ||
2728 | spin_lock_bh(&efx->filter_lock); | |
2729 | ||
2730 | for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { | |
2731 | table = &state->table[table_id]; | |
2732 | ||
2733 | /* Skip tables with no per-entry hardware registers (step == 0) */ | |
2734 | if (table->step == 0) | |
2735 | continue; | |
2736 | ||
2737 | for (filter_idx = 0; filter_idx < table->size; filter_idx++) { | |
2738 | if (!test_bit(filter_idx, table->used_bitmap)) | |
2739 | continue; | |
2740 | efx_farch_filter_build(&filter, &table->spec[filter_idx]); | |
2741 | efx_writeo(efx, &filter, | |
2742 | table->offset + table->step * filter_idx); | |
2743 | } | |
2744 | } | |
2745 | ||
2746 | efx_farch_filter_push_rx_config(efx); | |
2747 | efx_farch_filter_push_tx_limits(efx); | |
2748 | ||
2749 | spin_unlock_bh(&efx->filter_lock); | |
2750 | } | |
2751 | ||
2752 | void efx_farch_filter_table_remove(struct efx_nic *efx) | |
2753 | { | |
2754 | struct efx_farch_filter_state *state = efx->filter_state; | |
2755 | enum efx_farch_filter_table_id table_id; | |
2756 | ||
2757 | for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { | |
2758 | kfree(state->table[table_id].used_bitmap); | |
2759 | vfree(state->table[table_id].spec); | |
2760 | } | |
2761 | kfree(state); | |
2762 | } | |
2763 | ||
2764 | int efx_farch_filter_table_probe(struct efx_nic *efx) | |
2765 | { | |
2766 | struct efx_farch_filter_state *state; | |
2767 | struct efx_farch_filter_table *table; | |
2768 | unsigned table_id; | |
2769 | ||
2770 | state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL); | |
2771 | if (!state) | |
2772 | return -ENOMEM; | |
2773 | efx->filter_state = state; | |
2774 | ||
2775 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | |
2776 | table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; | |
2777 | table->id = EFX_FARCH_FILTER_TABLE_RX_IP; | |
2778 | table->offset = FR_BZ_RX_FILTER_TBL0; | |
2779 | table->size = FR_BZ_RX_FILTER_TBL0_ROWS; | |
2780 | table->step = FR_BZ_RX_FILTER_TBL0_STEP; | |
2781 | } | |
2782 | ||
2783 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { | |
2784 | table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; | |
2785 | table->id = EFX_FARCH_FILTER_TABLE_RX_MAC; | |
2786 | table->offset = FR_CZ_RX_MAC_FILTER_TBL0; | |
2787 | table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; | |
2788 | table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP; | |
2789 | ||
2790 | table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; | |
2791 | table->id = EFX_FARCH_FILTER_TABLE_RX_DEF; | |
2792 | table->size = EFX_FARCH_FILTER_SIZE_RX_DEF; | |
2793 | ||
2794 | table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; | |
2795 | table->id = EFX_FARCH_FILTER_TABLE_TX_MAC; | |
2796 | table->offset = FR_CZ_TX_MAC_FILTER_TBL0; | |
2797 | table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS; | |
2798 | table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP; | |
2799 | } | |
2800 | ||
2801 | for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { | |
2802 | table = &state->table[table_id]; | |
2803 | if (table->size == 0) | |
2804 | continue; | |
2805 | table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size), | |
2806 | sizeof(unsigned long), | |
2807 | GFP_KERNEL); | |
2808 | if (!table->used_bitmap) | |
2809 | goto fail; | |
2810 | table->spec = vzalloc(table->size * sizeof(*table->spec)); | |
2811 | if (!table->spec) | |
2812 | goto fail; | |
2813 | } | |
2814 | ||
2815 | table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; | |
2816 | if (table->size) { | |
2817 | /* RX default filters must always exist */ | |
2818 | struct efx_farch_filter_spec *spec; | |
2819 | unsigned i; | |
2820 | ||
2821 | for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) { | |
2822 | spec = &table->spec[i]; | |
2823 | spec->type = EFX_FARCH_FILTER_UC_DEF + i; | |
2824 | efx_farch_filter_init_rx_for_stack(efx, spec); | |
2825 | __set_bit(i, table->used_bitmap); | |
2826 | } | |
2827 | } | |
2828 | ||
2829 | efx_farch_filter_push_rx_config(efx); | |
2830 | ||
2831 | return 0; | |
2832 | ||
2833 | fail: | |
2834 | efx_farch_filter_table_remove(efx); | |
2835 | return -ENOMEM; | |
2836 | } | |
2837 | ||
2838 | /* Update scatter enable flags for filters pointing to our own RX queues */ | |
2839 | void efx_farch_filter_update_rx_scatter(struct efx_nic *efx) | |
2840 | { | |
2841 | struct efx_farch_filter_state *state = efx->filter_state; | |
2842 | enum efx_farch_filter_table_id table_id; | |
2843 | struct efx_farch_filter_table *table; | |
2844 | efx_oword_t filter; | |
2845 | unsigned int filter_idx; | |
2846 | ||
2847 | spin_lock_bh(&efx->filter_lock); | |
2848 | ||
2849 | for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; | |
2850 | table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; | |
2851 | table_id++) { | |
2852 | table = &state->table[table_id]; | |
2853 | ||
2854 | for (filter_idx = 0; filter_idx < table->size; filter_idx++) { | |
2855 | if (!test_bit(filter_idx, table->used_bitmap) || | |
2856 | table->spec[filter_idx].dmaq_id >= | |
2857 | efx->n_rx_channels) | |
2858 | continue; | |
2859 | ||
2860 | if (efx->rx_scatter) | |
2861 | table->spec[filter_idx].flags |= | |
2862 | EFX_FILTER_FLAG_RX_SCATTER; | |
2863 | else | |
2864 | table->spec[filter_idx].flags &= | |
2865 | ~EFX_FILTER_FLAG_RX_SCATTER; | |
2866 | ||
2867 | if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF) | |
2868 | /* Pushed by efx_farch_filter_push_rx_config() */ | |
2869 | continue; | |
2870 | ||
2871 | efx_farch_filter_build(&filter, &table->spec[filter_idx]); | |
2872 | efx_writeo(efx, &filter, | |
2873 | table->offset + table->step * filter_idx); | |
2874 | } | |
2875 | } | |
2876 | ||
2877 | efx_farch_filter_push_rx_config(efx); | |
2878 | ||
2879 | spin_unlock_bh(&efx->filter_lock); | |
2880 | } | |
2881 | ||
2882 | #ifdef CONFIG_RFS_ACCEL | |
2883 | ||
2884 | s32 efx_farch_filter_rfs_insert(struct efx_nic *efx, | |
2885 | struct efx_filter_spec *gen_spec) | |
2886 | { | |
2887 | return efx_farch_filter_insert(efx, gen_spec, true); | |
2888 | } | |
2889 | ||
2890 | bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, | |
2891 | unsigned int index) | |
2892 | { | |
2893 | struct efx_farch_filter_state *state = efx->filter_state; | |
2894 | struct efx_farch_filter_table *table = | |
2895 | &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; | |
2896 | ||
2897 | if (test_bit(index, table->used_bitmap) && | |
2898 | table->spec[index].priority == EFX_FILTER_PRI_HINT && | |
2899 | rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, | |
2900 | flow_id, index)) { | |
2901 | efx_farch_filter_table_clear_entry(efx, table, index); | |
2902 | return true; | |
2903 | } | |
2904 | ||
2905 | return false; | |
2906 | } | |
2907 | ||
2908 | #endif /* CONFIG_RFS_ACCEL */ |