1 /****************************************************************************
2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2013 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11 #include <linux/socket.h>
12 #include <linux/in.h>
13 #include <linux/slab.h>
14 #include <linux/ip.h>
15 #include <linux/ipv6.h>
16 #include <linux/tcp.h>
17 #include <linux/udp.h>
18 #include <linux/prefetch.h>
19 #include <linux/moduleparam.h>
20 #include <linux/iommu.h>
21 #include <net/ip.h>
22 #include <net/checksum.h>
23 #include "net_driver.h"
24 #include "efx.h"
25 #include "filter.h"
26 #include "nic.h"
27 #include "selftest.h"
28 #include "workarounds.h"
29
30 /* Preferred number of descriptors to fill at once */
31 #define EFX_RX_PREFERRED_BATCH 8U
32
33 /* Number of RX buffers to recycle pages for. When creating the RX page recycle
34 * ring, this number is divided by the number of buffers per page to calculate
35 * the number of pages to store in the RX page recycle ring.
36 */
37 #define EFX_RECYCLE_RING_SIZE_IOMMU 4096
38 #define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
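/* Purely as an illustration: with 4K pages and two buffers per page (the
 * typical layout for a 1500-byte MTU), efx_init_rx_recycle_ring() turns
 * these into a ring of 4096 / 2 = 2048 pages when an IOMMU is present,
 * and (2 * 8) / 2 = 8 pages otherwise (both already powers of two).
 */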
39
40 /* Size of buffer allocated for skb header area. */
41 #define EFX_SKB_HEADERS 128u
42
43 /* This is the percentage fill level below which new RX descriptors
44 * will be added to the RX descriptor ring.
45 */
46 static unsigned int rx_refill_threshold;
47
48 /* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
49 #define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
50 EFX_RX_USR_BUF_SIZE)
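/* Illustrative numbers only: if EFX_RX_USR_BUF_SIZE were 2048 bytes and
 * the largest possible frame roughly 9000 bytes, this would evaluate to
 * DIV_ROUND_UP(9000, 2048) = 5 fragments per packet.
 */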
51
52 /*
53 * RX maximum head room required.
54 *
55 * This must be at least 1 to prevent overflow, plus one packet-worth
56 * to allow pipelined receives.
57 */
58 #define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
59
60 static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
61 {
62 return page_address(buf->page) + buf->page_offset;
63 }
64
65 static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
66 {
67 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
68 return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
69 #else
70 const u8 *data = eh + efx->rx_packet_hash_offset;
71 return (u32)data[0] |
72 (u32)data[1] << 8 |
73 (u32)data[2] << 16 |
74 (u32)data[3] << 24;
75 #endif
76 }
77
78 static inline struct efx_rx_buffer *
79 efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
80 {
81 if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
82 return efx_rx_buffer(rx_queue, 0);
83 else
84 return rx_buf + 1;
85 }
86
87 static inline void efx_sync_rx_buffer(struct efx_nic *efx,
88 struct efx_rx_buffer *rx_buf,
89 unsigned int len)
90 {
91 dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
92 DMA_FROM_DEVICE);
93 }
94
95 void efx_rx_config_page_split(struct efx_nic *efx)
96 {
97 efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
98 EFX_RX_BUF_ALIGNMENT);
99 efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
100 ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
101 efx->rx_page_buf_step);
102 efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
103 efx->rx_bufs_per_page;
104 efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
105 efx->rx_bufs_per_page);
106 }
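/* Worked example (illustrative numbers only): with 4K pages and an
 * rx_page_buf_step of about 1.8K, two buffers fit in each page after the
 * struct efx_rx_page_state header, so rx_buffer_truesize is 4096 / 2 =
 * 2048 bytes and a preferred batch of 8 buffers needs
 * rx_pages_per_batch = DIV_ROUND_UP(8, 2) = 4 pages.
 */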
107
108 /* Check the RX page recycle ring for a page that can be reused. */
109 static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
110 {
111 struct efx_nic *efx = rx_queue->efx;
112 struct page *page;
113 struct efx_rx_page_state *state;
114 unsigned index;
115
116 index = rx_queue->page_remove & rx_queue->page_ptr_mask;
117 page = rx_queue->page_ring[index];
118 if (page == NULL)
119 return NULL;
120
121 rx_queue->page_ring[index] = NULL;
122 /* page_remove cannot exceed page_add. */
123 if (rx_queue->page_remove != rx_queue->page_add)
124 ++rx_queue->page_remove;
125
126 /* If page_count is 1 then we hold the only reference to this page. */
127 if (page_count(page) == 1) {
128 ++rx_queue->page_recycle_count;
129 return page;
130 } else {
131 state = page_address(page);
132 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
133 PAGE_SIZE << efx->rx_buffer_order,
134 DMA_FROM_DEVICE);
135 put_page(page);
136 ++rx_queue->page_recycle_failed;
137 }
138
139 return NULL;
140 }
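/* Note on efx_reuse_page(): a page is handed back for reuse only when
 * page_count() == 1, i.e. the stack has released every fragment that
 * pointed into it and only the driver's reference remains, so its existing
 * DMA mapping is still safe to use.  Otherwise the mapping is torn down
 * and the reference is dropped.
 */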
141
/**
 * efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
 * @rx_queue: Efx RX queue
 * @atomic: if true, allocate with GFP_ATOMIC rather than GFP_KERNEL
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one.  If a single page can be used for
 * multiple buffers, then the page will either be inserted fully, or not
 * at all.
 *
 * Return: 0 on success, or a negative error code.
 */
152 static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
153 {
154 struct efx_nic *efx = rx_queue->efx;
155 struct efx_rx_buffer *rx_buf;
156 struct page *page;
157 unsigned int page_offset;
158 struct efx_rx_page_state *state;
159 dma_addr_t dma_addr;
160 unsigned index, count;
161
162 count = 0;
163 do {
164 page = efx_reuse_page(rx_queue);
165 if (page == NULL) {
166 page = alloc_pages(__GFP_COMP |
167 (atomic ? GFP_ATOMIC : GFP_KERNEL),
168 efx->rx_buffer_order);
169 if (unlikely(page == NULL))
170 return -ENOMEM;
171 dma_addr =
172 dma_map_page(&efx->pci_dev->dev, page, 0,
173 PAGE_SIZE << efx->rx_buffer_order,
174 DMA_FROM_DEVICE);
175 if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
176 dma_addr))) {
177 __free_pages(page, efx->rx_buffer_order);
178 return -EIO;
179 }
180 state = page_address(page);
181 state->dma_addr = dma_addr;
182 } else {
183 state = page_address(page);
184 dma_addr = state->dma_addr;
185 }
186
187 dma_addr += sizeof(struct efx_rx_page_state);
188 page_offset = sizeof(struct efx_rx_page_state);
189
190 do {
191 index = rx_queue->added_count & rx_queue->ptr_mask;
192 rx_buf = efx_rx_buffer(rx_queue, index);
193 rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
194 rx_buf->page = page;
195 rx_buf->page_offset = page_offset + efx->rx_ip_align;
196 rx_buf->len = efx->rx_dma_len;
197 rx_buf->flags = 0;
198 ++rx_queue->added_count;
199 get_page(page);
200 dma_addr += efx->rx_page_buf_step;
201 page_offset += efx->rx_page_buf_step;
202 } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
203
204 rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
205 } while (++count < efx->rx_pages_per_batch);
206
207 return 0;
208 }
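/* Note on efx_init_rx_buffers(): each page starts with a small
 * struct efx_rx_page_state holding the page's DMA address; the remainder
 * is carved into rx_page_buf_step-sized buffers, each taking its own page
 * reference via get_page().  The buffer filled last is tagged
 * EFX_RX_BUF_LAST_IN_PAGE so that unmapping and recycling happen exactly
 * once per page.
 */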
209
210 /* Unmap a DMA-mapped page. This function is only called for the final RX
211 * buffer in a page.
212 */
213 static void efx_unmap_rx_buffer(struct efx_nic *efx,
214 struct efx_rx_buffer *rx_buf)
215 {
216 struct page *page = rx_buf->page;
217
218 if (page) {
219 struct efx_rx_page_state *state = page_address(page);
220 dma_unmap_page(&efx->pci_dev->dev,
221 state->dma_addr,
222 PAGE_SIZE << efx->rx_buffer_order,
223 DMA_FROM_DEVICE);
224 }
225 }
226
227 static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
228 struct efx_rx_buffer *rx_buf,
229 unsigned int num_bufs)
230 {
231 do {
232 if (rx_buf->page) {
233 put_page(rx_buf->page);
234 rx_buf->page = NULL;
235 }
236 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
237 } while (--num_bufs);
238 }
239
240 /* Attempt to recycle the page if there is an RX recycle ring; the page can
241 * only be added if this is the final RX buffer, to prevent pages being used in
242 * the descriptor ring and appearing in the recycle ring simultaneously.
243 */
244 static void efx_recycle_rx_page(struct efx_channel *channel,
245 struct efx_rx_buffer *rx_buf)
246 {
247 struct page *page = rx_buf->page;
248 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
249 struct efx_nic *efx = rx_queue->efx;
250 unsigned index;
251
252 /* Only recycle the page after processing the final buffer. */
253 if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
254 return;
255
256 index = rx_queue->page_add & rx_queue->page_ptr_mask;
257 if (rx_queue->page_ring[index] == NULL) {
258 unsigned read_index = rx_queue->page_remove &
259 rx_queue->page_ptr_mask;
260
261 /* The next slot in the recycle ring is available, but
262 * increment page_remove if the read pointer currently
263 * points here.
264 */
265 if (read_index == index)
266 ++rx_queue->page_remove;
267 rx_queue->page_ring[index] = page;
268 ++rx_queue->page_add;
269 return;
270 }
271 ++rx_queue->page_recycle_full;
272 efx_unmap_rx_buffer(efx, rx_buf);
273 put_page(rx_buf->page);
274 }
275
276 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
277 struct efx_rx_buffer *rx_buf)
278 {
279 /* Release the page reference we hold for the buffer. */
280 if (rx_buf->page)
281 put_page(rx_buf->page);
282
283 /* If this is the last buffer in a page, unmap and free it. */
284 if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
285 efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
286 efx_free_rx_buffers(rx_queue, rx_buf, 1);
287 }
288 rx_buf->page = NULL;
289 }
290
291 /* Recycle the pages that are used by buffers that have just been received. */
292 static void efx_recycle_rx_pages(struct efx_channel *channel,
293 struct efx_rx_buffer *rx_buf,
294 unsigned int n_frags)
295 {
296 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
297
298 do {
299 efx_recycle_rx_page(channel, rx_buf);
300 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
301 } while (--n_frags);
302 }
303
304 static void efx_discard_rx_packet(struct efx_channel *channel,
305 struct efx_rx_buffer *rx_buf,
306 unsigned int n_frags)
307 {
308 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
309
310 efx_recycle_rx_pages(channel, rx_buf, n_frags);
311
312 efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
313 }
314
/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 * @atomic: if true, allocate new buffers with GFP_ATOMIC rather than GFP_KERNEL
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
327 void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
328 {
329 struct efx_nic *efx = rx_queue->efx;
330 unsigned int fill_level, batch_size;
331 int space, rc = 0;
332
333 if (!rx_queue->refill_enabled)
334 return;
335
336 /* Calculate current fill level, and exit if we don't need to fill */
337 fill_level = (rx_queue->added_count - rx_queue->removed_count);
338 EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
339 if (fill_level >= rx_queue->fast_fill_trigger)
340 goto out;
341
342 /* Record minimum fill level */
343 if (unlikely(fill_level < rx_queue->min_fill)) {
344 if (fill_level)
345 rx_queue->min_fill = fill_level;
346 }
347
348 batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
349 space = rx_queue->max_fill - fill_level;
350 EFX_WARN_ON_ONCE_PARANOID(space < batch_size);
351
352 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
353 "RX queue %d fast-filling descriptor ring from"
354 " level %d to level %d\n",
355 efx_rx_queue_index(rx_queue), fill_level,
356 rx_queue->max_fill);
357
358
359 do {
360 rc = efx_init_rx_buffers(rx_queue, atomic);
361 if (unlikely(rc)) {
362 /* Ensure that we don't leave the rx queue empty */
363 efx_schedule_slow_fill(rx_queue);
364 goto out;
365 }
366 } while ((space -= batch_size) >= batch_size);
367
368 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
369 "RX queue %d fast-filled descriptor ring "
370 "to level %d\n", efx_rx_queue_index(rx_queue),
371 rx_queue->added_count - rx_queue->removed_count);
372
373 out:
374 if (rx_queue->notified_count != rx_queue->added_count)
375 efx_nic_notify_rx_desc(rx_queue);
376 }
377
378 void efx_rx_slow_fill(struct timer_list *t)
379 {
380 struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
381
382 /* Post an event to cause NAPI to run and refill the queue */
383 efx_nic_generate_fill_event(rx_queue);
384 ++rx_queue->slow_fill_count;
385 }
386
387 static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
388 struct efx_rx_buffer *rx_buf,
389 int len)
390 {
391 struct efx_nic *efx = rx_queue->efx;
392 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
393
394 if (likely(len <= max_len))
395 return;
396
/* The packet must be discarded, but this is only a fatal error
 * if the caller indicated it was one.
 */
400 rx_buf->flags |= EFX_RX_PKT_DISCARD;
401
402 if (net_ratelimit())
403 netif_err(efx, rx_err, efx->net_dev,
404 "RX queue %d overlength RX event (%#x > %#x)\n",
405 efx_rx_queue_index(rx_queue), len, max_len);
406
407 efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
408 }
409
410 /* Pass a received packet up through GRO. GRO can handle pages
411 * regardless of checksum state and skbs with a good checksum.
412 */
413 static void
414 efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
415 unsigned int n_frags, u8 *eh)
416 {
417 struct napi_struct *napi = &channel->napi_str;
418 gro_result_t gro_result;
419 struct efx_nic *efx = channel->efx;
420 struct sk_buff *skb;
421
422 skb = napi_get_frags(napi);
423 if (unlikely(!skb)) {
424 struct efx_rx_queue *rx_queue;
425
426 rx_queue = efx_channel_get_rx_queue(channel);
427 efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
428 return;
429 }
430
431 if (efx->net_dev->features & NETIF_F_RXHASH)
432 skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
433 PKT_HASH_TYPE_L3);
434 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
435 CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
436 skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
437
438 for (;;) {
439 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
440 rx_buf->page, rx_buf->page_offset,
441 rx_buf->len);
442 rx_buf->page = NULL;
443 skb->len += rx_buf->len;
444 if (skb_shinfo(skb)->nr_frags == n_frags)
445 break;
446
447 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
448 }
449
450 skb->data_len = skb->len;
451 skb->truesize += n_frags * efx->rx_buffer_truesize;
452
453 skb_record_rx_queue(skb, channel->rx_queue.core_index);
454
455 gro_result = napi_gro_frags(napi);
456 if (gro_result != GRO_DROP)
457 channel->irq_mod_score += 2;
458 }
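/* Note on the GRO path above: napi_get_frags() supplies an skb with no
 * linear data, so every RX buffer is attached purely as a page fragment
 * and ownership of the pages passes to the skb (rx_buf->page is cleared).
 * Unless GRO drops the packet, irq_mod_score is bumped, which the driver
 * uses elsewhere for adaptive interrupt moderation.
 */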
459
460 /* Allocate and construct an SKB around page fragments */
461 static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
462 struct efx_rx_buffer *rx_buf,
463 unsigned int n_frags,
464 u8 *eh, int hdr_len)
465 {
466 struct efx_nic *efx = channel->efx;
467 struct sk_buff *skb;
468
469 /* Allocate an SKB to store the headers */
470 skb = netdev_alloc_skb(efx->net_dev,
471 efx->rx_ip_align + efx->rx_prefix_size +
472 hdr_len);
473 if (unlikely(skb == NULL)) {
474 atomic_inc(&efx->n_rx_noskb_drops);
475 return NULL;
476 }
477
478 EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);
479
480 memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
481 efx->rx_prefix_size + hdr_len);
482 skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
483 __skb_put(skb, hdr_len);
484
485 /* Append the remaining page(s) onto the frag list */
486 if (rx_buf->len > hdr_len) {
487 rx_buf->page_offset += hdr_len;
488 rx_buf->len -= hdr_len;
489
490 for (;;) {
491 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
492 rx_buf->page, rx_buf->page_offset,
493 rx_buf->len);
494 rx_buf->page = NULL;
495 skb->len += rx_buf->len;
496 skb->data_len += rx_buf->len;
497 if (skb_shinfo(skb)->nr_frags == n_frags)
498 break;
499
500 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
501 }
502 } else {
503 __free_pages(rx_buf->page, efx->rx_buffer_order);
504 rx_buf->page = NULL;
505 n_frags = 0;
506 }
507
508 skb->truesize += n_frags * efx->rx_buffer_truesize;
509
510 /* Move past the ethernet header */
511 skb->protocol = eth_type_trans(skb, efx->net_dev);
512
513 skb_mark_napi_id(skb, &channel->napi_str);
514
515 return skb;
516 }
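/* Note on efx_rx_mk_skb(): at most hdr_len bytes (capped at
 * EFX_SKB_HEADERS, i.e. 128 bytes, by the caller) are copied into the
 * skb's linear area together with the NIC's RX prefix, which is then
 * hidden again with skb_reserve().  Any remaining payload stays in the
 * original pages and is attached as fragments; if the whole packet fitted
 * into the copied headers, the page is freed instead.
 */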
517
518 void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
519 unsigned int n_frags, unsigned int len, u16 flags)
520 {
521 struct efx_nic *efx = rx_queue->efx;
522 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
523 struct efx_rx_buffer *rx_buf;
524
525 rx_queue->rx_packets++;
526
527 rx_buf = efx_rx_buffer(rx_queue, index);
528 rx_buf->flags |= flags;
529
530 /* Validate the number of fragments and completed length */
531 if (n_frags == 1) {
532 if (!(flags & EFX_RX_PKT_PREFIX_LEN))
533 efx_rx_packet__check_len(rx_queue, rx_buf, len);
534 } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
535 unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
536 unlikely(len > n_frags * efx->rx_dma_len) ||
537 unlikely(!efx->rx_scatter)) {
538 /* If this isn't an explicit discard request, either
539 * the hardware or the driver is broken.
540 */
541 WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
542 rx_buf->flags |= EFX_RX_PKT_DISCARD;
543 }
544
545 netif_vdbg(efx, rx_status, efx->net_dev,
546 "RX queue %d received ids %x-%x len %d %s%s\n",
547 efx_rx_queue_index(rx_queue), index,
548 (index + n_frags - 1) & rx_queue->ptr_mask, len,
549 (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
550 (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
551
/* Discard the packet if instructed to do so, but process the
 * previous receive first.
 */
555 if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
556 efx_rx_flush_packet(channel);
557 efx_discard_rx_packet(channel, rx_buf, n_frags);
558 return;
559 }
560
561 if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
562 rx_buf->len = len;
563
564 /* Release and/or sync the DMA mapping - assumes all RX buffers
565 * consumed in-order per RX queue.
566 */
567 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
568
569 /* Prefetch nice and early so data will (hopefully) be in cache by
570 * the time we look at it.
571 */
572 prefetch(efx_rx_buf_va(rx_buf));
573
574 rx_buf->page_offset += efx->rx_prefix_size;
575 rx_buf->len -= efx->rx_prefix_size;
576
577 if (n_frags > 1) {
578 /* Release/sync DMA mapping for additional fragments.
579 * Fix length for last fragment.
580 */
581 unsigned int tail_frags = n_frags - 1;
582
583 for (;;) {
584 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
585 if (--tail_frags == 0)
586 break;
587 efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
588 }
589 rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
590 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
591 }
592
593 /* All fragments have been DMA-synced, so recycle pages. */
594 rx_buf = efx_rx_buffer(rx_queue, index);
595 efx_recycle_rx_pages(channel, rx_buf, n_frags);
596
597 /* Pipeline receives so that we give time for packet headers to be
598 * prefetched into cache.
599 */
600 efx_rx_flush_packet(channel);
601 channel->rx_pkt_n_frags = n_frags;
602 channel->rx_pkt_index = index;
603 }
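/* efx_rx_packet() above is the first half of receive processing, run when
 * an RX completion event is handled: it validates the completion, syncs
 * the DMA mappings, prefetches the headers and recycles the pages, but
 * merely records the packet in rx_pkt_index/rx_pkt_n_frags.  The second
 * half, __efx_rx_packet(), runs for the previous packet via
 * efx_rx_flush_packet(), by which time the prefetched headers should
 * already be in cache.
 */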
604
605 static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
606 struct efx_rx_buffer *rx_buf,
607 unsigned int n_frags)
608 {
609 struct sk_buff *skb;
610 u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);
611
612 skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
613 if (unlikely(skb == NULL)) {
614 struct efx_rx_queue *rx_queue;
615
616 rx_queue = efx_channel_get_rx_queue(channel);
617 efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
618 return;
619 }
620 skb_record_rx_queue(skb, channel->rx_queue.core_index);
621
622 /* Set the SKB flags */
623 skb_checksum_none_assert(skb);
624 if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
625 skb->ip_summed = CHECKSUM_UNNECESSARY;
626 skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
627 }
628
629 efx_rx_skb_attach_timestamp(channel, skb);
630
631 if (channel->type->receive_skb)
632 if (channel->type->receive_skb(channel, skb))
633 return;
634
635 /* Pass the packet up */
636 if (channel->rx_list != NULL)
637 /* Add to list, will pass up later */
638 list_add_tail(&skb->list, channel->rx_list);
639 else
640 /* No list, so pass it up now */
641 netif_receive_skb(skb);
642 }
643
644 /* Handle a received packet. Second half: Touches packet payload. */
645 void __efx_rx_packet(struct efx_channel *channel)
646 {
647 struct efx_nic *efx = channel->efx;
648 struct efx_rx_buffer *rx_buf =
649 efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
650 u8 *eh = efx_rx_buf_va(rx_buf);
651
652 /* Read length from the prefix if necessary. This already
653 * excludes the length of the prefix itself.
654 */
655 if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
656 rx_buf->len = le16_to_cpup((__le16 *)
657 (eh + efx->rx_packet_len_offset));
658
/* If we're running a loopback self-test, pass the packet directly to the
 * loopback layer and free the rx_buf here.
 */
662 if (unlikely(efx->loopback_selftest)) {
663 struct efx_rx_queue *rx_queue;
664
665 efx_loopback_rx_packet(efx, eh, rx_buf->len);
666 rx_queue = efx_channel_get_rx_queue(channel);
667 efx_free_rx_buffers(rx_queue, rx_buf,
668 channel->rx_pkt_n_frags);
669 goto out;
670 }
671
672 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
673 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
674
675 if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
676 efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
677 else
678 efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
679 out:
680 channel->rx_pkt_n_frags = 0;
681 }
682
683 int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
684 {
685 struct efx_nic *efx = rx_queue->efx;
686 unsigned int entries;
687 int rc;
688
689 /* Create the smallest power-of-two aligned ring */
690 entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
691 EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
692 rx_queue->ptr_mask = entries - 1;
693
694 netif_dbg(efx, probe, efx->net_dev,
695 "creating RX queue %d size %#x mask %#x\n",
696 efx_rx_queue_index(rx_queue), efx->rxq_entries,
697 rx_queue->ptr_mask);
698
699 /* Allocate RX buffers */
700 rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
701 GFP_KERNEL);
702 if (!rx_queue->buffer)
703 return -ENOMEM;
704
705 rc = efx_nic_probe_rx(rx_queue);
706 if (rc) {
707 kfree(rx_queue->buffer);
708 rx_queue->buffer = NULL;
709 }
710
711 return rc;
712 }
713
714 static void efx_init_rx_recycle_ring(struct efx_nic *efx,
715 struct efx_rx_queue *rx_queue)
716 {
717 unsigned int bufs_in_recycle_ring, page_ring_size;
718
719 /* Set the RX recycle ring size */
720 #ifdef CONFIG_PPC64
721 bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
722 #else
723 if (iommu_present(&pci_bus_type))
724 bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
725 else
726 bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
727 #endif /* CONFIG_PPC64 */
728
729 page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
730 efx->rx_bufs_per_page);
731 rx_queue->page_ring = kcalloc(page_ring_size,
732 sizeof(*rx_queue->page_ring), GFP_KERNEL);
733 rx_queue->page_ptr_mask = page_ring_size - 1;
734 }
735
736 void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
737 {
738 struct efx_nic *efx = rx_queue->efx;
739 unsigned int max_fill, trigger, max_trigger;
740
741 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
742 "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
743
744 /* Initialise ptr fields */
745 rx_queue->added_count = 0;
746 rx_queue->notified_count = 0;
747 rx_queue->removed_count = 0;
748 rx_queue->min_fill = -1U;
749 efx_init_rx_recycle_ring(efx, rx_queue);
750
751 rx_queue->page_remove = 0;
752 rx_queue->page_add = rx_queue->page_ptr_mask + 1;
753 rx_queue->page_recycle_count = 0;
754 rx_queue->page_recycle_failed = 0;
755 rx_queue->page_recycle_full = 0;
756
757 /* Initialise limit fields */
758 max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
759 max_trigger =
760 max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
761 if (rx_refill_threshold != 0) {
762 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
763 if (trigger > max_trigger)
764 trigger = max_trigger;
765 } else {
766 trigger = max_trigger;
767 }
768
769 rx_queue->max_fill = max_fill;
770 rx_queue->fast_fill_trigger = trigger;
771 rx_queue->refill_enabled = true;
772
773 /* Set up RX descriptor ring */
774 efx_nic_init_rx(rx_queue);
775 }
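/* Note on the limits chosen above: max_fill leaves EFX_RXD_HEAD_ROOM
 * descriptors spare (one plus a maximally fragmented packet's worth), and
 * the default fast_fill_trigger sits one full page batch below max_fill.
 * If the rx_refill_threshold module parameter is set, the trigger becomes
 * that percentage of max_fill instead, capped at the default.
 */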
776
777 void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
778 {
779 int i;
780 struct efx_nic *efx = rx_queue->efx;
781 struct efx_rx_buffer *rx_buf;
782
783 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
784 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
785
786 del_timer_sync(&rx_queue->slow_fill);
787
788 /* Release RX buffers from the current read ptr to the write ptr */
789 if (rx_queue->buffer) {
790 for (i = rx_queue->removed_count; i < rx_queue->added_count;
791 i++) {
792 unsigned index = i & rx_queue->ptr_mask;
793 rx_buf = efx_rx_buffer(rx_queue, index);
794 efx_fini_rx_buffer(rx_queue, rx_buf);
795 }
796 }
797
798 /* Unmap and release the pages in the recycle ring. Remove the ring. */
799 for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
800 struct page *page = rx_queue->page_ring[i];
801 struct efx_rx_page_state *state;
802
803 if (page == NULL)
804 continue;
805
806 state = page_address(page);
807 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
808 PAGE_SIZE << efx->rx_buffer_order,
809 DMA_FROM_DEVICE);
810 put_page(page);
811 }
812 kfree(rx_queue->page_ring);
813 rx_queue->page_ring = NULL;
814 }
815
816 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
817 {
818 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
819 "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
820
821 efx_nic_remove_rx(rx_queue);
822
823 kfree(rx_queue->buffer);
824 rx_queue->buffer = NULL;
825 }
826
827
828 module_param(rx_refill_threshold, uint, 0444);
829 MODULE_PARM_DESC(rx_refill_threshold,
830 "RX descriptor ring refill threshold (%)");
831
832 #ifdef CONFIG_RFS_ACCEL
833
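/* Accelerated RFS overview: efx_filter_rfs() below is the driver's
 * ndo_rx_flow_steer hook.  It is called from the receive path, so rather
 * than programming the hardware directly it dissects the flow, stashes a
 * request in one of the EFX_RPS_MAX_IN_FLIGHT rps_slot entries and
 * schedules efx_filter_rfs_work(), which inserts the filter and records
 * the flow ID so that __efx_filter_rfs_expire() can age it out later.
 */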
834 static void efx_filter_rfs_work(struct work_struct *data)
835 {
836 struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
837 work);
838 struct efx_nic *efx = netdev_priv(req->net_dev);
839 struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
840 int slot_idx = req - efx->rps_slot;
841 struct efx_arfs_rule *rule;
842 u16 arfs_id = 0;
843 int rc;
844
845 rc = efx->type->filter_insert(efx, &req->spec, true);
846 if (rc >= 0)
847 rc %= efx->type->max_rx_ip_filters;
848 if (efx->rps_hash_table) {
849 spin_lock_bh(&efx->rps_hash_lock);
850 rule = efx_rps_hash_find(efx, &req->spec);
851 /* The rule might have already gone, if someone else's request
852 * for the same spec was already worked and then expired before
853 * we got around to our work. In that case we have nothing
854 * tying us to an arfs_id, meaning that as soon as the filter
855 * is considered for expiry it will be removed.
856 */
857 if (rule) {
858 if (rc < 0)
859 rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
860 else
861 rule->filter_id = rc;
862 arfs_id = rule->arfs_id;
863 }
864 spin_unlock_bh(&efx->rps_hash_lock);
865 }
866 if (rc >= 0) {
867 /* Remember this so we can check whether to expire the filter
868 * later.
869 */
870 mutex_lock(&efx->rps_mutex);
871 channel->rps_flow_id[rc] = req->flow_id;
872 ++channel->rfs_filters_added;
873 mutex_unlock(&efx->rps_mutex);
874
875 if (req->spec.ether_type == htons(ETH_P_IP))
876 netif_info(efx, rx_status, efx->net_dev,
877 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
878 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
879 req->spec.rem_host, ntohs(req->spec.rem_port),
880 req->spec.loc_host, ntohs(req->spec.loc_port),
881 req->rxq_index, req->flow_id, rc, arfs_id);
882 else
883 netif_info(efx, rx_status, efx->net_dev,
884 "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
885 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
886 req->spec.rem_host, ntohs(req->spec.rem_port),
887 req->spec.loc_host, ntohs(req->spec.loc_port),
888 req->rxq_index, req->flow_id, rc, arfs_id);
889 }
890
891 /* Release references */
892 clear_bit(slot_idx, &efx->rps_slot_map);
893 dev_put(req->net_dev);
894 }
895
896 int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
897 u16 rxq_index, u32 flow_id)
898 {
899 struct efx_nic *efx = netdev_priv(net_dev);
900 struct efx_async_filter_insertion *req;
901 struct efx_arfs_rule *rule;
902 struct flow_keys fk;
903 int slot_idx;
904 bool new;
905 int rc;
906
907 /* find a free slot */
908 for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
909 if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
910 break;
911 if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
912 return -EBUSY;
913
914 if (flow_id == RPS_FLOW_ID_INVALID) {
915 rc = -EINVAL;
916 goto out_clear;
917 }
918
919 if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
920 rc = -EPROTONOSUPPORT;
921 goto out_clear;
922 }
923
924 if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
925 rc = -EPROTONOSUPPORT;
926 goto out_clear;
927 }
928 if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
929 rc = -EPROTONOSUPPORT;
930 goto out_clear;
931 }
932
933 req = efx->rps_slot + slot_idx;
934 efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
935 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
936 rxq_index);
937 req->spec.match_flags =
938 EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
939 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
940 EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
941 req->spec.ether_type = fk.basic.n_proto;
942 req->spec.ip_proto = fk.basic.ip_proto;
943
944 if (fk.basic.n_proto == htons(ETH_P_IP)) {
945 req->spec.rem_host[0] = fk.addrs.v4addrs.src;
946 req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
947 } else {
948 memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
949 sizeof(struct in6_addr));
950 memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
951 sizeof(struct in6_addr));
952 }
953
954 req->spec.rem_port = fk.ports.src;
955 req->spec.loc_port = fk.ports.dst;
956
957 if (efx->rps_hash_table) {
958 /* Add it to ARFS hash table */
959 spin_lock(&efx->rps_hash_lock);
960 rule = efx_rps_hash_add(efx, &req->spec, &new);
961 if (!rule) {
962 rc = -ENOMEM;
963 goto out_unlock;
964 }
965 if (new)
966 rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
967 rc = rule->arfs_id;
968 /* Skip if existing or pending filter already does the right thing */
969 if (!new && rule->rxq_index == rxq_index &&
970 rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
971 goto out_unlock;
972 rule->rxq_index = rxq_index;
973 rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
974 spin_unlock(&efx->rps_hash_lock);
975 } else {
976 /* Without an ARFS hash table, we just use arfs_id 0 for all
977 * filters. This means if multiple flows hash to the same
978 * flow_id, all but the most recently touched will be eligible
979 * for expiry.
980 */
981 rc = 0;
982 }
983
984 /* Queue the request */
985 dev_hold(req->net_dev = net_dev);
986 INIT_WORK(&req->work, efx_filter_rfs_work);
987 req->rxq_index = rxq_index;
988 req->flow_id = flow_id;
989 schedule_work(&req->work);
990 return rc;
991 out_unlock:
992 spin_unlock(&efx->rps_hash_lock);
993 out_clear:
994 clear_bit(slot_idx, &efx->rps_slot_map);
995 return rc;
996 }
997
998 bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
999 {
1000 bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
1001 unsigned int channel_idx, index, size;
1002 u32 flow_id;
1003
1004 if (!mutex_trylock(&efx->rps_mutex))
1005 return false;
1006 expire_one = efx->type->filter_rfs_expire_one;
1007 channel_idx = efx->rps_expire_channel;
1008 index = efx->rps_expire_index;
1009 size = efx->type->max_rx_ip_filters;
1010 while (quota--) {
1011 struct efx_channel *channel = efx_get_channel(efx, channel_idx);
1012 flow_id = channel->rps_flow_id[index];
1013
1014 if (flow_id != RPS_FLOW_ID_INVALID &&
1015 expire_one(efx, flow_id, index)) {
1016 netif_info(efx, rx_status, efx->net_dev,
1017 "expired filter %d [queue %u flow %u]\n",
1018 index, channel_idx, flow_id);
1019 channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
1020 }
1021 if (++index == size) {
1022 if (++channel_idx == efx->n_channels)
1023 channel_idx = 0;
1024 index = 0;
1025 }
1026 }
1027 efx->rps_expire_channel = channel_idx;
1028 efx->rps_expire_index = index;
1029
1030 mutex_unlock(&efx->rps_mutex);
1031 return true;
1032 }
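/* Note on __efx_filter_rfs_expire(): rps_expire_channel/rps_expire_index
 * persist between calls, so each invocation resumes the round-robin scan
 * where the previous one stopped, checking at most @quota flow-table
 * entries across all channels while holding rps_mutex (and doing nothing
 * if the mutex is contended).
 */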
1033
1034 #endif /* CONFIG_RFS_ACCEL */
1035
1036 /**
1037 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
1038 * @spec: Specification to test
1039 *
1040 * Return: %true if the specification is a non-drop RX filter that
1041 * matches a local MAC address I/G bit value of 1 or matches a local
1042 * IPv4 or IPv6 address value in the respective multicast address
1043 * range. Otherwise %false.
1044 */
1045 bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
1046 {
1047 if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
1048 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
1049 return false;
1050
1051 if (spec->match_flags &
1052 (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
1053 is_multicast_ether_addr(spec->loc_mac))
1054 return true;
1055
1056 if ((spec->match_flags &
1057 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
1058 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
1059 if (spec->ether_type == htons(ETH_P_IP) &&
1060 ipv4_is_multicast(spec->loc_host[0]))
1061 return true;
1062 if (spec->ether_type == htons(ETH_P_IPV6) &&
1063 ((const u8 *)spec->loc_host)[0] == 0xff)
1064 return true;
1065 }
1066
1067 return false;
1068 }