/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 ****************************************************************************/

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "rx.h"
#include "efx.h"
#include "falcon.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u

/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When LRO is in use, the second method has a lower overhead, since we
 * don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB  = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 *   - Since pushing and popping descriptors are separated by the rx_queue
 *     size, the watermarks should be ~rxd_size.
 *   - The performance win from using page-based allocation for LRO is less
 *     than the performance hit of using page-based allocation for non-LRO
 *     traffic, so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated by each channel:
 *
 *   rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO :
 *                      RX_ALLOC_FACTOR_SKB)
 *
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_PAGE;

#define RX_ALLOC_LEVEL_LRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_LRO 1
#define RX_ALLOC_FACTOR_SKB (-2)
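
/*
 * Worked example of the hysteresis (illustrative, derived from the factors
 * above): each LRO'd frame adds 1 to rx_alloc_level and each non-LRO frame
 * subtracts 2, so the level only climbs while more than two thirds of
 * frames are being aggregated.  Once it exceeds RX_ALLOC_LEVEL_LRO (0x2000)
 * the channel switches to page-based allocation; the cap at
 * RX_ALLOC_LEVEL_MAX (0x3000) means at most 0x1000 / 2 = 2048 consecutive
 * non-LRO frames are needed before it switches back.
 */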

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold = 90;

/* This is the percentage fill level to which an RX queue will be refilled
 * when the "RX refill threshold" is reached.
 */
static unsigned int rx_refill_limit = 95;

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2

/* Macros for zero-order pages (potentially) containing multiple RX buffers */
#define RX_DATA_OFFSET(_data)                                           \
        (((unsigned long) (_data)) & (PAGE_SIZE-1))
#define RX_BUF_OFFSET(_rx_buf)                                          \
        RX_DATA_OFFSET((_rx_buf)->data)

#define RX_PAGE_SIZE(_efx)                                              \
        (PAGE_SIZE * (1u << (_efx)->rx_buffer_order))
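
/*
 * Note (illustrative): RX_DATA_OFFSET() is the offset of a buffer within
 * its page (the low PAGE_SIZE-1 bits of the address), and RX_PAGE_SIZE()
 * is the size of the possibly compound page used for page-based
 * allocation.  For example, with 4K pages, order 0 and a ~1.5K buffer
 * rounded up to the 512-byte packing boundary used below, two RX buffers
 * fit in each page.
 */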

/**************************************************************************
 *
 * Linux generic LRO handling
 *
 **************************************************************************
 */

static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
                               void **tcpudp_hdr, u64 *hdr_flags, void *priv)
{
        struct efx_channel *channel = (struct efx_channel *)priv;
        struct iphdr *iph;
        struct tcphdr *th;

        iph = (struct iphdr *)skb->data;
        if (skb->protocol != htons(ETH_P_IP) || iph->protocol != IPPROTO_TCP)
                goto fail;

        th = (struct tcphdr *)(skb->data + iph->ihl * 4);

        *tcpudp_hdr = th;
        *ip_hdr = iph;
        *hdr_flags = LRO_IPV4 | LRO_TCP;

        channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
        return 0;
fail:
        channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
        return -1;
}

static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
                            void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
                            void *priv)
{
        struct efx_channel *channel = (struct efx_channel *)priv;
        struct ethhdr *eh;
        struct iphdr *iph;

        /* We support EtherII and VLAN encapsulated IPv4 */
        eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset);
        *mac_hdr = eh;

        if (eh->h_proto == htons(ETH_P_IP)) {
                iph = (struct iphdr *)(eh + 1);
        } else {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)eh;
                if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
                        goto fail;

                iph = (struct iphdr *)(veh + 1);
        }
        *ip_hdr = iph;

        /* We can only do LRO over TCP */
        if (iph->protocol != IPPROTO_TCP)
                goto fail;

        *hdr_flags = LRO_IPV4 | LRO_TCP;
        *tcpudp_hdr = (struct tcphdr *)((u8 *) iph + iph->ihl * 4);

        channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
        return 0;
fail:
        channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
        return -1;
}

int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx)
{
        size_t s = sizeof(struct net_lro_desc) * EFX_MAX_LRO_DESCRIPTORS;
        struct net_lro_desc *lro_arr;

        /* Allocate the LRO descriptors structure */
        lro_arr = kzalloc(s, GFP_KERNEL);
        if (lro_arr == NULL)
                return -ENOMEM;

        lro_mgr->lro_arr = lro_arr;
        lro_mgr->max_desc = EFX_MAX_LRO_DESCRIPTORS;
        lro_mgr->max_aggr = EFX_MAX_LRO_AGGR;
        lro_mgr->frag_align_pad = EFX_PAGE_SKB_ALIGN;

        lro_mgr->get_skb_header = efx_lro_get_skb_hdr;
        lro_mgr->get_frag_header = efx_get_frag_hdr;
        lro_mgr->dev = efx->net_dev;

        lro_mgr->features = LRO_F_NAPI;

        /* We can pass packets up with the checksum intact */
        lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
        lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;

        return 0;
}

void efx_lro_fini(struct net_lro_mgr *lro_mgr)
{
        kfree(lro_mgr->lro_arr);
        lro_mgr->lro_arr = NULL;
}

/**
 * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
 *
 * @rx_queue:		Efx RX queue
 * @rx_buf:		RX buffer structure to populate
 *
 * This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.  Returns a negative error code or 0 on success.
 */
static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
                                         struct efx_rx_buffer *rx_buf)
{
        struct efx_nic *efx = rx_queue->efx;
        struct net_device *net_dev = efx->net_dev;
        int skb_len = efx->rx_buffer_len;

        rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
        if (unlikely(!rx_buf->skb))
                return -ENOMEM;

        /* Adjust the SKB for padding and checksum */
        skb_reserve(rx_buf->skb, NET_IP_ALIGN);
        rx_buf->len = skb_len - NET_IP_ALIGN;
        rx_buf->data = (char *)rx_buf->skb->data;
        rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;

        rx_buf->dma_addr = pci_map_single(efx->pci_dev,
                                          rx_buf->data, rx_buf->len,
                                          PCI_DMA_FROMDEVICE);

        if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) {
                dev_kfree_skb_any(rx_buf->skb);
                rx_buf->skb = NULL;
                return -EIO;
        }

        return 0;
}

/**
 * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
 *
 * @rx_queue:		Efx RX queue
 * @rx_buf:		RX buffer structure to populate
 *
 * This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.  Returns a negative error code or 0 on success.
 */
static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
                                          struct efx_rx_buffer *rx_buf)
{
        struct efx_nic *efx = rx_queue->efx;
        int bytes, space, offset;

        bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;

        /* If there is space left in the previously allocated page,
         * then use it. Otherwise allocate a new one */
        rx_buf->page = rx_queue->buf_page;
        if (rx_buf->page == NULL) {
                dma_addr_t dma_addr;

                rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
                                           efx->rx_buffer_order);
                if (unlikely(rx_buf->page == NULL))
                        return -ENOMEM;

                dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
                                        0, RX_PAGE_SIZE(efx),
                                        PCI_DMA_FROMDEVICE);

                if (unlikely(pci_dma_mapping_error(dma_addr))) {
                        __free_pages(rx_buf->page, efx->rx_buffer_order);
                        rx_buf->page = NULL;
                        return -EIO;
                }

                rx_queue->buf_page = rx_buf->page;
                rx_queue->buf_dma_addr = dma_addr;
                rx_queue->buf_data = ((char *) page_address(rx_buf->page) +
                                      EFX_PAGE_IP_ALIGN);
        }

        offset = RX_DATA_OFFSET(rx_queue->buf_data);
        rx_buf->len = bytes;
        rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
        rx_buf->data = rx_queue->buf_data;

        /* Try to pack multiple buffers per page */
        if (efx->rx_buffer_order == 0) {
                /* The next buffer starts on the next 512 byte boundary */
                rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
                offset += ((bytes + 0x1ff) & ~0x1ff);

                space = RX_PAGE_SIZE(efx) - offset;
                if (space >= bytes) {
                        /* Refs dropped on kernel releasing each skb */
                        get_page(rx_queue->buf_page);
                        goto out;
                }
        }

        /* This is the final RX buffer for this page, so mark it for
         * unmapping */
        rx_queue->buf_page = NULL;
        rx_buf->unmap_addr = rx_queue->buf_dma_addr;

 out:
        return 0;
}

/* This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.
 */
static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
                                     struct efx_rx_buffer *new_rx_buf)
{
        int rc = 0;

        if (rx_queue->channel->rx_alloc_push_pages) {
                new_rx_buf->skb = NULL;
                rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf);
                rx_queue->alloc_page_count++;
        } else {
                new_rx_buf->page = NULL;
                rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf);
                rx_queue->alloc_skb_count++;
        }

        if (unlikely(rc < 0))
                EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__,
                           rx_queue->queue, rc);
        return rc;
}
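
/* Unmap an RX buffer for DMA.  A page-backed buffer is only unmapped here
 * if it was the last buffer carved from its page (rx_buf->unmap_addr is
 * then non-zero); skb-backed buffers are always unmapped. */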
static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
                                       struct efx_rx_buffer *rx_buf)
{
        if (rx_buf->page != NULL) {
                EFX_BUG_ON_PARANOID(rx_buf->skb);
                if (rx_buf->unmap_addr) {
                        pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
                                       RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE);
                        rx_buf->unmap_addr = 0;
                }
        } else if (likely(rx_buf->skb)) {
                pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
                                 rx_buf->len, PCI_DMA_FROMDEVICE);
        }
}
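
/* Free the page or skb backing an RX buffer that will not be passed up
 * the stack. */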
static inline void efx_free_rx_buffer(struct efx_nic *efx,
                                      struct efx_rx_buffer *rx_buf)
{
        if (rx_buf->page) {
                __free_pages(rx_buf->page, efx->rx_buffer_order);
                rx_buf->page = NULL;
        } else if (likely(rx_buf->skb)) {
                dev_kfree_skb_any(rx_buf->skb);
                rx_buf->skb = NULL;
        }
}
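
/* Unmap and free an RX buffer that is being discarded. */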
static inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                                      struct efx_rx_buffer *rx_buf)
{
        efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
        efx_free_rx_buffer(rx_queue->efx, rx_buf);
}

/**
 * __efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 * @retry:		Recheck the fill level
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@fast_fill_limit.  If there is insufficient atomic
 * memory to do so, the caller should retry.
 */
static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
                                          int retry)
{
        struct efx_rx_buffer *rx_buf;
        unsigned fill_level, index;
        int i, space, rc = 0;

        /* Calculate current fill level.  Do this outside the lock,
         * because most of the time we'll end up not wanting to do the
         * fill anyway.
         */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
        EFX_BUG_ON_PARANOID(fill_level >
                            rx_queue->efx->type->rxd_ring_mask + 1);

        /* Don't fill if we don't need to */
        if (fill_level >= rx_queue->fast_fill_trigger)
                return 0;

        /* Record minimum fill level */
        if (unlikely(fill_level < rx_queue->min_fill))
                if (fill_level)
                        rx_queue->min_fill = fill_level;

        /* Acquire RX add lock.  If this lock is contended, then a fast
         * fill must already be in progress (e.g. in the refill
         * tasklet), so we don't need to do anything
         */
        if (!spin_trylock_bh(&rx_queue->add_lock))
                return -1;

 retry:
        /* Recalculate current fill level now that we have the lock */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
        EFX_BUG_ON_PARANOID(fill_level >
                            rx_queue->efx->type->rxd_ring_mask + 1);
        space = rx_queue->fast_fill_limit - fill_level;
        if (space < EFX_RX_BATCH)
                goto out_unlock;

        EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
                  " level %d to level %d using %s allocation\n",
                  rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
                  rx_queue->channel->rx_alloc_push_pages ? "page" : "skb");

        /* Fill the ring in batches of EFX_RX_BATCH buffers, stopping
         * early if a buffer allocation fails. */
        do {
                for (i = 0; i < EFX_RX_BATCH; ++i) {
                        index = (rx_queue->added_count &
                                 rx_queue->efx->type->rxd_ring_mask);
                        rx_buf = efx_rx_buffer(rx_queue, index);
                        rc = efx_init_rx_buffer(rx_queue, rx_buf);
                        if (unlikely(rc))
                                goto out;
                        ++rx_queue->added_count;
                }
        } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

        EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
                  "to level %d\n", rx_queue->queue,
                  rx_queue->added_count - rx_queue->removed_count);

 out:
        /* Send write pointer to card. */
        falcon_notify_rx_desc(rx_queue);

        /* If the fast fill is running from inside the refill tasklet, then
         * for SMP systems it may be running on a different CPU to
         * RX event processing, which means that the fill level may now be
         * out of date. */
        if (unlikely(retry && (rc == 0)))
                goto retry;

 out_unlock:
        spin_unlock_bh(&rx_queue->add_lock);

        return rc;
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@fast_fill_limit.  If there is insufficient memory to do so,
 * it will schedule a work item to immediately continue the fast fill.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
        int rc;

        rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
        if (rc) {
                /* Schedule the work item to run immediately. The hope is
                 * that work is immediately pending to free some memory
                 * (e.g. an RX event or TX completion)
                 */
                efx_schedule_slow_fill(rx_queue, 0);
        }
}
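
/* Work item for the slow-fill path: retries the fast fill after giving
 * the kernel time to free some memory. */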
void efx_rx_work(struct work_struct *data)
{
        struct efx_rx_queue *rx_queue;
        int rc;

        rx_queue = container_of(data, struct efx_rx_queue, work.work);

        if (unlikely(!rx_queue->channel->enabled))
                return;

        EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
                  "%d\n", rx_queue->queue, raw_smp_processor_id());

        ++rx_queue->slow_fill_count;
        /* Push new RX descriptors, allowing at least 1 jiffy for
         * the kernel to free some more memory. */
        rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
        if (rc)
                efx_schedule_slow_fill(rx_queue, 1);
}
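
/* Check the length reported by the RX event against the buffer that was
 * actually posted, flagging over-length packets for discard. */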
static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                                            struct efx_rx_buffer *rx_buf,
                                            int len, int *discard,
                                            int *leak_packet)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

        if (likely(len <= max_len))
                return;

        /* The packet must be discarded, but this is only a fatal error
         * if the caller indicated it was
         */
        *discard = 1;

        if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
                EFX_ERR_RL(efx, " RX queue %d seriously overlength "
                           "RX event (0x%x > 0x%x+0x%x). Leaking\n",
                           rx_queue->queue, len, max_len,
                           efx->type->rx_buffer_padding);
                /* If this buffer was skb-allocated, then the meta
                 * data at the end of the skb will be trashed. So
                 * we have no choice but to leak the fragment.
                 */
                *leak_packet = (rx_buf->skb != NULL);
                efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
        } else {
                EFX_ERR_RL(efx, " RX queue %d overlength RX event "
                           "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len);
        }

        rx_queue->channel->n_rx_overlength++;
}

/* Pass a received packet up through the generic LRO stack
 *
 * Handles driverlink veto, and passes the fragment up via
 * the appropriate LRO method
 */
static inline void efx_rx_packet_lro(struct efx_channel *channel,
                                     struct efx_rx_buffer *rx_buf)
{
        struct net_lro_mgr *lro_mgr = &channel->lro_mgr;
        void *priv = channel;

        /* Pass the skb/page into the LRO engine */
        if (rx_buf->page) {
                struct skb_frag_struct frags;

                frags.page = rx_buf->page;
                frags.page_offset = RX_BUF_OFFSET(rx_buf);
                frags.size = rx_buf->len;

                lro_receive_frags(lro_mgr, &frags, rx_buf->len,
                                  rx_buf->len, priv, 0);

                EFX_BUG_ON_PARANOID(rx_buf->skb);
                rx_buf->page = NULL;
        } else {
                EFX_BUG_ON_PARANOID(!rx_buf->skb);

                lro_receive_skb(lro_mgr, rx_buf->skb, priv);
                rx_buf->skb = NULL;
        }
}

/* Allocate and construct an SKB around a struct page. */
static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
                                            struct efx_nic *efx,
                                            int hdr_len)
{
        struct sk_buff *skb;

        /* Allocate an SKB to store the headers */
        skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
        if (unlikely(skb == NULL)) {
                EFX_ERR_RL(efx, "RX out of memory for skb\n");
                return NULL;
        }

        EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags);
        EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_reserve(skb, EFX_PAGE_SKB_ALIGN);

        skb->len = rx_buf->len;
        skb->truesize = rx_buf->len + sizeof(struct sk_buff);
        memcpy(skb->data, rx_buf->data, hdr_len);
        skb->tail += hdr_len;

        /* Append the remaining page onto the frag list */
        if (unlikely(rx_buf->len > hdr_len)) {
                struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
                frag->page = rx_buf->page;
                frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len;
                frag->size = skb->len - hdr_len;
                skb_shinfo(skb)->nr_frags = 1;
                skb->data_len = frag->size;
        } else {
                __free_pages(rx_buf->page, efx->rx_buffer_order);
                skb->data_len = 0;
        }

        /* Ownership has transferred from the rx_buf to skb */
        rx_buf->page = NULL;

        /* Move past the ethernet header */
        skb->protocol = eth_type_trans(skb, efx->net_dev);

        return skb;
}
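
/* Handle a received packet.  First half: does not touch the packet
 * payload; the receive is pipelined so that headers can be prefetched
 * into cache before __efx_rx_packet() looks at them. */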
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                   unsigned int len, int checksummed, int discard)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
        int leak_packet = 0;

        rx_buf = efx_rx_buffer(rx_queue, index);
        EFX_BUG_ON_PARANOID(!rx_buf->data);
        EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
        EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));

        /* This allows the refill path to post another buffer.
         * EFX_RXD_HEAD_ROOM ensures that the slot we are using
         * isn't overwritten yet.
         */
        rx_queue->removed_count++;

        /* Validate the length encoded in the event vs the descriptor pushed */
        efx_rx_packet__check_len(rx_queue, rx_buf, len,
                                 &discard, &leak_packet);

        EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n",
                  rx_queue->queue, index,
                  (unsigned long long)rx_buf->dma_addr, len,
                  (checksummed ? " [SUMMED]" : ""),
                  (discard ? " [DISCARD]" : ""));

        /* Discard packet, if instructed to do so */
        if (unlikely(discard)) {
                if (unlikely(leak_packet))
                        rx_queue->channel->n_skbuff_leaks++;
                else
                        /* We haven't called efx_unmap_rx_buffer yet,
                         * so fini the entire rx_buffer here */
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                return;
        }

        /* Release card resources - assumes all RX buffers consumed in-order
         * per RX queue */
        efx_unmap_rx_buffer(efx, rx_buf);

        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
         */
        prefetch(rx_buf->data);

        /* Pipeline receives so that we give time for packet headers to be
         * prefetched into cache.
         */
        rx_buf->len = len;
        if (rx_queue->channel->rx_pkt)
                __efx_rx_packet(rx_queue->channel,
                                rx_queue->channel->rx_pkt,
                                rx_queue->channel->rx_pkt_csummed);
        rx_queue->channel->rx_pkt = rx_buf;
        rx_queue->channel->rx_pkt_csummed = checksummed;
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel,
                     struct efx_rx_buffer *rx_buf, int checksummed)
{
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;
        int lro = efx->net_dev->features & NETIF_F_LRO;

        /* If we're in loopback test, then pass the packet directly to the
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
                efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
                efx_free_rx_buffer(efx, rx_buf);
                goto done;
        }

        if (rx_buf->skb) {
                prefetch(skb_shinfo(rx_buf->skb));

                skb_put(rx_buf->skb, rx_buf->len);

                /* Move past the ethernet header. rx_buf->data still points
                 * at the ethernet header */
                rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
                                                       efx->net_dev);
        }

        /* Both our generic-LRO and SFC-SSR support skb and page based
         * allocation, but neither support switching from one to the
         * other on the fly. If we spot that the allocation mode has
         * changed, then flush the LRO state.
         */
        if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) {
                efx_flush_lro(channel);
                channel->rx_alloc_pop_pages = (rx_buf->page != NULL);
        }
        if (likely(checksummed && lro)) {
                efx_rx_packet_lro(channel, rx_buf);
                goto done;
        }

        /* Form an skb if required */
        if (rx_buf->page) {
                int hdr_len = min(rx_buf->len, EFX_SKB_HEADERS);
                skb = efx_rx_mk_skb(rx_buf, efx, hdr_len);
                if (unlikely(skb == NULL)) {
                        efx_free_rx_buffer(efx, rx_buf);
                        goto done;
                }
        } else {
                /* We now own the SKB */
                skb = rx_buf->skb;
                rx_buf->skb = NULL;
        }

        EFX_BUG_ON_PARANOID(rx_buf->page);
        EFX_BUG_ON_PARANOID(rx_buf->skb);
        EFX_BUG_ON_PARANOID(!skb);

        /* Set the SKB flags */
        if (unlikely(!checksummed || !efx->rx_checksum_enabled))
                skb->ip_summed = CHECKSUM_NONE;

        /* Pass the packet up */
        netif_receive_skb(skb);

        /* Update allocation strategy method */
        channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;

 done:
        efx->net_dev->last_rx = jiffies;
}
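
/* Decide whether this channel should push skb- or page-based RX buffers,
 * applying the hysteresis described at the top of this file when the
 * allocation method is RX_ALLOC_METHOD_AUTO. */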
void efx_rx_strategy(struct efx_channel *channel)
{
        enum efx_rx_alloc_method method = rx_alloc_method;

        /* Only makes sense to use page based allocation if LRO is enabled */
        if (!(channel->efx->net_dev->features & NETIF_F_LRO)) {
                method = RX_ALLOC_METHOD_SKB;
        } else if (method == RX_ALLOC_METHOD_AUTO) {
                /* Constrain the rx_alloc_level */
                if (channel->rx_alloc_level < 0)
                        channel->rx_alloc_level = 0;
                else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
                        channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

                /* Decide on the allocation method */
                method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
                          RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
        }

        /* Push the option */
        channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}
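
/* Allocate the software buffer array for an RX queue and probe the
 * hardware descriptor ring. */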
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int rxq_size;
        int rc;

        EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);

        /* Allocate RX buffers */
        rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
        rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
        if (!rx_queue->buffer) {
                rc = -ENOMEM;
                goto fail1;
        }

        rc = falcon_probe_rx(rx_queue);
        if (rc)
                goto fail2;

        return 0;

 fail2:
        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
 fail1:
        return rc;
}
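
/* Initialise an RX queue's counters and fill levels and set up its
 * hardware descriptor ring. */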
int efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int max_fill, trigger, limit;

        EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);

        /* Initialise ptr fields */
        rx_queue->added_count = 0;
        rx_queue->notified_count = 0;
        rx_queue->removed_count = 0;
        rx_queue->min_fill = -1U;
        rx_queue->min_overfill = -1U;

        /* Initialise limit fields */
        max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM;
        trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
        limit = max_fill * min(rx_refill_limit, 100U) / 100U;

        rx_queue->max_fill = max_fill;
        rx_queue->fast_fill_trigger = trigger;
        rx_queue->fast_fill_limit = limit;

        /* Set up RX descriptor ring */
        return falcon_init_rx(rx_queue);
}
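
/* Tear down an RX queue: release all outstanding RX buffers and any page
 * that is part-way through being split into buffers. */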
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
        int i;
        struct efx_rx_buffer *rx_buf;

        EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);

        falcon_fini_rx(rx_queue);

        /* Release RX buffers NB start at index 0 not current HW ptr */
        if (rx_queue->buffer) {
                for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) {
                        rx_buf = efx_rx_buffer(rx_queue, i);
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                }
        }

        /* For a page that is part-way through splitting into RX buffers */
        if (rx_queue->buf_page != NULL) {
                pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
                               RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE);
                __free_pages(rx_queue->buf_page,
                             rx_queue->efx->rx_buffer_order);
                rx_queue->buf_page = NULL;
        }
}
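
/* Free the resources allocated by efx_probe_rx_queue(). */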
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
        EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);

        falcon_remove_rx(rx_queue);

        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
}

void efx_flush_lro(struct efx_channel *channel)
{
        lro_flush_all(&channel->lro_mgr);
}

module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
                 "RX descriptor ring fast/slow fill threshold (%)");