/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 ****************************************************************************/
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Maximum size of a buffer sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u
/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When GRO is in use then the second method has a lower overhead,
 * since we don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB  = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 *   - Since pushing and popping descriptors are separated by the rx_queue
 *     size, the watermarks should be ~rxd_size.
 *   - The performance win of using page-based allocation for GRO is less
 *     than the performance hit of using page-based allocation for non-GRO,
 *     so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated by each channel:
 *
 *   rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
 *                      RX_ALLOC_FACTOR_SKB)
 *
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;

#define RX_ALLOC_LEVEL_GRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_GRO 1
#define RX_ALLOC_FACTOR_SKB (-2)
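
/* Worked example of the hysteresis above: each packet delivered by GRO
 * adds RX_ALLOC_FACTOR_GRO (+1) to rx_alloc_level, and each packet
 * delivered via netif_receive_skb() adds RX_ALLOC_FACTOR_SKB (-2).
 * Page-based allocation is therefore only selected once GRO-coalesced
 * traffic has dominated for long enough to push the level above
 * RX_ALLOC_LEVEL_GRO (0x2000); a mixed workload with fewer than two
 * GRO'd packets per non-GRO packet drifts back towards skb allocation.
 */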
/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;
/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2
/* Offset of ethernet header within page */
static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
                                             struct efx_rx_buffer *buf)
{
        return buf->page_offset + efx->type->rx_buffer_hash_size;
}

static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
        return PAGE_SIZE << efx->rx_buffer_order;
}
static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
        if (buf->flags & EFX_RX_BUF_PAGE)
                return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
        else
                return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
}
static inline u32 efx_rx_buf_hash(const u8 *eh)
{
        /* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
        return __le32_to_cpup((const __le32 *)(eh - 4));
#else
        const u8 *data = eh - 4;
        return (u32)data[0]       |
               (u32)data[1] << 8  |
               (u32)data[2] << 16 |
               (u32)data[3] << 24;
#endif
}
/**
 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
 * @rx_queue:		Efx RX queue
 *
 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
 * struct efx_rx_buffer for each one.  Returns a negative error code or
 * 0 on success.  May fail having inserted fewer than EFX_RX_BATCH
 * buffers.
 */
static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        struct net_device *net_dev = efx->net_dev;
        struct efx_rx_buffer *rx_buf;
        struct sk_buff *skb;
        int skb_len = efx->rx_buffer_len;
        unsigned index, count;

        for (count = 0; count < EFX_RX_BATCH; ++count) {
                index = rx_queue->added_count & rx_queue->ptr_mask;
                rx_buf = efx_rx_buffer(rx_queue, index);

                rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
                if (unlikely(!skb))
                        return -ENOMEM;

                /* Adjust the SKB for padding */
                skb_reserve(skb, NET_IP_ALIGN);
                rx_buf->len = skb_len - NET_IP_ALIGN;
                rx_buf->flags = 0;

                rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
                                                  skb->data, rx_buf->len,
                                                  DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
                                               rx_buf->dma_addr))) {
                        dev_kfree_skb_any(skb);
                        rx_buf->u.skb = NULL;
                        return -EIO;
                }

                ++rx_queue->added_count;
                ++rx_queue->alloc_skb_count;
        }

        return 0;
}
/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates struct efx_rx_buffers for each one.  Returns a negative error
 * code or 0 on success.  If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
        struct page *page;
        unsigned int page_offset;
        struct efx_rx_page_state *state;
        dma_addr_t dma_addr;
        unsigned index, count;

        /* We can split a page between two buffers */
        BUILD_BUG_ON(EFX_RX_BATCH & 1);

        for (count = 0; count < EFX_RX_BATCH; ++count) {
                page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
                                   efx->rx_buffer_order);
                if (unlikely(page == NULL))
                        return -ENOMEM;
                dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
                                        efx_rx_buf_size(efx),
                                        DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
                        __free_pages(page, efx->rx_buffer_order);
                        return -EIO;
                }
                state = page_address(page);
                state->refcnt = 0;
                state->dma_addr = dma_addr;

                dma_addr += sizeof(struct efx_rx_page_state);
                page_offset = sizeof(struct efx_rx_page_state);

        split:
                index = rx_queue->added_count & rx_queue->ptr_mask;
                rx_buf = efx_rx_buffer(rx_queue, index);
                rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
                rx_buf->u.page = page;
                rx_buf->page_offset = page_offset;
                rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
                rx_buf->flags = EFX_RX_BUF_PAGE;
                ++rx_queue->added_count;
                ++rx_queue->alloc_page_count;
                ++state->refcnt;

                if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
                        /* Use the second half of the page */
                        get_page(page);
                        dma_addr += (PAGE_SIZE >> 1);
                        page_offset += (PAGE_SIZE >> 1);
                        ++count;
                        goto split;
                }
        }

        return 0;
}
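
/* Unmap an RX buffer.  For a page-backed buffer the page may be shared
 * between two buffers, so the DMA unmap is deferred (via state->refcnt)
 * until the last buffer sharing the page is consumed; until then only
 * the used portion is synced for the CPU.
 */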
static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                struct efx_rx_buffer *rx_buf,
                                unsigned int used_len)
{
        if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
                struct efx_rx_page_state *state;

                state = page_address(rx_buf->u.page);
                if (--state->refcnt == 0) {
                        dma_unmap_page(&efx->pci_dev->dev,
                                       state->dma_addr,
                                       efx_rx_buf_size(efx),
                                       DMA_FROM_DEVICE);
                } else if (used_len) {
                        dma_sync_single_for_cpu(&efx->pci_dev->dev,
                                                rx_buf->dma_addr, used_len,
                                                DMA_FROM_DEVICE);
                }
        } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
                dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
                                 rx_buf->len, DMA_FROM_DEVICE);
        }
}
static void efx_free_rx_buffer(struct efx_nic *efx,
                               struct efx_rx_buffer *rx_buf)
{
        if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
                __free_pages(rx_buf->u.page, efx->rx_buffer_order);
                rx_buf->u.page = NULL;
        } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
                dev_kfree_skb_any(rx_buf->u.skb);
                rx_buf->u.skb = NULL;
        }
}
static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                               struct efx_rx_buffer *rx_buf)
{
        efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
        efx_free_rx_buffer(rx_queue->efx, rx_buf);
}
/* Attempt to resurrect the other receive buffer that used to share this page,
 * which had previously been passed up to the kernel and freed. */
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
                                    struct efx_rx_buffer *rx_buf)
{
        struct efx_rx_page_state *state = page_address(rx_buf->u.page);
        struct efx_rx_buffer *new_buf;
        unsigned fill_level, index;

        /* +1 because efx_rx_packet() incremented removed_count. +1 because
         * we'd like to insert an additional descriptor whilst leaving
         * EFX_RXD_HEAD_ROOM for the non-recycle path */
        fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
        if (unlikely(fill_level > rx_queue->max_fill)) {
                /* We could place "state" on a list, and drain the list in
                 * efx_fast_push_rx_descriptors(). For now, this will do. */
                return;
        }

        ++state->refcnt;
        get_page(rx_buf->u.page);

        index = rx_queue->added_count & rx_queue->ptr_mask;
        new_buf = efx_rx_buffer(rx_queue, index);
        new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
        new_buf->u.page = rx_buf->u.page;
        new_buf->len = rx_buf->len;
        new_buf->flags = EFX_RX_BUF_PAGE;
        ++rx_queue->added_count;
}
/* Recycle the given rx buffer directly back into the rx_queue. There is
 * always room to add this buffer, because we've just popped a buffer. */
static void efx_recycle_rx_buffer(struct efx_channel *channel,
                                  struct efx_rx_buffer *rx_buf)
{
        struct efx_nic *efx = channel->efx;
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
        struct efx_rx_buffer *new_buf;
        unsigned index;

        rx_buf->flags &= EFX_RX_BUF_PAGE;

        if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
            efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
            page_count(rx_buf->u.page) == 1)
                efx_resurrect_rx_buffer(rx_queue, rx_buf);

        index = rx_queue->added_count & rx_queue->ptr_mask;
        new_buf = efx_rx_buffer(rx_queue, index);

        memcpy(new_buf, rx_buf, sizeof(*new_buf));
        rx_buf->u.page = NULL;
        ++rx_queue->added_count;
}
/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        unsigned fill_level;
        int space, rc = 0;

        /* Calculate current fill level, and exit if we don't need to fill */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
        EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
        if (fill_level >= rx_queue->fast_fill_trigger)
                goto out;

        /* Record minimum fill level */
        if (unlikely(fill_level < rx_queue->min_fill)) {
                if (fill_level)
                        rx_queue->min_fill = fill_level;
        }

        space = rx_queue->max_fill - fill_level;
        EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filling descriptor ring from"
                   " level %d to level %d using %s allocation\n",
                   efx_rx_queue_index(rx_queue), fill_level,
                   rx_queue->max_fill,
                   channel->rx_alloc_push_pages ? "page" : "skb");

        do {
                if (channel->rx_alloc_push_pages)
                        rc = efx_init_rx_buffers_page(rx_queue);
                else
                        rc = efx_init_rx_buffers_skb(rx_queue);
                if (unlikely(rc)) {
                        /* Ensure that we don't leave the rx queue empty */
                        if (rx_queue->added_count == rx_queue->removed_count)
                                efx_schedule_slow_fill(rx_queue);
                        goto out;
                }
        } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filled descriptor ring "
                   "to level %d\n", efx_rx_queue_index(rx_queue),
                   rx_queue->added_count - rx_queue->removed_count);

 out:
        if (rx_queue->notified_count != rx_queue->added_count)
                efx_nic_notify_rx_desc(rx_queue);
}
void efx_rx_slow_fill(unsigned long context)
{
        struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

        /* Post an event to cause NAPI to run and refill the queue */
        efx_nic_generate_fill_event(rx_queue);
        ++rx_queue->slow_fill_count;
}
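
/* Check the RX event length against the size of the buffer that was
 * actually posted, flagging the packet for discard (and, in the worst
 * case, scheduling a reset) if the hardware has overrun the buffer.
 */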
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                                     struct efx_rx_buffer *rx_buf,
                                     int len, bool *leak_packet)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

        if (likely(len <= max_len))
                return;

        /* The packet must be discarded, but this is only a fatal error
         * if the caller indicated it was
         */
        rx_buf->flags |= EFX_RX_PKT_DISCARD;

        if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d seriously overlength "
                                  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
                                  efx_rx_queue_index(rx_queue), len, max_len,
                                  efx->type->rx_buffer_padding);
                /* If this buffer was skb-allocated, then the meta
                 * data at the end of the skb will be trashed. So
                 * we have no choice but to leak the fragment.
                 */
                *leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
                efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
        } else {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d overlength RX event "
                                  "(0x%x > 0x%x)\n",
                                  efx_rx_queue_index(rx_queue), len, max_len);
        }

        efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void efx_rx_packet_gro(struct efx_channel *channel,
                              struct efx_rx_buffer *rx_buf,
                              const u8 *eh)
{
        struct napi_struct *napi = &channel->napi_str;
        gro_result_t gro_result;

        if (rx_buf->flags & EFX_RX_BUF_PAGE) {
                struct efx_nic *efx = channel->efx;
                struct page *page = rx_buf->u.page;
                struct sk_buff *skb;

                rx_buf->u.page = NULL;

                skb = napi_get_frags(napi);
                if (!skb) {
                        put_page(page);
                        return;
                }

                if (efx->net_dev->features & NETIF_F_RXHASH)
                        skb->rxhash = efx_rx_buf_hash(eh);

                skb_fill_page_desc(skb, 0, page,
                                   efx_rx_buf_offset(efx, rx_buf), rx_buf->len);

                skb->len = rx_buf->len;
                skb->data_len = rx_buf->len;
                skb->truesize += rx_buf->len;
                skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
                                  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

                skb_record_rx_queue(skb, channel->rx_queue.core_index);

                gro_result = napi_gro_frags(napi);
        } else {
                struct sk_buff *skb = rx_buf->u.skb;

                EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
                rx_buf->u.skb = NULL;
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                gro_result = napi_gro_receive(napi, skb);
        }

        if (gro_result == GRO_NORMAL) {
                channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
        } else if (gro_result != GRO_DROP) {
                channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
                channel->irq_mod_score += 2;
        }
}
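
/* Handle a received packet.  First half: does not touch the packet
 * payload.  Receives are pipelined through channel->rx_pkt so that the
 * prefetched headers have time to reach the cache before
 * __efx_rx_packet() looks at them.
 */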
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                   unsigned int len, u16 flags)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        struct efx_rx_buffer *rx_buf;
        bool leak_packet = false;

        rx_buf = efx_rx_buffer(rx_queue, index);
        rx_buf->flags |= flags;

        /* This allows the refill path to post another buffer.
         * EFX_RXD_HEAD_ROOM ensures that the slot we are using
         * isn't overwritten yet.
         */
        rx_queue->removed_count++;

        /* Validate the length encoded in the event vs the descriptor pushed */
        efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);

        netif_vdbg(efx, rx_status, efx->net_dev,
                   "RX queue %d received id %x at %llx+%x %s%s\n",
                   efx_rx_queue_index(rx_queue), index,
                   (unsigned long long)rx_buf->dma_addr, len,
                   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
                   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

        /* Discard packet, if instructed to do so */
        if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
                if (unlikely(leak_packet))
                        channel->n_skbuff_leaks++;
                else
                        efx_recycle_rx_buffer(channel, rx_buf);

                /* Don't hold off the previous receive */
                rx_buf = NULL;
                goto out;
        }

        /* Release and/or sync DMA mapping - assumes all RX buffers
         * consumed in-order per RX queue
         */
        efx_unmap_rx_buffer(efx, rx_buf, len);

        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
         */
        prefetch(efx_rx_buf_eh(efx, rx_buf));

        /* Pipeline receives so that we give time for packet headers to be
         * prefetched into cache.
         */
        rx_buf->len = len - efx->type->rx_buffer_hash_size;
out:
        if (channel->rx_pkt)
                __efx_rx_packet(channel, channel->rx_pkt);
        channel->rx_pkt = rx_buf;
}
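
/* Deliver a received skb to the kernel stack; this is the non-GRO path */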
static void efx_rx_deliver(struct efx_channel *channel,
                           struct efx_rx_buffer *rx_buf)
{
        struct sk_buff *skb;

        /* We now own the SKB */
        skb = rx_buf->u.skb;
        rx_buf->u.skb = NULL;

        /* Set the SKB flags */
        skb_checksum_none_assert(skb);

        /* Record the rx_queue */
        skb_record_rx_queue(skb, channel->rx_queue.core_index);

        /* Pass the packet up */
        if (channel->type->receive_skb)
                channel->type->receive_skb(channel, skb);
        else
                netif_receive_skb(skb);

        /* Update allocation strategy method */
        channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}
/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
{
        struct efx_nic *efx = channel->efx;
        u8 *eh = efx_rx_buf_eh(efx, rx_buf);

        /* If we're in loopback test, then pass the packet directly to the
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
                efx_loopback_rx_packet(efx, eh, rx_buf->len);
                efx_free_rx_buffer(efx, rx_buf);
                return;
        }

        if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
                struct sk_buff *skb = rx_buf->u.skb;

                prefetch(skb_shinfo(skb));

                skb_reserve(skb, efx->type->rx_buffer_hash_size);
                skb_put(skb, rx_buf->len);

                if (efx->net_dev->features & NETIF_F_RXHASH)
                        skb->rxhash = efx_rx_buf_hash(eh);

                /* Move past the ethernet header. rx_buf->data still points
                 * at the ethernet header */
                skb->protocol = eth_type_trans(skb, efx->net_dev);

                skb_record_rx_queue(skb, channel->rx_queue.core_index);
        }

        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

        if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) &&
            !channel->type->receive_skb)
                efx_rx_packet_gro(channel, rx_buf, eh);
        else
                efx_rx_deliver(channel, rx_buf);
}
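
/* Choose this channel's RX buffer allocation method for the next NAPI
 * poll interval, based on the rx_alloc_level hysteresis described in
 * the rx_alloc_method comment at the top of this file.
 */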
void efx_rx_strategy(struct efx_channel *channel)
{
        enum efx_rx_alloc_method method = rx_alloc_method;

        if (channel->type->receive_skb) {
                channel->rx_alloc_push_pages = false;
                return;
        }

        /* Only makes sense to use page based allocation if GRO is enabled */
        if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
                method = RX_ALLOC_METHOD_SKB;
        } else if (method == RX_ALLOC_METHOD_AUTO) {
                /* Constrain the rx_alloc_level */
                if (channel->rx_alloc_level < 0)
                        channel->rx_alloc_level = 0;
                else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
                        channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

                /* Decide on the allocation method */
                method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
                          RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
        }

        /* Push the option */
        channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}
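
/* Allocate the software descriptor ring for an RX queue and probe the
 * corresponding hardware queue.
 */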
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        rx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating RX queue %d size %#x mask %#x\n",
                  efx_rx_queue_index(rx_queue), efx->rxq_entries,
                  rx_queue->ptr_mask);

        /* Allocate RX buffers */
        rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
                                   GFP_KERNEL);
        if (!rx_queue->buffer)
                return -ENOMEM;

        rc = efx_nic_probe_rx(rx_queue);
        if (rc) {
                kfree(rx_queue->buffer);
                rx_queue->buffer = NULL;
        }
        return rc;
}
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int max_fill, trigger, max_trigger;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

        /* Initialise ptr fields */
        rx_queue->added_count = 0;
        rx_queue->notified_count = 0;
        rx_queue->removed_count = 0;
        rx_queue->min_fill = -1U;

        /* Initialise limit fields */
        max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
        max_trigger = max_fill - EFX_RX_BATCH;
        if (rx_refill_threshold != 0) {
                trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
                if (trigger > max_trigger)
                        trigger = max_trigger;
        } else {
                trigger = max_trigger;
        }

        rx_queue->max_fill = max_fill;
        rx_queue->fast_fill_trigger = trigger;

        /* Set up RX descriptor ring */
        rx_queue->enabled = true;
        efx_nic_init_rx(rx_queue);
}
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
        int i;
        struct efx_rx_buffer *rx_buf;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

        /* A flush failure might have left rx_queue->enabled */
        rx_queue->enabled = false;

        del_timer_sync(&rx_queue->slow_fill);
        efx_nic_fini_rx(rx_queue);

        /* Release RX buffers.  NB: start at index 0, not the current HW ptr */
        if (rx_queue->buffer) {
                for (i = 0; i <= rx_queue->ptr_mask; i++) {
                        rx_buf = efx_rx_buffer(rx_queue, i);
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                }
        }
}
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

        efx_nic_remove_rx(rx_queue);

        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
}
module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
                 "RX descriptor ring refill threshold (%)");