/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH 8

/* Maximum size of a buffer sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 64u

/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When GRO is in use then the second method has a lower overhead,
 * since we don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB  = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 *   - Since pushing and popping descriptors are separated by the rx_queue
 *     size, the watermarks should be ~rxd_size.
 *   - The performance win from using page-based allocation for GRO is less
 *     than the performance hit of using page-based allocation for non-GRO
 *     traffic, so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated by each channel:
 *
 *   rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
 *                      RX_ALLOC_FACTOR_SKB)
 *
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;

#define RX_ALLOC_LEVEL_GRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_GRO 1
#define RX_ALLOC_FACTOR_SKB (-2)

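/* Illustrative arithmetic (not part of the driver logic): each frame the
 * GRO engine accepts nudges rx_alloc_level up by 1, while each frame
 * delivered as a plain skb pulls it down by 2.  Starting from 0, roughly
 * 0x2000 (8192) net GRO-accepted frames must accumulate before a channel
 * crosses RX_ALLOC_LEVEL_GRO and switches to page-based allocation, and
 * mixed traffic decays the level twice as fast as it builds.
 */
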
/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold = 90;

/* This is the percentage fill level to which an RX queue will be refilled
 * when the "RX refill threshold" is reached.
 */
static unsigned int rx_refill_limit = 95;

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2

/* Offset of ethernet header within page */
static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
					     struct efx_rx_buffer *buf)
{
	/* Offset is always within one page, so we don't need to consider
	 * the page order.
	 */
	return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) +
		efx->type->rx_buffer_hash_size);
}

static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
	return PAGE_SIZE << efx->rx_buffer_order;
}

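/* Worked example (assuming a 4KiB page and a NIC that prepends a 4-byte
 * flow hash, i.e. rx_buffer_hash_size == 4): a buffer DMA-mapped at page
 * offset 0x800 has its Ethernet header at page offset 0x804.  Masking
 * dma_addr with PAGE_SIZE - 1 recovers the in-page offset even when the
 * buffer lives in the second half of a split page.
 */
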
static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
	if (buf->is_page)
		return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
	else
		return ((u8 *)buf->u.skb->data +
			efx->type->rx_buffer_hash_size);
}

static inline u32 efx_rx_buf_hash(const u8 *eh)
{
	/* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
	return __le32_to_cpup((const __le32 *)(eh - 4));
#else
	const u8 *data = eh - 4;
	return ((u32)data[0] |
		(u32)data[1] << 8 |
		(u32)data[2] << 16 |
		(u32)data[3] << 24);
#endif
}

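/* Buffer layout note: the hardware writes any RX flow hash immediately
 * before the Ethernet header, so the last four hash bytes sit at eh - 4.
 * The open-coded byte assembly above is the fallback for architectures
 * where an unaligned little-endian 32-bit load would fault or be slow.
 */
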
/**
 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
 * struct efx_rx_buffer for each one. Return a negative error code or 0
 * on success. May fail having only inserted fewer than EFX_RX_BATCH
 * buffers.
 */
static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct net_device *net_dev = efx->net_dev;
	struct efx_rx_buffer *rx_buf;
	struct sk_buff *skb;
	int skb_len = efx->rx_buffer_len;
	unsigned index, count;

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);

		rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
		if (unlikely(!skb))
			return -ENOMEM;

		/* Adjust the SKB for padding and checksum */
		skb_reserve(skb, NET_IP_ALIGN);
		rx_buf->len = skb_len - NET_IP_ALIGN;
		rx_buf->is_page = false;
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
						  skb->data, rx_buf->len,
						  PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
						   rx_buf->dma_addr))) {
			dev_kfree_skb_any(skb);
			rx_buf->u.skb = NULL;
			return -EIO;
		}

		++rx_queue->added_count;
		++rx_queue->alloc_skb_count;
	}

	return 0;
}

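/* Note on the skb_reserve() above: NET_IP_ALIGN offsets the buffer so
 * that the IP header lands on a 4-byte boundary after the 14-byte
 * Ethernet header, which is why the DMA mapping covers only
 * skb_len - NET_IP_ALIGN bytes.
 */
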
/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates struct efx_rx_buffers for each one. Return a negative error
 * code or 0 on success. If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	void *page_addr;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	/* We can split a page between two buffers */
	BUILD_BUG_ON(EFX_RX_BATCH & 1);

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
				   efx->rx_buffer_order);
		if (unlikely(page == NULL))
			return -ENOMEM;
		dma_addr = pci_map_page(efx->pci_dev, page, 0,
					efx_rx_buf_size(efx),
					PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
			__free_pages(page, efx->rx_buffer_order);
			return -EIO;
		}
		page_addr = page_address(page);
		state = page_addr;
		state->refcnt = 0;
		state->dma_addr = dma_addr;

		page_addr += sizeof(struct efx_rx_page_state);
		dma_addr += sizeof(struct efx_rx_page_state);

	split:
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->u.page = page;
		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
		rx_buf->is_page = true;
		++rx_queue->added_count;
		++rx_queue->alloc_page_count;
		++state->refcnt;

		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
			/* Use the second half of the page */
			get_page(page);
			dma_addr += (PAGE_SIZE >> 1);
			page_addr += (PAGE_SIZE >> 1);
			++count;
			goto split;
		}
	}

	return 0;
}

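/* Split sketch (assuming PAGE_SIZE == 4096 and a standard 1500-byte MTU):
 * rx_buffer_len is well under EFX_RX_HALF_PAGE, so each loop iteration
 * fills two descriptors from one page: the first buffer starts just after
 * the struct efx_rx_page_state header at the front of the page, the second
 * at the 2KiB boundary, and state->refcnt ends up at 2.  With jumbo frames
 * the half-page test fails and each page backs a single buffer.
 */
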
static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->is_page && rx_buf->u.page) {
		struct efx_rx_page_state *state;

		state = page_address(rx_buf->u.page);
		if (--state->refcnt == 0) {
			pci_unmap_page(efx->pci_dev,
				       state->dma_addr,
				       efx_rx_buf_size(efx),
				       PCI_DMA_FROMDEVICE);
		}
	} else if (!rx_buf->is_page && rx_buf->u.skb) {
		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
				 rx_buf->len, PCI_DMA_FROMDEVICE);
	}
}

static void efx_free_rx_buffer(struct efx_nic *efx,
			       struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->is_page && rx_buf->u.page) {
		__free_pages(rx_buf->u.page, efx->rx_buffer_order);
		rx_buf->u.page = NULL;
	} else if (!rx_buf->is_page && rx_buf->u.skb) {
		dev_kfree_skb_any(rx_buf->u.skb);
		rx_buf->u.skb = NULL;
	}
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
	efx_free_rx_buffer(rx_queue->efx, rx_buf);
}

/* Attempt to resurrect the other receive buffer that used to share this page,
 * which had previously been passed up to the kernel and freed. */
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
				    struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_page_state *state = page_address(rx_buf->u.page);
	struct efx_rx_buffer *new_buf;
	unsigned fill_level, index;

	/* +1 because efx_rx_packet() incremented removed_count. +1 because
	 * we'd like to insert an additional descriptor whilst leaving
	 * EFX_RXD_HEAD_ROOM for the non-recycle path */
	fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
	if (unlikely(fill_level > rx_queue->max_fill)) {
		/* We could place "state" on a list, and drain the list in
		 * efx_fast_push_rx_descriptors(). For now, this will do. */
		return;
	}

	++state->refcnt;
	get_page(rx_buf->u.page);

	index = rx_queue->added_count & rx_queue->ptr_mask;
	new_buf = efx_rx_buffer(rx_queue, index);
	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
	new_buf->u.page = rx_buf->u.page;
	new_buf->len = rx_buf->len;
	new_buf->is_page = true;
	++rx_queue->added_count;
}

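/* Addressing note: the two buffers sharing a page sit exactly
 * PAGE_SIZE / 2 apart, so XORing the DMA address with PAGE_SIZE >> 1
 * flips between them, yielding the sibling buffer's address with no
 * extra bookkeeping.
 */
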
/* Recycle the given rx buffer directly back into the rx_queue. There is
 * always room to add this buffer, because we've just popped a buffer. */
static void efx_recycle_rx_buffer(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_rx_buffer *new_buf;
	unsigned index;

	if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
	    page_count(rx_buf->u.page) == 1)
		efx_resurrect_rx_buffer(rx_queue, rx_buf);

	index = rx_queue->added_count & rx_queue->ptr_mask;
	new_buf = efx_rx_buffer(rx_queue, index);

	memcpy(new_buf, rx_buf, sizeof(*new_buf));
	rx_buf->u.page = NULL;
	++rx_queue->added_count;
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->fast_fill_limit. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	unsigned fill_level;
	int space, rc = 0;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	space = rx_queue->fast_fill_limit - fill_level;
	if (space < EFX_RX_BATCH)
		goto out;

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d using %s allocation\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->fast_fill_limit,
		   channel->rx_alloc_push_pages ? "page" : "skb");

	do {
		if (channel->rx_alloc_push_pages)
			rc = efx_init_rx_buffers_page(rx_queue);
		else
			rc = efx_init_rx_buffers_skb(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

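/* Refill behaviour sketch: a poll that finds the fill level below
 * fast_fill_trigger tops the ring up toward fast_fill_limit in batches of
 * EFX_RX_BATCH (8), stopping once less than a full batch of headroom
 * remains below the limit, and rings the doorbell at most once on the way
 * out via efx_nic_notify_rx_desc().
 */
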
void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(channel);
	++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len, bool *discard,
				     bool *leak_packet)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	*discard = true;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  efx_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		/* If this buffer was skb-allocated, then the meta
		 * data at the end of the skb will be trashed. So
		 * we have no choice but to leak the fragment.
		 */
		*leak_packet = !rx_buf->is_page;
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  efx_rx_queue_index(rx_queue), len, max_len);
	}

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through the generic GRO stack
 *
 * Handles driverlink veto, and passes the fragment up via
 * the appropriate GRO method
 */
static void efx_rx_packet_gro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf,
			      const u8 *eh, bool checksummed)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;

	/* Pass the skb/page into the GRO engine */
	if (rx_buf->is_page) {
		struct efx_nic *efx = channel->efx;
		struct page *page = rx_buf->u.page;
		struct sk_buff *skb;

		rx_buf->u.page = NULL;

		skb = napi_get_frags(napi);
		if (!skb) {
			put_page(page);
			return;
		}

		if (efx->net_dev->features & NETIF_F_RXHASH)
			skb->rxhash = efx_rx_buf_hash(eh);

		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->frags[0].page_offset =
			efx_rx_buf_offset(efx, rx_buf);
		skb_shinfo(skb)->frags[0].size = rx_buf->len;
		skb_shinfo(skb)->nr_frags = 1;

		skb->len = rx_buf->len;
		skb->data_len = rx_buf->len;
		skb->truesize += rx_buf->len;
		skb->ip_summed =
			checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;

		skb_record_rx_queue(skb, channel->channel);

		gro_result = napi_gro_frags(napi);
	} else {
		struct sk_buff *skb = rx_buf->u.skb;

		EFX_BUG_ON_PARANOID(!checksummed);
		rx_buf->u.skb = NULL;

		gro_result = napi_gro_receive(napi, skb);
	}

	if (gro_result == GRO_NORMAL) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
	} else if (gro_result != GRO_DROP) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
		channel->irq_mod_score += 2;
	}
}

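/* Outcome handling above: GRO_NORMAL means the stack took the frame
 * without aggregating it, which counts against page-based allocation; any
 * other non-drop result counts towards it and also bumps irq_mod_score,
 * which feeds the driver's adaptive interrupt moderation.
 */
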
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int len, bool checksummed, bool discard)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;
	bool leak_packet = false;

	rx_buf = efx_rx_buffer(rx_queue, index);

	/* This allows the refill path to post another buffer.
	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
	 * isn't overwritten yet.
	 */
	rx_queue->removed_count++;

	/* Validate the length encoded in the event vs the descriptor pushed */
	efx_rx_packet__check_len(rx_queue, rx_buf, len,
				 &discard, &leak_packet);

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received id %x at %llx+%x %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (unsigned long long)rx_buf->dma_addr, len,
		   (checksummed ? " [SUMMED]" : ""),
		   (discard ? " [DISCARD]" : ""));

	/* Discard packet, if instructed to do so */
	if (unlikely(discard)) {
		if (unlikely(leak_packet))
			channel->n_skbuff_leaks++;
		else
			efx_recycle_rx_buffer(channel, rx_buf);

		/* Don't hold off the previous receive */
		rx_buf = NULL;
		goto out;
	}

	/* Release card resources - assumes all RX buffers consumed in-order
	 * per RX queue
	 */
	efx_unmap_rx_buffer(efx, rx_buf);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_eh(efx, rx_buf));

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	rx_buf->len = len - efx->type->rx_buffer_hash_size;
out:
	if (channel->rx_pkt)
		__efx_rx_packet(channel,
				channel->rx_pkt, channel->rx_pkt_csummed);
	channel->rx_pkt = rx_buf;
	channel->rx_pkt_csummed = checksummed;
}

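/* Pipelining note: each call delivers the *previous* packet (held in
 * channel->rx_pkt) and parks the current one, so the prefetch issued above
 * has a full packet's worth of processing time to pull the new headers
 * into cache before __efx_rx_packet() touches them.
 */
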
/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel,
		     struct efx_rx_buffer *rx_buf, bool checksummed)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;
	u8 *eh = efx_rx_buf_eh(efx, rx_buf);

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		efx_free_rx_buffer(efx, rx_buf);
		return;
	}

	if (!rx_buf->is_page) {
		skb = rx_buf->u.skb;

		prefetch(skb_shinfo(skb));

		skb_reserve(skb, efx->type->rx_buffer_hash_size);
		skb_put(skb, rx_buf->len);

		if (efx->net_dev->features & NETIF_F_RXHASH)
			skb->rxhash = efx_rx_buf_hash(eh);

		/* Move past the ethernet header. rx_buf->data still points
		 * at the ethernet header */
		skb->protocol = eth_type_trans(skb, efx->net_dev);

		skb_record_rx_queue(skb, channel->channel);
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		checksummed = false;

	if (likely(checksummed || rx_buf->is_page)) {
		efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
		return;
	}

	/* We now own the SKB */
	skb = rx_buf->u.skb;
	rx_buf->u.skb = NULL;

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);

	/* Pass the packet up */
	netif_receive_skb(skb);

	/* Update allocation strategy method */
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}

void efx_rx_strategy(struct efx_channel *channel)
{
	enum efx_rx_alloc_method method = rx_alloc_method;

	/* Only makes sense to use page based allocation if GRO is enabled */
	if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
		method = RX_ALLOC_METHOD_SKB;
	} else if (method == RX_ALLOC_METHOD_AUTO) {
		/* Constrain the rx_alloc_level */
		if (channel->rx_alloc_level < 0)
			channel->rx_alloc_level = 0;
		else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
			channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

		/* Decide on the allocation method */
		method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
			  RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
	}

	/* Push the option */
	channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}

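/* Usage note (an assumption about the wider driver, not visible in this
 * file): this is expected to run once per NAPI poll, so that the clamped
 * rx_alloc_level accumulated by efx_rx_packet_gro() is re-evaluated at
 * poll granularity, matching the "per NAPI poll interval" description in
 * the rx_alloc_method comment above.
 */
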
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}
	return rc;
}

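/* Sizing example (illustrative values): a requested rxq_entries of 1000
 * rounds up to a 1024-entry ring, giving ptr_mask = 0x3ff.  Masking the
 * monotonically increasing added_count/removed_count with ptr_mask is
 * what lets every index computation above avoid an explicit modulo.
 */
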
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, limit;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
	limit = max_fill * min(rx_refill_limit, 100U) / 100U;

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->fast_fill_limit = limit;

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

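/* Watermark arithmetic with the default percentages (90/95) and an
 * illustrative 512-entry ring: max_fill = 510 (EFX_RXD_HEAD_ROOM reserves
 * two descriptors), fast_fill_trigger = 459 and fast_fill_limit = 484.
 * The min(..., 100U) clamps guard against a module parameter above 100%.
 */
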
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);
	efx_nic_fini_rx(rx_queue);

	/* Release RX buffers.  NB: start at index 0, not the current HW ptr */
	if (rx_queue->buffer) {
		for (i = 0; i <= rx_queue->ptr_mask; i++) {
			rx_buf = efx_rx_buffer(rx_queue, i);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}


module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring fast/slow fill threshold (%)");