/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "rx.h"
#include "efx.h"
#include "falcon.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH 8

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 64u

/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When LRO is in use then the second method has a lower overhead,
 * since we don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB  = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 *   - Since pushing and popping descriptors are separated by the rx_queue
 *     size, the watermarks should be ~rxd_size.
 *   - The performance win from using page-based allocation for LRO is less
 *     than the performance hit of using page-based allocation for non-LRO,
 *     so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated by each channel:
 *
 *   rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO :
 *                      RX_ALLOC_FACTOR_SKB)
 *
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
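
/*
 * With the factors defined below (+1 per LRO-aggregated packet, -2 per
 * skb-delivered packet), rx_alloc_level only climbs while more than two
 * thirds of a channel's packets are being aggregated by LRO, so bursts
 * of non-LRO traffic quickly pull the strategy back towards skbs.
 */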
static int rx_alloc_method = RX_ALLOC_METHOD_PAGE;

#define RX_ALLOC_LEVEL_LRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_LRO 1
#define RX_ALLOC_FACTOR_SKB (-2)

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold = 90;

/* This is the percentage fill level to which an RX queue will be refilled
 * when the "RX refill threshold" is reached.
 */
static unsigned int rx_refill_limit = 95;

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2

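/* Return the offset of a pointer within a PAGE_SIZE-sized page */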
static inline unsigned int efx_page_offset(void *p)
{
        return (__force unsigned int)p & (PAGE_SIZE - 1);
}

static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
{
        /* Offset is always within one page, so we don't need to consider
         * the page order.
         */
        return efx_page_offset(buf->data);
}

static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
        return PAGE_SIZE << efx->rx_buffer_order;
}

/**************************************************************************
 *
 * Linux generic LRO handling
 *
 **************************************************************************
 */

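/* Header-extraction callback for the generic LRO engine on skb-backed
 * receives.  Also feeds the allocation heuristic: aggregatable packets
 * score towards page-based allocation, everything else towards skbs.
 */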
static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
                               void **tcpudp_hdr, u64 *hdr_flags, void *priv)
{
        struct efx_channel *channel = (struct efx_channel *)priv;
        struct iphdr *iph;
        struct tcphdr *th;

        iph = (struct iphdr *)skb->data;
        if (skb->protocol != htons(ETH_P_IP) || iph->protocol != IPPROTO_TCP)
                goto fail;

        th = (struct tcphdr *)(skb->data + iph->ihl * 4);

        *tcpudp_hdr = th;
        *ip_hdr = iph;
        *hdr_flags = LRO_IPV4 | LRO_TCP;

        channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
        return 0;
fail:
        channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
        return -1;
}

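/* As efx_lro_get_skb_hdr(), but for page-backed receives passed to the
 * LRO engine as page fragments.
 */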
static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
                            void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
                            void *priv)
{
        struct efx_channel *channel = (struct efx_channel *)priv;
        struct ethhdr *eh;
        struct iphdr *iph;

        /* We support EtherII and VLAN encapsulated IPv4 */
        eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset);
        *mac_hdr = eh;

        if (eh->h_proto == htons(ETH_P_IP)) {
                iph = (struct iphdr *)(eh + 1);
        } else {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)eh;
                if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
                        goto fail;

                iph = (struct iphdr *)(veh + 1);
        }
        *ip_hdr = iph;

        /* We can only do LRO over TCP */
        if (iph->protocol != IPPROTO_TCP)
                goto fail;

        *hdr_flags = LRO_IPV4 | LRO_TCP;
        *tcpudp_hdr = (struct tcphdr *)((u8 *)iph + iph->ihl * 4);

        channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
        return 0;
fail:
        channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
        return -1;
}

int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx)
{
        size_t s = sizeof(struct net_lro_desc) * EFX_MAX_LRO_DESCRIPTORS;
        struct net_lro_desc *lro_arr;

        /* Allocate the LRO descriptors structure */
        lro_arr = kzalloc(s, GFP_KERNEL);
        if (lro_arr == NULL)
                return -ENOMEM;

        lro_mgr->lro_arr = lro_arr;
        lro_mgr->max_desc = EFX_MAX_LRO_DESCRIPTORS;
        lro_mgr->max_aggr = EFX_MAX_LRO_AGGR;
        lro_mgr->frag_align_pad = EFX_PAGE_SKB_ALIGN;

        lro_mgr->get_skb_header = efx_lro_get_skb_hdr;
        lro_mgr->get_frag_header = efx_get_frag_hdr;
        lro_mgr->dev = efx->net_dev;

        lro_mgr->features = LRO_F_NAPI;

        /* We can pass packets up with the checksum intact */
        lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;

        lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;

        return 0;
}

void efx_lro_fini(struct net_lro_mgr *lro_mgr)
{
        kfree(lro_mgr->lro_arr);
        lro_mgr->lro_arr = NULL;
}

/**
 * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
 *
 * @rx_queue:	Efx RX queue
 * @rx_buf:	RX buffer structure to populate
 *
 * This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.  Return a negative error code or 0 on success.
 */
static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
                                         struct efx_rx_buffer *rx_buf)
{
        struct efx_nic *efx = rx_queue->efx;
        struct net_device *net_dev = efx->net_dev;
        int skb_len = efx->rx_buffer_len;

        rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
        if (unlikely(!rx_buf->skb))
                return -ENOMEM;

        /* Adjust the SKB for padding and checksum */
        skb_reserve(rx_buf->skb, NET_IP_ALIGN);
        rx_buf->len = skb_len - NET_IP_ALIGN;
        rx_buf->data = (char *)rx_buf->skb->data;
        rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;

        rx_buf->dma_addr = pci_map_single(efx->pci_dev,
                                          rx_buf->data, rx_buf->len,
                                          PCI_DMA_FROMDEVICE);

        if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) {
                dev_kfree_skb_any(rx_buf->skb);
                rx_buf->skb = NULL;
                return -EIO;
        }

        return 0;
}

/**
 * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
 *
 * @rx_queue:	Efx RX queue
 * @rx_buf:	RX buffer structure to populate
 *
 * This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.  Return a negative error code or 0 on success.
 */
static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
                                          struct efx_rx_buffer *rx_buf)
{
        struct efx_nic *efx = rx_queue->efx;
        int bytes, space, offset;

        bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;

        /* If there is space left in the previously allocated page,
         * then use it. Otherwise allocate a new one */
        rx_buf->page = rx_queue->buf_page;
        if (rx_buf->page == NULL) {
                dma_addr_t dma_addr;

                rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
                                           efx->rx_buffer_order);
                if (unlikely(rx_buf->page == NULL))
                        return -ENOMEM;

                dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
                                        0, efx_rx_buf_size(efx),
                                        PCI_DMA_FROMDEVICE);

                if (unlikely(pci_dma_mapping_error(dma_addr))) {
                        __free_pages(rx_buf->page, efx->rx_buffer_order);
                        rx_buf->page = NULL;
                        return -EIO;
                }

                rx_queue->buf_page = rx_buf->page;
                rx_queue->buf_dma_addr = dma_addr;
                rx_queue->buf_data = ((char *)page_address(rx_buf->page) +
                                      EFX_PAGE_IP_ALIGN);
        }

        offset = efx_page_offset(rx_queue->buf_data);
        rx_buf->len = bytes;
        rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
        rx_buf->data = rx_queue->buf_data;

        /* Try to pack multiple buffers per page */
        if (efx->rx_buffer_order == 0) {
                /* The next buffer starts on the next 512 byte boundary */
                rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
                offset += ((bytes + 0x1ff) & ~0x1ff);

                space = efx_rx_buf_size(efx) - offset;
                if (space >= bytes) {
                        /* Refs dropped on kernel releasing each skb */
                        get_page(rx_queue->buf_page);
                        goto out;
                }
        }

        /* This is the final RX buffer for this page, so mark it for
         * unmapping */
        rx_queue->buf_page = NULL;
        rx_buf->unmap_addr = rx_queue->buf_dma_addr;

out:
        return 0;
}

/* This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.
 */
static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
                                     struct efx_rx_buffer *new_rx_buf)
{
        int rc = 0;

        if (rx_queue->channel->rx_alloc_push_pages) {
                new_rx_buf->skb = NULL;
                rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf);
                rx_queue->alloc_page_count++;
        } else {
                new_rx_buf->page = NULL;
                rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf);
                rx_queue->alloc_skb_count++;
        }

        if (unlikely(rc < 0))
                EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__,
                           rx_queue->queue, rc);
        return rc;
}

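/* Unmap an RX buffer for DMA.  A page shared between several buffers is
 * unmapped only once, via the final buffer's unmap_addr.
 */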
static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
                                       struct efx_rx_buffer *rx_buf)
{
        if (rx_buf->page) {
                EFX_BUG_ON_PARANOID(rx_buf->skb);
                if (rx_buf->unmap_addr) {
                        pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
                                       efx_rx_buf_size(efx),
                                       PCI_DMA_FROMDEVICE);
                        rx_buf->unmap_addr = 0;
                }
        } else if (likely(rx_buf->skb)) {
                pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
                                 rx_buf->len, PCI_DMA_FROMDEVICE);
        }
}

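/* Free the page or skb backing an RX buffer */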
static inline void efx_free_rx_buffer(struct efx_nic *efx,
                                      struct efx_rx_buffer *rx_buf)
{
        if (rx_buf->page) {
                __free_pages(rx_buf->page, efx->rx_buffer_order);
                rx_buf->page = NULL;
        } else if (likely(rx_buf->skb)) {
                dev_kfree_skb_any(rx_buf->skb);
                rx_buf->skb = NULL;
        }
}

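/* Unmap and free an RX buffer in one step */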
static inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                                      struct efx_rx_buffer *rx_buf)
{
        efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
        efx_free_rx_buffer(rx_queue->efx, rx_buf);
}

/**
 * __efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:	RX descriptor queue
 * @retry:	Recheck the fill level
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@fast_fill_limit.  If there is insufficient atomic
 * memory to do so, the caller should retry.
 */
static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
                                          int retry)
{
        struct efx_rx_buffer *rx_buf;
        unsigned fill_level, index;
        int i, space, rc = 0;

        /* Calculate current fill level.  Do this outside the lock,
         * because most of the time we'll end up not wanting to do the
         * fill anyway.
         */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
        EFX_BUG_ON_PARANOID(fill_level >
                            rx_queue->efx->type->rxd_ring_mask + 1);

        /* Don't fill if we don't need to */
        if (fill_level >= rx_queue->fast_fill_trigger)
                return 0;

        /* Record minimum fill level */
        if (unlikely(fill_level < rx_queue->min_fill)) {
                if (fill_level)
                        rx_queue->min_fill = fill_level;
        }

        /* Acquire RX add lock.  If this lock is contended, then a fast
         * fill must already be in progress (e.g. in the refill
         * tasklet), so we don't need to do anything
         */
        if (!spin_trylock_bh(&rx_queue->add_lock))
                return -1;

retry:
        /* Recalculate current fill level now that we have the lock */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
        EFX_BUG_ON_PARANOID(fill_level >
                            rx_queue->efx->type->rxd_ring_mask + 1);
        space = rx_queue->fast_fill_limit - fill_level;
        if (space < EFX_RX_BATCH)
                goto out_unlock;

        EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
                  " level %d to level %d using %s allocation\n",
                  rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
                  rx_queue->channel->rx_alloc_push_pages ? "page" : "skb");

        do {
                for (i = 0; i < EFX_RX_BATCH; ++i) {
                        index = (rx_queue->added_count &
                                 rx_queue->efx->type->rxd_ring_mask);
                        rx_buf = efx_rx_buffer(rx_queue, index);
                        rc = efx_init_rx_buffer(rx_queue, rx_buf);
                        if (unlikely(rc))
                                goto out;
                        ++rx_queue->added_count;
                }
        } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

        EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
                  "to level %d\n", rx_queue->queue,
                  rx_queue->added_count - rx_queue->removed_count);

out:
        /* Send write pointer to card. */
        falcon_notify_rx_desc(rx_queue);

        /* If the fast fill is running from inside the refill tasklet, then
         * on SMP systems it may be running on a different CPU to
         * RX event processing, which means that the fill level may now be
         * out of date. */
        if (unlikely(retry && (rc == 0)))
                goto retry;

out_unlock:
        spin_unlock_bh(&rx_queue->add_lock);

        return rc;
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:	RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@fast_fill_limit.  If there is insufficient memory to do so,
 * it will schedule a work item to immediately continue the fast fill.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
        int rc;

        rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
        if (unlikely(rc)) {
                /* Schedule the work item to run immediately.  The hope is
                 * that work is immediately pending to free some memory
                 * (e.g. an RX event or TX completion)
                 */
                efx_schedule_slow_fill(rx_queue, 0);
        }
}

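/* RX slow-fill work item.  Runs when an atomic fast fill failed, and
 * retries the push from process context, giving the kernel a chance to
 * free some memory first.
 */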
void efx_rx_work(struct work_struct *data)
{
        struct efx_rx_queue *rx_queue;
        int rc;

        rx_queue = container_of(data, struct efx_rx_queue, work.work);

        if (unlikely(!rx_queue->channel->enabled))
                return;

        EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
                  "%d\n", rx_queue->queue, raw_smp_processor_id());

        ++rx_queue->slow_fill_count;
        /* Push new RX descriptors, allowing at least 1 jiffy for
         * the kernel to free some more memory. */
        rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
        if (rc)
                efx_schedule_slow_fill(rx_queue, 1);
}

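/* Validate the length reported by the hardware against the posted
 * buffer size, flagging oversized packets for discard and, where the
 * buffer's trailing skb metadata may be corrupt, for leaking.
 */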
static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                                            struct efx_rx_buffer *rx_buf,
                                            int len, int *discard,
                                            int *leak_packet)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

        if (likely(len <= max_len))
                return;

        /* The packet must be discarded, but this is only a fatal error
         * if the caller indicated it was
         */
        *discard = 1;

        if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
                EFX_ERR_RL(efx, " RX queue %d seriously overlength "
                           "RX event (0x%x > 0x%x+0x%x). Leaking\n",
                           rx_queue->queue, len, max_len,
                           efx->type->rx_buffer_padding);
                /* If this buffer was skb-allocated, then the meta
                 * data at the end of the skb will be trashed.  So
                 * we have no choice but to leak the fragment.
                 */
                *leak_packet = (rx_buf->skb != NULL);
                efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
        } else {
                EFX_ERR_RL(efx, " RX queue %d overlength RX event "
                           "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len);
        }

        rx_queue->channel->n_rx_overlength++;
}

/* Pass a received packet up through the generic LRO stack
 *
 * Handles driverlink veto, and passes the fragment up via
 * the appropriate LRO method
 */
static inline void efx_rx_packet_lro(struct efx_channel *channel,
                                     struct efx_rx_buffer *rx_buf)
{
        struct net_lro_mgr *lro_mgr = &channel->lro_mgr;
        void *priv = channel;

        /* Pass the skb/page into the LRO engine */
        if (rx_buf->page) {
                struct skb_frag_struct frags;

                frags.page = rx_buf->page;
                frags.page_offset = efx_rx_buf_offset(rx_buf);
                frags.size = rx_buf->len;

                lro_receive_frags(lro_mgr, &frags, rx_buf->len,
                                  rx_buf->len, priv, 0);

                EFX_BUG_ON_PARANOID(rx_buf->skb);
                rx_buf->page = NULL;
        } else {
                EFX_BUG_ON_PARANOID(!rx_buf->skb);

                lro_receive_skb(lro_mgr, rx_buf->skb, priv);
                rx_buf->skb = NULL;
        }
}

/* Allocate and construct an SKB around a struct page. */
static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
                                            struct efx_nic *efx,
                                            int hdr_len)
{
        struct sk_buff *skb;

        /* Allocate an SKB to store the headers */
        skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
        if (unlikely(skb == NULL)) {
                EFX_ERR_RL(efx, "RX out of memory for skb\n");
                return NULL;
        }

        EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags);
        EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_reserve(skb, EFX_PAGE_SKB_ALIGN);

        skb->len = rx_buf->len;
        skb->truesize = rx_buf->len + sizeof(struct sk_buff);
        memcpy(skb->data, rx_buf->data, hdr_len);
        skb->tail += hdr_len;

        /* Append the remaining page onto the frag list */
        if (unlikely(rx_buf->len > hdr_len)) {
                struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
                frag->page = rx_buf->page;
                frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len;
                frag->size = skb->len - hdr_len;
                skb_shinfo(skb)->nr_frags = 1;
                skb->data_len = frag->size;
        } else {
                __free_pages(rx_buf->page, efx->rx_buffer_order);
                skb->data_len = 0;
        }

        /* Ownership has transferred from the rx_buf to skb */
        rx_buf->page = NULL;

        /* Move past the ethernet header */
        skb->protocol = eth_type_trans(skb, efx->net_dev);

        return skb;
}

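/* Handle a received packet.  First half: runs from the RX event handler
 * and must not touch the payload; completion of the previous packet is
 * pipelined so that its headers have time to prefetch into cache.
 */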
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                   unsigned int len, int checksummed, int discard)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
        int leak_packet = 0;

        rx_buf = efx_rx_buffer(rx_queue, index);
        EFX_BUG_ON_PARANOID(!rx_buf->data);
        EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
        EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));

        /* This allows the refill path to post another buffer.
         * EFX_RXD_HEAD_ROOM ensures that the slot we are using
         * isn't overwritten yet.
         */
        rx_queue->removed_count++;

        /* Validate the length encoded in the event vs the descriptor pushed */
        efx_rx_packet__check_len(rx_queue, rx_buf, len,
                                 &discard, &leak_packet);

        EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n",
                  rx_queue->queue, index,
                  (unsigned long long)rx_buf->dma_addr, len,
                  (checksummed ? " [SUMMED]" : ""),
                  (discard ? " [DISCARD]" : ""));

        /* Discard packet, if instructed to do so */
        if (unlikely(discard)) {
                if (unlikely(leak_packet))
                        rx_queue->channel->n_skbuff_leaks++;
                else
                        /* We haven't called efx_unmap_rx_buffer yet,
                         * so fini the entire rx_buffer here */
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                return;
        }

        /* Release card resources - assumes all RX buffers consumed in-order
         * per RX queue
         */
        efx_unmap_rx_buffer(efx, rx_buf);

        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
         */
        prefetch(rx_buf->data);

        /* Pipeline receives so that we give time for packet headers to be
         * prefetched into cache.
         */
        rx_buf->len = len;
        if (rx_queue->channel->rx_pkt)
                __efx_rx_packet(rx_queue->channel,
                                rx_queue->channel->rx_pkt,
                                rx_queue->channel->rx_pkt_csummed);
        rx_queue->channel->rx_pkt = rx_buf;
        rx_queue->channel->rx_pkt_csummed = checksummed;
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel,
                     struct efx_rx_buffer *rx_buf, int checksummed)
{
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;
        int lro = efx->net_dev->features & NETIF_F_LRO;

        /* If we're in loopback test, then pass the packet directly to the
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
                efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
                efx_free_rx_buffer(efx, rx_buf);
                goto done;
        }

        if (rx_buf->skb) {
                prefetch(skb_shinfo(rx_buf->skb));

                skb_put(rx_buf->skb, rx_buf->len);

                /* Move past the ethernet header.  rx_buf->data still points
                 * at the ethernet header */
                rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
                                                       efx->net_dev);
        }

        /* Both our generic-LRO and SFC-SSR support skb and page based
         * allocation, but neither support switching from one to the
         * other on the fly.  If we spot that the allocation mode has
         * changed, then flush the LRO state.
         */
        if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) {
                efx_flush_lro(channel);
                channel->rx_alloc_pop_pages = (rx_buf->page != NULL);
        }
        if (likely(checksummed && lro)) {
                efx_rx_packet_lro(channel, rx_buf);
                goto done;
        }

        /* Form an skb if required */
        if (rx_buf->page) {
                int hdr_len = min(rx_buf->len, EFX_SKB_HEADERS);
                skb = efx_rx_mk_skb(rx_buf, efx, hdr_len);
                if (unlikely(skb == NULL)) {
                        efx_free_rx_buffer(efx, rx_buf);
                        goto done;
                }
        } else {
                /* We now own the SKB */
                skb = rx_buf->skb;
                rx_buf->skb = NULL;
        }

        EFX_BUG_ON_PARANOID(rx_buf->page);
        EFX_BUG_ON_PARANOID(rx_buf->skb);
        EFX_BUG_ON_PARANOID(!skb);

        /* Set the SKB flags */
        if (unlikely(!checksummed || !efx->rx_checksum_enabled))
                skb->ip_summed = CHECKSUM_NONE;

        /* Pass the packet up */
        netif_receive_skb(skb);

        /* Update allocation strategy method */
        channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;

done:
        efx->net_dev->last_rx = jiffies;
}

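/* Choose the buffer allocation strategy for a channel, based on the
 * hysteresis counter described above.  Called once per NAPI poll
 * interval.
 */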
void efx_rx_strategy(struct efx_channel *channel)
{
        enum efx_rx_alloc_method method = rx_alloc_method;

        /* Only makes sense to use page based allocation if LRO is enabled */
        if (!(channel->efx->net_dev->features & NETIF_F_LRO)) {
                method = RX_ALLOC_METHOD_SKB;
        } else if (method == RX_ALLOC_METHOD_AUTO) {
                /* Constrain the rx_alloc_level */
                if (channel->rx_alloc_level < 0)
                        channel->rx_alloc_level = 0;
                else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
                        channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

                /* Decide on the allocation method */
                method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
                          RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
        }

        /* Push the option */
        channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}

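/* RX queue lifecycle: probe allocates the software state, init sets the
 * fill limits and brings up the hardware ring, fini releases the buffers
 * and tears the ring down, and remove frees the remaining software state.
 */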
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int rxq_size;
        int rc;

        EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);

        /* Allocate RX buffers */
        rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
        rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
        if (!rx_queue->buffer) {
                rc = -ENOMEM;
                goto fail1;
        }

        rc = falcon_probe_rx(rx_queue);
        if (rc)
                goto fail2;

        return 0;

fail2:
        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
fail1:
        rx_queue->used = 0;

        return rc;
}

int efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int max_fill, trigger, limit;

        EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);

        /* Initialise ptr fields */
        rx_queue->added_count = 0;
        rx_queue->notified_count = 0;
        rx_queue->removed_count = 0;
        rx_queue->min_fill = -1U;
        rx_queue->min_overfill = -1U;

        /* Initialise limit fields */
        max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM;
        trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
        limit = max_fill * min(rx_refill_limit, 100U) / 100U;

        rx_queue->max_fill = max_fill;
        rx_queue->fast_fill_trigger = trigger;
        rx_queue->fast_fill_limit = limit;

        /* Set up RX descriptor ring */
        return falcon_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
        int i;
        struct efx_rx_buffer *rx_buf;

        EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);

        falcon_fini_rx(rx_queue);

        /* Release RX buffers.  NB: start at index 0, not the current HW ptr */
        if (rx_queue->buffer) {
                for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) {
                        rx_buf = efx_rx_buffer(rx_queue, i);
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                }
        }

        /* For a page that is part-way through splitting into RX buffers */
        if (rx_queue->buf_page != NULL) {
                pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
                               efx_rx_buf_size(rx_queue->efx),
                               PCI_DMA_FROMDEVICE);
                __free_pages(rx_queue->buf_page,
                             rx_queue->efx->rx_buffer_order);
                rx_queue->buf_page = NULL;
        }
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
        EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);

        falcon_remove_rx(rx_queue);

        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
        rx_queue->used = 0;
}

void efx_flush_lro(struct efx_channel *channel)
{
        lro_flush_all(&channel->lro_mgr);
}

module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
                 "RX descriptor ring fast/slow fill threshold (%)");
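
/*
 * Example (hypothetical invocation, assuming the driver is built as the
 * usual "sfc" module): force skb-based allocation at load time with
 *
 *     modprobe sfc rx_alloc_method=1
 */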