/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <net/busy_poll.h>
#include <linux/bpf.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif

#include "mlx4_en.h"

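/* Allocate and DMA-map one (possibly compound) page for an RX fragment
 * slot.  The allocator starts at frag_info->order and falls back to
 * smaller orders on failure, giving up only when a single page can no
 * longer hold frag_size.  The page is mapped once for its full size and
 * enough page references are taken up front to cover every stride that
 * will be carved out of it.
 */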
static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
			    struct mlx4_en_rx_alloc *page_alloc,
			    const struct mlx4_en_frag_info *frag_info,
			    gfp_t _gfp)
{
	int order;
	struct page *page;
	dma_addr_t dma;

	for (order = frag_info->order; ;) {
		gfp_t gfp = _gfp;

		if (order)
			gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
		page = alloc_pages(gfp, order);
		if (likely(page))
			break;
		if (--order < 0 ||
		    ((PAGE_SIZE << order) < frag_info->frag_size))
			return -ENOMEM;
	}
	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
			   frag_info->dma_dir);
	if (unlikely(dma_mapping_error(priv->ddev, dma))) {
		put_page(page);
		return -ENOMEM;
	}
	page_alloc->page_size = PAGE_SIZE << order;
	page_alloc->page = page;
	page_alloc->dma = dma;
	page_alloc->page_offset = 0;
	/* Not doing get_page() for each frag is a big win
	 * on asymmetric workloads. Note we cannot use atomic_set().
	 */
	page_ref_add(page, page_alloc->page_size / frag_info->frag_stride - 1);
	return 0;
}

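/* Refill one RX descriptor from the per-ring page allocators.  Each
 * fragment slot first advances its allocator by one stride; only when a
 * page is exhausted is a fresh one allocated via mlx4_alloc_pages().  On
 * success the previous allocator state becomes the frags[] entry posted
 * to hardware, so no extra page reference needs to be taken here.
 */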
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
			       struct mlx4_en_rx_desc *rx_desc,
			       struct mlx4_en_rx_alloc *frags,
			       struct mlx4_en_rx_alloc *ring_alloc,
			       gfp_t gfp)
{
	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
	const struct mlx4_en_frag_info *frag_info;
	struct page *page;
	dma_addr_t dma;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		frag_info = &priv->frag_info[i];
		page_alloc[i] = ring_alloc[i];
		page_alloc[i].page_offset += frag_info->frag_stride;

		if (page_alloc[i].page_offset + frag_info->frag_stride <=
		    ring_alloc[i].page_size)
			continue;

		if (unlikely(mlx4_alloc_pages(priv, &page_alloc[i],
					      frag_info, gfp)))
			goto out;
	}

	for (i = 0; i < priv->num_frags; i++) {
		frags[i] = ring_alloc[i];
		dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
		ring_alloc[i] = page_alloc[i];
		rx_desc->data[i].addr = cpu_to_be64(dma);
	}

	return 0;

out:
	while (i--) {
		if (page_alloc[i].page != ring_alloc[i].page) {
			dma_unmap_page(priv->ddev, page_alloc[i].dma,
				       page_alloc[i].page_size,
				       priv->frag_info[i].dma_dir);
			page = page_alloc[i].page;
			/* Revert changes done by mlx4_alloc_pages */
			page_ref_sub(page, page_alloc[i].page_size /
					   priv->frag_info[i].frag_stride - 1);
			put_page(page);
		}
	}
	return -ENOMEM;
}

static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_alloc *frags,
			      int i)
{
	const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
	u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;

	if (next_frag_end > frags[i].page_size)
		dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
			       frag_info->dma_dir);

	if (frags[i].page)
		put_page(frags[i].page);
}

static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
				  struct mlx4_en_rx_ring *ring)
{
	int i;
	struct mlx4_en_rx_alloc *page_alloc;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
				     frag_info, GFP_KERNEL | __GFP_COLD))
			goto out;

		en_dbg(DRV, priv, " frag %d allocator: - size:%d frags:%d\n",
		       i, ring->page_alloc[i].page_size,
		       page_ref_count(ring->page_alloc[i].page));
	}
	return 0;

out:
	while (i--) {
		struct page *page;

		page_alloc = &ring->page_alloc[i];
		dma_unmap_page(priv->ddev, page_alloc->dma,
			       page_alloc->page_size,
			       priv->frag_info[i].dma_dir);
		page = page_alloc->page;
		/* Revert changes done by mlx4_alloc_pages */
		page_ref_sub(page, page_alloc->page_size /
				   priv->frag_info[i].frag_stride - 1);
		put_page(page);
		page_alloc->page = NULL;
	}
	return -ENOMEM;
}

static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_rx_alloc *page_alloc;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		page_alloc = &ring->page_alloc[i];
		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
		       i, page_count(page_alloc->page));

		dma_unmap_page(priv->ddev, page_alloc->dma,
			       page_alloc->page_size, frag_info->dma_dir);
		while (page_alloc->page_offset + frag_info->frag_stride <
		       page_alloc->page_size) {
			put_page(page_alloc->page);
			page_alloc->page_offset += frag_info->frag_stride;
		}
		page_alloc->page = NULL;
	}
}

static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}

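/* Post one RX descriptor at ring slot 'index'.  Pages recycled by
 * mlx4_en_rx_recycle() sit in ring->page_cache and are reused here
 * without touching the page allocator (the XDP fast path); otherwise the
 * slot is filled from the per-ring fragment allocators.
 */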
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index,
				   gfp_t gfp)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
	struct mlx4_en_rx_alloc *frags = ring->rx_info +
					(index << priv->log_rx_info);

	if (ring->page_cache.index > 0) {
		frags[0] = ring->page_cache.buf[--ring->page_cache.index];
		rx_desc->data[0].addr = cpu_to_be64(frags[0].dma);
		return 0;
	}

	return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
}

static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
{
	return ring->prod == ring->cons;
}

static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}

static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_alloc *frags;
	int nr;

	frags = ring->rx_info + (index << priv->log_rx_info);
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		mlx4_en_free_frag(priv, frags, nr);
	}
}

static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size,
						    GFP_KERNEL | __GFP_COLD)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
		}
	}

	return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	while (!mlx4_en_is_ring_empty(ring)) {
		index = ring->cons & ring->size_mask;
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
		++ring->cons;
	}
}

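/* Choose the default number of RX rings per Ethernet port: the port's EQ
 * count clamped to [MIN_RX_RINGS, DEF_RX_RINGS], further limited by
 * netif_get_num_default_rss_queues() (or forced to MIN_RX_RINGS under a
 * low-memory profile), and finally rounded down to a power of two.
 * Illustrative example only: with 16 EQs on the port and a default RSS
 * queue count of 8, this yields 8 rings (assuming DEF_RX_RINGS >= 8).
 */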
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
	int i;
	int num_of_eqs;
	int num_rx_rings;
	struct mlx4_dev *dev = mdev->dev;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		num_of_eqs = max_t(int, MIN_RX_RINGS,
				   min_t(int,
					 mlx4_get_eqs_per_port(mdev->dev, i),
					 DEF_RX_RINGS));

		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
			min_t(int, num_of_eqs,
			      netif_get_num_default_rss_queues());
		mdev->profile.prof[i].rx_ring_num =
			rounddown_pow_of_two(num_rx_rings);
	}
}

int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err = -ENOMEM;
	int tmp;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed to allocate RX ring structure\n");
			return -ENOMEM;
		}
	}

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct mlx4_en_rx_alloc));
	ring->rx_info = vmalloc_node(tmp, node);
	if (!ring->rx_info) {
		ring->rx_info = vmalloc(tmp);
		if (!ring->rx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
	       ring->rx_info, tmp);

	/* Allocate HW buffers on provided NUMA node */
	set_dev_node(&mdev->dev->persist->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
	if (err)
		goto err_info;

	ring->buf = ring->wqres.buf.direct.buf;

	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;

	*pring = ring;
	return 0;

err_info:
	vfree(ring->rx_info);
	ring->rx_info = NULL;
err_ring:
	kfree(ring);
	*pring = NULL;

	return err;
}

int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE)
			ring->buf += TXBB_SIZE;

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);

		/* Initialize page allocators */
		err = mlx4_en_init_allocator(priv, ring);
		if (err) {
			en_err(priv, "Failed initializing ring allocator\n");
			if (ring->stride <= TXBB_SIZE)
				ring->buf -= TXBB_SIZE;
			ring_ind--;
			goto err_allocator;
		}
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
err_allocator:
	while (ring_ind >= 0) {
		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
		mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
		ring_ind--;
	}
	return err;
}

/* We recover from out of memory by scheduling our napi poll
 * function (mlx4_en_process_rx_cq), which tries to allocate
 * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
 */
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
{
	int ring;

	if (!priv->port_up)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
			napi_reschedule(&priv->rx_cq[ring]->napi);
	}
}

/* When the rx ring is running in page-per-packet mode, a released frame can go
 * directly into a small cache, to avoid unmapping or touching the page
 * allocator. In bpf prog performance scenarios, buffers are either forwarded
 * or dropped, never converted to skbs, so every page can come directly from
 * this cache when it is sized to be a multiple of the napi budget.
 */
bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
			struct mlx4_en_rx_alloc *frame)
{
	struct mlx4_en_page_cache *cache = &ring->page_cache;

	if (cache->index >= MLX4_EN_CACHE_SIZE)
		return false;

	cache->buf[cache->index++] = *frame;
	return true;
}

void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;
	struct bpf_prog *old_prog;

	old_prog = rcu_dereference_protected(
					ring->xdp_prog,
					lockdep_is_held(&mdev->state_lock));
	if (old_prog)
		bpf_prog_put(old_prog);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	vfree(ring->rx_info);
	ring->rx_info = NULL;
	kfree(ring);
	*pring = NULL;
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int i;

	for (i = 0; i < ring->page_cache.index; i++) {
		struct mlx4_en_rx_alloc *frame = &ring->page_cache.buf[i];

		dma_unmap_page(priv->ddev, frame->dma, frame->page_size,
			       priv->frag_info[0].dma_dir);
		put_page(frame->page);
	}
	ring->page_cache.index = 0;
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
	mlx4_en_destroy_allocator(priv, ring);
}

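/* Attach the received fragments to 'skb' as page frags, stopping at the
 * fragment that contains the last byte of the packet.  Ownership of each
 * page moves to the skb (frags[nr].page is cleared), and the size of the
 * final fragment is trimmed to the actual packet length.  Returns the
 * number of fragments used, or 0 on failure.
 */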
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_desc *rx_desc,
				    struct mlx4_en_rx_alloc *frags,
				    struct sk_buff *skb,
				    int length)
{
	struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
	struct mlx4_en_frag_info *frag_info;
	int nr;
	dma_addr_t dma;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0; nr < priv->num_frags; nr++) {
		frag_info = &priv->frag_info[nr];
		if (length <= frag_info->frag_prefix_size)
			break;
		if (unlikely(!frags[nr].page))
			goto fail;

		dma = be64_to_cpu(rx_desc->data[nr].addr);
		dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
					DMA_FROM_DEVICE);

		/* Save page reference in skb */
		__skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
		skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
		skb_frags_rx[nr].page_offset = frags[nr].page_offset;
		skb->truesize += frag_info->frag_stride;
		frags[nr].page = NULL;
	}
	/* Adjust size of last fragment to match actual length */
	if (nr > 0)
		skb_frag_size_set(&skb_frags_rx[nr - 1],
			length - priv->frag_info[nr - 1].frag_prefix_size);
	return nr;

fail:
	while (nr > 0) {
		nr--;
		__skb_frag_unref(&skb_frags_rx[nr]);
	}
	return 0;
}

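/* Build an skb for a completed receive.  Packets up to SMALL_PACKET_SIZE
 * are copied in full into the linear area; larger packets keep their
 * fragments as page frags and only the protocol headers (as found by
 * eth_get_headlen()) are pulled into the linear part.
 */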
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_desc *rx_desc,
				      struct mlx4_en_rx_alloc *frags,
				      unsigned int length)
{
	struct sk_buff *skb;
	void *va;
	int used_frags;
	dma_addr_t dma;

	skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
	if (unlikely(!skb)) {
		en_dbg(RX_ERR, priv, "Failed allocating skb\n");
		return NULL;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	skb->len = length;

	/* Get pointer to first fragment so we can copy the headers into the
	 * (linear part of the) skb */
	va = page_address(frags[0].page) + frags[0].page_offset;

	if (length <= SMALL_PACKET_SIZE) {
		/* We are copying all relevant data to the skb - temporarily
		 * sync buffers for the copy */
		dma = be64_to_cpu(rx_desc->data[0].addr);
		dma_sync_single_for_cpu(priv->ddev, dma, length,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		skb->tail += length;
	} else {
		unsigned int pull_len;

		/* Move relevant fragments to skb */
		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
						      skb, length);
		if (unlikely(!used_frags)) {
			kfree_skb(skb);
			return NULL;
		}
		skb_shinfo(skb)->nr_frags = used_frags;

		pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE);
		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, pull_len);
		skb->tail += pull_len;

		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset += pull_len;

		/* Adjust size of first fragment */
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len);
		skb->data_len = length - pull_len;
	}
	return skb;
}

static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
{
	int i;
	int offset = ETH_HLEN;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
		if (*(skb->data + offset) != (unsigned char) (i & 0xff))
			goto out_loopback;
	}
	/* Loopback found */
	priv->loopback_ok = 1;

out_loopback:
	dev_kfree_skb_any(skb);
}

static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	int index = ring->prod & ring->size_mask;

	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
		if (mlx4_en_prepare_rx_desc(priv, ring, index,
					    GFP_ATOMIC | __GFP_COLD))
			break;
		ring->prod++;
		index = ring->prod & ring->size_mask;
	}
}

/* When hardware doesn't strip the vlan, we need to calculate the checksum
 * over it and add it to the hardware's checksum calculation
 */
static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
					 struct vlan_hdr *vlanh)
{
	return csum_add(hw_checksum, *(__wsum *)vlanh);
}

/* Although the stack expects checksum which doesn't include the pseudo
 * header, the HW adds it. To address that, we are subtracting the pseudo
 * header checksum from the checksum value provided by the HW.
 */
static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
				struct iphdr *iph)
{
	__u16 length_for_csum = 0;
	__wsum csum_pseudo_header = 0;

	length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
	csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
						length_for_csum, iph->protocol, 0);
	skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
}

#if IS_ENABLED(CONFIG_IPV6)
/* In IPv6 packets, besides subtracting the pseudo header checksum,
 * we also compute/add the IP header checksum which
 * is not added by the HW.
 */
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
			       struct ipv6hdr *ipv6h)
{
	__wsum csum_pseudo_hdr = 0;

	if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
		     ipv6h->nexthdr == IPPROTO_HOPOPTS))
		return -1;
	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));

	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));

	skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
	skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
	return 0;
}
#endif
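/* Fix up the hardware checksum so it can be reported as CHECKSUM_COMPLETE:
 * fold a non-stripped VLAN header back in if needed, then remove the
 * pseudo-header contribution for IPv4, or adjust for IPv6.  Returns 0 on
 * success, or -1 when the packet has to fall back to CHECKSUM_NONE (e.g.
 * IPv6 fragment/hop-by-hop cases).
 */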
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
		      netdev_features_t dev_features)
{
	__wsum hw_checksum = 0;

	void *hdr = (u8 *)va + sizeof(struct ethhdr);

	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);

	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
		hdr += sizeof(struct vlan_hdr);
	}

	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
		get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
		if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
			return -1;
#endif
	return 0;
}

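/* NAPI RX completion handler.  Walks the CQ as long as the ownership bit
 * of the current CQE matches the consumer index parity (the XNOR test
 * below) and the budget is not exhausted.  For each completion it either
 * hands the frame to an attached XDP program, builds a GRO frame, or
 * builds a regular skb, then releases or recycles the fragments.  On exit
 * it updates the CQ consumer index, refills the RX ring and rings the
 * producer doorbell.
 */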
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
	struct mlx4_en_rx_alloc *frags;
	struct mlx4_en_rx_desc *rx_desc;
	struct bpf_prog *xdp_prog;
	int doorbell_pending;
	struct sk_buff *skb;
	int tx_index;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;
	int factor = priv->cqe_factor;
	u64 timestamp;
	bool l2_tunnel;

	if (unlikely(!priv->port_up))
		return 0;

	if (unlikely(budget <= 0))
		return polled;

	/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
	rcu_read_lock();
	xdp_prog = rcu_dereference(ring->xdp_prog);
	doorbell_pending = 0;
	tx_index = (priv->tx_ring_num - priv->xdp_ring_num) + cq->ring;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		dma_rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
			       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *)cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/* Check if we need to drop the packet if SRIOV is not enabled
		 * and not performing the selftest or flb disabled
		 */
		if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
			struct ethhdr *ethh;
			dma_addr_t dma;
			/* Get pointer to first fragment since we don't
			 * have an skb yet and cast it to ethhdr struct
			 */
			dma = be64_to_cpu(rx_desc->data[0].addr);
			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
						DMA_FROM_DEVICE);
			ethh = (struct ethhdr *)(page_address(frags[0].page) +
						 frags[0].page_offset);

			if (is_multicast_ether_addr(ethh->h_dest)) {
				struct mlx4_mac_entry *entry;
				struct hlist_head *bucket;
				unsigned int mac_hash;

				/* Drop the packet, since HW loopback-ed it */
				mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_for_each_entry_rcu(entry, bucket, hlist) {
					if (ether_addr_equal_64bits(entry->mac,
								    ethh->h_source))
						goto next;
				}
			}
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;
		ring->bytes += length;
		ring->packets++;
		l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
			(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));

		/* A bpf program gets first chance to drop the packet. It may
		 * read bytes but not past the end of the frag.
		 */
		if (xdp_prog) {
			struct xdp_buff xdp;
			dma_addr_t dma;
			u32 act;

			dma = be64_to_cpu(rx_desc->data[0].addr);
			dma_sync_single_for_cpu(priv->ddev, dma,
						priv->frag_info[0].frag_size,
						DMA_FROM_DEVICE);

			xdp.data = page_address(frags[0].page) +
						frags[0].page_offset;
			xdp.data_end = xdp.data + length;

			act = bpf_prog_run_xdp(xdp_prog, &xdp);
			switch (act) {
			case XDP_PASS:
				break;
			case XDP_TX:
				if (likely(!mlx4_en_xmit_frame(frags, dev,
							length, tx_index,
							&doorbell_pending)))
					goto consumed;
				goto xdp_drop; /* Drop on xmit failure */
			default:
				bpf_warn_invalid_xdp_action(act);
			case XDP_ABORTED:
			case XDP_DROP:
xdp_drop:
				if (likely(mlx4_en_rx_recycle(ring, frags)))
					goto consumed;
				goto next;
			}
		}

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
						      MLX4_CQE_STATUS_UDP)) {
				if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
				    cqe->checksum == cpu_to_be16(0xffff)) {
					ip_summed = CHECKSUM_UNNECESSARY;
					ring->csum_ok++;
				} else {
					ip_summed = CHECKSUM_NONE;
					ring->csum_none++;
				}
			} else {
				if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
				    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
							       MLX4_CQE_STATUS_IPV6))) {
					ip_summed = CHECKSUM_COMPLETE;
					ring->csum_complete++;
				} else {
					ip_summed = CHECKSUM_NONE;
					ring->csum_none++;
				}
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			ring->csum_none++;
		}

		/* This packet is eligible for GRO if it is:
		 * - DIX Ethernet (type interpretation)
		 * - TCP/IP (v4)
		 * - without IP options
		 * - not an IP fragment
		 */
		if (dev->features & NETIF_F_GRO) {
			struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
			if (!gro_skb)
				goto next;

			nr = mlx4_en_complete_rx_desc(priv,
						      rx_desc, frags, gro_skb,
						      length);
			if (!nr)
				goto next;

			if (ip_summed == CHECKSUM_COMPLETE) {
				void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
				if (check_csum(cqe, gro_skb, va,
					       dev->features)) {
					ip_summed = CHECKSUM_NONE;
					ring->csum_none++;
					ring->csum_complete--;
				}
			}

			skb_shinfo(gro_skb)->nr_frags = nr;
			gro_skb->len = length;
			gro_skb->data_len = length;
			gro_skb->ip_summed = ip_summed;

			if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
				gro_skb->csum_level = 1;

			if ((cqe->vlan_my_qpn &
			     cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
			    (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
				u16 vid = be16_to_cpu(cqe->sl_vid);

				__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
			} else if ((be32_to_cpu(cqe->vlan_my_qpn) &
				    MLX4_CQE_SVLAN_PRESENT_MASK) &&
				   (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
				__vlan_hwaccel_put_tag(gro_skb,
						       htons(ETH_P_8021AD),
						       be16_to_cpu(cqe->sl_vid));
			}

			if (dev->features & NETIF_F_RXHASH)
				skb_set_hash(gro_skb,
					     be32_to_cpu(cqe->immed_rss_invalid),
					     (ip_summed == CHECKSUM_UNNECESSARY) ?
						PKT_HASH_TYPE_L4 :
						PKT_HASH_TYPE_L3);

			skb_record_rx_queue(gro_skb, cq->ring);

			if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
				timestamp = mlx4_en_get_cqe_ts(cqe);
				mlx4_en_fill_hwtstamps(mdev,
						       skb_hwtstamps(gro_skb),
						       timestamp);
			}

			napi_gro_frags(&cq->napi);
			goto next;
		}

		/* GRO not possible, complete processing here */
		skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
		if (unlikely(!skb)) {
			ring->dropped++;
			goto next;
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, skb);
			goto next;
		}

		if (ip_summed == CHECKSUM_COMPLETE) {
			if (check_csum(cqe, skb, skb->data, dev->features)) {
				ip_summed = CHECKSUM_NONE;
				ring->csum_complete--;
				ring->csum_none++;
			}
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);
		skb_record_rx_queue(skb, cq->ring);

		if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
			skb->csum_level = 1;

		if (dev->features & NETIF_F_RXHASH)
			skb_set_hash(skb,
				     be32_to_cpu(cqe->immed_rss_invalid),
				     (ip_summed == CHECKSUM_UNNECESSARY) ?
					PKT_HASH_TYPE_L4 :
					PKT_HASH_TYPE_L3);

		if ((be32_to_cpu(cqe->vlan_my_qpn) &
		     MLX4_CQE_CVLAN_PRESENT_MASK) &&
		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
		else if ((be32_to_cpu(cqe->vlan_my_qpn) &
			  MLX4_CQE_SVLAN_PRESENT_MASK) &&
			 (dev->features & NETIF_F_HW_VLAN_STAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
					       be16_to_cpu(cqe->sl_vid));

		if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
			timestamp = mlx4_en_get_cqe_ts(cqe);
			mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
					       timestamp);
		}

		napi_gro_receive(&cq->napi, skb);
next:
		for (nr = 0; nr < priv->num_frags; nr++)
			mlx4_en_free_frag(priv, frags, nr);

consumed:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
		if (++polled == budget)
			goto out;
	}

out:
	rcu_read_unlock();
	if (doorbell_pending)
		mlx4_en_xmit_doorbell(priv->tx_ring[tx_index]);

	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	mlx4_en_refill_rx_buffers(priv, ring);
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}

void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (likely(priv->port_up))
		napi_schedule_irqoff(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}

/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int done;

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	/* If we used up all the quota - we're probably not done yet... */
	if (done == budget) {
		const struct cpumask *aff;
		struct irq_data *idata;
		int cpu_curr;

		INC_PERF_COUNTER(priv->pstats.napi_quota);

		cpu_curr = smp_processor_id();
		idata = irq_desc_get_irq_data(cq->irq_desc);
		aff = irq_data_get_affinity_mask(idata);

		if (likely(cpumask_test_cpu(cpu_curr, aff)))
			return budget;

		/* Current cpu is not according to smp_irq_affinity -
		 * probably affinity changed.  Need to stop this NAPI
		 * poll, and restart it on the right CPU.
		 */
		done = 0;
	}
	/* Done for now */
	napi_complete_done(napi, done);
	mlx4_en_arm_cq(priv, cq);
	return done;
}

static const int frag_sizes[] = {
	FRAG_SZ0,
	FRAG_SZ1,
	FRAG_SZ2,
	FRAG_SZ3
};

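/* Build the receive scatter-list layout for the current MTU.  Fragments
 * are carved from frag_sizes[] until the effective MTU (MTU plus
 * Ethernet/VLAN header overhead) is covered, and each fragment's stride
 * is its size rounded up to the chosen alignment.  Illustrative example
 * only (exact numbers depend on the FRAG_SZ* and MLX4_EN_EFF_MTU
 * definitions in mlx4_en.h): a 1500-byte MTU gives an effective MTU of
 * roughly 1522 bytes, which fits in the first fragment size, so
 * num_frags ends up as 1.  When XDP is in use the layout is forced to
 * one packet per page: order-0 pages, PAGE_SIZE stride alignment and a
 * bidirectional DMA mapping so the same page can also be transmitted
 * from (XDP_TX) and recycled.
 */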
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	enum dma_data_direction dma_dir = PCI_DMA_FROMDEVICE;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
	int order = MLX4_EN_ALLOC_PREFER_ORDER;
	u32 align = SMP_CACHE_BYTES;
	int buf_size = 0;
	int i = 0;

	/* bpf requires buffers to be set up as 1 packet per page.
	 * This only works when num_frags == 1.
	 */
	if (priv->xdp_ring_num) {
		dma_dir = PCI_DMA_BIDIRECTIONAL;
		/* This will gain efficient xdp frame recycling at the expense
		 * of more costly truesize accounting
		 */
		align = PAGE_SIZE;
		order = 0;
	}

	while (buf_size < eff_mtu) {
		priv->frag_info[i].order = order;
		priv->frag_info[i].frag_size =
			(eff_mtu > buf_size + frag_sizes[i]) ?
				frag_sizes[i] : eff_mtu - buf_size;
		priv->frag_info[i].frag_prefix_size = buf_size;
		priv->frag_info[i].frag_stride =
				ALIGN(priv->frag_info[i].frag_size, align);
		priv->frag_info[i].dma_dir = dma_dir;
		buf_size += priv->frag_info[i].frag_size;
		i++;
	}

	priv->num_frags = i;
	priv->rx_skb_size = eff_mtu;
	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));

	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
	       eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		en_err(priv,
		       "  frag:%d - size:%d prefix:%d stride:%d\n",
		       i,
		       priv->frag_info[i].frag_size,
		       priv->frag_info[i].frag_prefix_size,
		       priv->frag_info[i].frag_stride);
	}
}

/* RSS related functions */

static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof(*context));
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
				qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		if (priv->dev->features & NETIF_F_RXFCS)
			ring->fcs_del = 0;
		else
			ring->fcs_del = ETH_FCS_LEN;
	} else
		ring->fcs_del = 0;

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}

int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	int err;
	u32 qpn;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn,
				    MLX4_RESERVE_A0_QP);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return err;
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return err;
	}

	return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}

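/* RSS steering uses one QP per RX ring plus a single indirection QP.
 * Each per-ring QP is tied to that ring's CQ by mlx4_en_config_rss_qp();
 * the indirection QP carries the mlx4_rss_context, i.e. the hash
 * function, hash key and the mask of header fields that feed the hash,
 * with the number of spread rings encoded as ilog2(rss_rings).
 */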
/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	int rss_rings;
	void *ptr;
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
			MLX4_RSS_TCP_IPV6);
	int i, qpn;
	int err = 0;
	int good_qps = 0;

	en_dbg(DRV, priv, "Configuring rss steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
				    priv->rx_ring_num,
				    &rss_map->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0]->cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
					+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss) {
		rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
		rss_context->base_qpn_udp = rss_context->default_qpn;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
		rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
	}

	rss_context->flags = rss_mask;
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) {
		rss_context->hash_fn = MLX4_RSS_HASH_XOR;
	} else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) {
		rss_context->hash_fn = MLX4_RSS_HASH_TOP;
		memcpy(rss_context->rss_key, priv->rss_key,
		       MLX4_EN_RSS_KEY_SIZE);
	} else {
		en_err(priv, "Unknown RSS hash function requested\n");
		err = -EINVAL;
		goto indir_err;
	}
	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}

void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}