/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <net/busy_poll.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif

#include "mlx4_en.h"

static int mlx4_alloc_page(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_alloc *frag,
			   gfp_t gfp)
{
	struct page *page;
	dma_addr_t dma;

	page = alloc_page(gfp);
	if (unlikely(!page))
		return -ENOMEM;
	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
	if (unlikely(dma_mapping_error(priv->ddev, dma))) {
		__free_page(page);
		return -ENOMEM;
	}
	frag->page = page;
	frag->dma = dma;
	frag->page_offset = priv->rx_headroom;
	return 0;
}

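/* Refill the scatter list of one RX descriptor. Only fragments whose
 * page was consumed by the previous completion (frags->page == NULL)
 * get a fresh page; the others are re-posted with their existing DMA
 * mapping.
 */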
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
			       struct mlx4_en_rx_ring *ring,
			       struct mlx4_en_rx_desc *rx_desc,
			       struct mlx4_en_rx_alloc *frags,
			       gfp_t gfp)
{
	int i;

	for (i = 0; i < priv->num_frags; i++, frags++) {
		if (!frags->page) {
			if (mlx4_alloc_page(priv, frags, gfp))
				return -ENOMEM;
			ring->rx_alloc_pages++;
		}
		rx_desc->data[i].addr = cpu_to_be64(frags->dma +
						    frags->page_offset);
	}
	return 0;
}

static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_alloc *frag)
{
	if (frag->page) {
		dma_unmap_page(priv->ddev, frag->dma,
			       PAGE_SIZE, priv->dma_dir);
		__free_page(frag->page);
	}
	/* We need to clear all fields, otherwise a change of priv->log_rx_info
	 * could lead to seeing garbage later in frag->page.
	 */
	memset(frag, 0, sizeof(*frag));
}

static void mlx4_en_init_rx_desc(const struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key
	 */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}

static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index,
				   gfp_t gfp)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
	struct mlx4_en_rx_alloc *frags = ring->rx_info +
					(index << priv->log_rx_info);

	if (ring->page_cache.index > 0) {
		/* XDP uses a single page per frame */
		if (!frags->page) {
			ring->page_cache.index--;
			frags->page = ring->page_cache.buf[ring->page_cache.index].page;
			frags->dma  = ring->page_cache.buf[ring->page_cache.index].dma;
		}
		frags->page_offset = XDP_PACKET_HEADROOM;
		rx_desc->data[0].addr = cpu_to_be64(frags->dma +
						    XDP_PACKET_HEADROOM);
		return 0;
	}

	return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
}

static bool mlx4_en_is_ring_empty(const struct mlx4_en_rx_ring *ring)
{
	return ring->prod == ring->cons;
}

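/* Publish the producer index through the doorbell record in coherent
 * memory. The device reads this 16-bit counter to learn how many
 * descriptors have been posted; masking with 0xffff is safe since the
 * counter is tracked modulo 2^16.
 */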
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}

/* slow path */
static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_alloc *frags;
	int nr;

	frags = ring->rx_info + (index << priv->log_rx_info);
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		mlx4_en_free_frag(priv, frags + nr);
	}
}

static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size,
						    GFP_KERNEL | __GFP_COLD)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated, reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
		}
	}

	return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	for (index = 0; index < ring->size; index++) {
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
	}
	ring->cons = 0;
	ring->prod = 0;
}

void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
	int i;
	int num_of_eqs;
	int num_rx_rings;
	struct mlx4_dev *dev = mdev->dev;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		num_of_eqs = max_t(int, MIN_RX_RINGS,
				   min_t(int,
					 mlx4_get_eqs_per_port(mdev->dev, i),
					 DEF_RX_RINGS));

		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
			min_t(int, num_of_eqs,
			      netif_get_num_default_rss_queues());
		mdev->profile.prof[i].rx_ring_num =
			rounddown_pow_of_two(num_rx_rings);
	}
}

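/* Note: mlx4_en_set_num_rx_rings() clamps the per-port ring count to
 * [MIN_RX_RINGS, DEF_RX_RINGS] based on the EQs available for the port,
 * then rounds down to a power of two - mlx4_en_config_rss_steer() encodes
 * the ring count as ilog2(rss_rings), so it must be a power of two.
 */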
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err = -ENOMEM;
	int tmp;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed to allocate RX ring structure\n");
			return -ENOMEM;
		}
	}

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct mlx4_en_rx_alloc));
	ring->rx_info = vzalloc_node(tmp, node);
	if (!ring->rx_info) {
		ring->rx_info = vzalloc(tmp);
		if (!ring->rx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
	       ring->rx_info, tmp);

	/* Allocate HW buffers on provided NUMA node */
	set_dev_node(&mdev->dev->persist->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
	if (err)
		goto err_info;

	ring->buf = ring->wqres.buf.direct.buf;

	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;

	*pring = ring;
	return 0;

err_info:
	vfree(ring->rx_info);
	ring->rx_info = NULL;
err_ring:
	kfree(ring);
	*pring = NULL;

	return err;
}

int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE) {
			/* Stamp first unused send wqe */
			__be32 *ptr = (__be32 *)ring->buf;
			__be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
			*ptr = stamp;
			/* Move pointer to start of rx section */
			ring->buf += TXBB_SIZE;
		}

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
	while (ring_ind >= 0) {
		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
		ring_ind--;
	}
	return err;
}

/* We recover from out of memory by scheduling our napi poll
 * function (mlx4_en_poll_rx_cq), which tries to allocate
 * all missing RX buffers (see mlx4_en_refill_rx_buffers).
 */
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
{
	int ring;

	if (!priv->port_up)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
			local_bh_disable();
			napi_reschedule(&priv->rx_cq[ring]->napi);
			local_bh_enable();
		}
	}
}

/* When the rx ring is running in page-per-packet mode, a released frame can go
 * directly into a small cache, to avoid unmapping or touching the page
 * allocator. In bpf prog performance scenarios, buffers are either forwarded
 * or dropped, never converted to skbs, so every page can come directly from
 * this cache when it is sized to be a multiple of the napi budget.
 */
bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
			struct mlx4_en_rx_alloc *frame)
{
	struct mlx4_en_page_cache *cache = &ring->page_cache;

	if (cache->index >= MLX4_EN_CACHE_SIZE)
		return false;

	cache->buf[cache->index].page = frame->page;
	cache->buf[cache->index].dma = frame->dma;
	cache->index++;
	return true;
}

void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;
	struct bpf_prog *old_prog;

	old_prog = rcu_dereference_protected(
					ring->xdp_prog,
					lockdep_is_held(&mdev->state_lock));
	if (old_prog)
		bpf_prog_put(old_prog);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	vfree(ring->rx_info);
	ring->rx_info = NULL;
	kfree(ring);
	*pring = NULL;
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int i;

	for (i = 0; i < ring->page_cache.index; i++) {
		dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
			       PAGE_SIZE, priv->dma_dir);
		put_page(ring->page_cache.buf[i].page);
	}
	ring->page_cache.index = 0;
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
}

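/* Page reuse strategy in mlx4_en_complete_rx_desc() below: when
 * frag_stride is exactly half a page, the offset just flips between the
 * two halves, and the page is reused as long as we hold the only
 * reference (page_count() == 1), it is not a pfmemalloc emergency page
 * and it sits on the local NUMA node. For other strides the offset
 * advances linearly and the page is released once the next fragment
 * would no longer fit.
 */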
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_alloc *frags,
				    struct sk_buff *skb,
				    int length)
{
	const struct mlx4_en_frag_info *frag_info = priv->frag_info;
	unsigned int truesize = 0;
	int nr, frag_size;
	struct page *page;
	dma_addr_t dma;
	bool release;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0;; frags++) {
		frag_size = min_t(int, length, frag_info->frag_size);

		page = frags->page;
		if (unlikely(!page))
			goto fail;

		dma = frags->dma;
		dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
					      frag_size, priv->dma_dir);

		__skb_fill_page_desc(skb, nr, page, frags->page_offset,
				     frag_size);

		truesize += frag_info->frag_stride;
		if (frag_info->frag_stride == PAGE_SIZE / 2) {
			frags->page_offset ^= PAGE_SIZE / 2;
			release = page_count(page) != 1 ||
				  page_is_pfmemalloc(page) ||
				  page_to_nid(page) != numa_mem_id();
		} else {
			u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);

			frags->page_offset += sz_align;
			release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
		}
		if (release) {
			dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
			frags->page = NULL;
		} else {
			page_ref_inc(page);
		}

		nr++;
		length -= frag_size;
		if (!length)
			break;
		frag_info++;
	}
	skb->truesize += truesize;
	return nr;

fail:
	while (nr > 0) {
		nr--;
		__skb_frag_unref(skb_shinfo(skb)->frags + nr);
	}
	return 0;
}

static void validate_loopback(struct mlx4_en_priv *priv, void *va)
{
	const unsigned char *data = va + ETH_HLEN;
	int i;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++) {
		if (data[i] != (unsigned char)i)
			return;
	}
	/* Loopback found */
	priv->loopback_ok = 1;
}

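/* Refill runs from napi context, hence GFP_ATOMIC; __GFP_MEMALLOC lets
 * the allocation dip into the emergency reserves, since RX buffers may
 * be needed to complete the very I/O that frees memory.
 */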
static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	u32 missing = ring->actual_size - (ring->prod - ring->cons);

	/* Try to batch allocations, but not too much. */
	if (missing < 8)
		return false;
	do {
		if (mlx4_en_prepare_rx_desc(priv, ring,
					    ring->prod & ring->size_mask,
					    GFP_ATOMIC | __GFP_COLD |
					    __GFP_MEMALLOC))
			break;
		ring->prod++;
	} while (--missing);

	return true;
}

/* When hardware doesn't strip the vlan, we need to calculate the checksum
 * over it and add it to the hardware's checksum calculation
 */
static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
					 struct vlan_hdr *vlanh)
{
	return csum_add(hw_checksum, *(__wsum *)vlanh);
}

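/* This works because the Internet checksum is a one's-complement sum of
 * 16-bit words: folding the 4-byte VLAN header into the HW result is
 * just a csum_add() of its two 16-bit words, as done above.
 */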
/* Although the stack expects a checksum that doesn't include the pseudo
 * header, the HW adds it. To address that, we subtract the pseudo
 * header checksum from the checksum value provided by the HW.
 */
static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
				struct iphdr *iph)
{
	__u16 length_for_csum = 0;
	__wsum csum_pseudo_header = 0;

	length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
	csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
						length_for_csum, iph->protocol, 0);
	skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
}

#if IS_ENABLED(CONFIG_IPV6)
/* In IPv6 packets, besides subtracting the pseudo header checksum,
 * we also compute/add the IP header checksum, which
 * is not added by the HW.
 */
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
			       struct ipv6hdr *ipv6h)
{
	__wsum csum_pseudo_hdr = 0;

	if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
		     ipv6h->nexthdr == IPPROTO_HOPOPTS))
		return -1;
	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));

	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));

	skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
	skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
	return 0;
}
#endif
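/* Why the IPv4 path only subtracts the pseudo header while the IPv6 path
 * also adds the IP header: a valid IPv4 header checksums to 0xffff over
 * its own words (its checksum field is chosen that way), which is a no-op
 * under one's-complement addition, whereas IPv6 carries no header
 * checksum, so get_fixed_ipv6_csum() must fold the header in explicitly
 * before reporting CHECKSUM_COMPLETE.
 */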
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
		      netdev_features_t dev_features)
{
	__wsum hw_checksum = 0;

	void *hdr = (u8 *)va + sizeof(struct ethhdr);

	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);

	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
		hdr += sizeof(struct vlan_hdr);
	}

	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
		get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
		if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
			return -1;
#endif
	return 0;
}

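/* CQE ownership: the HW toggles the owner bit on every pass over the CQ,
 * so a CQE belongs to software when its owner bit XNOR'd with the wrap
 * bit of our consumer index (cons_index & size) is true - this is the
 * loop condition in mlx4_en_process_rx_cq() below.
 */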
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
	struct mlx4_en_rx_alloc *frags;
	struct bpf_prog *xdp_prog;
	int doorbell_pending;
	struct sk_buff *skb;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;
	int factor = priv->cqe_factor;
	u64 timestamp;
	bool l2_tunnel;

	if (unlikely(!priv->port_up))
		return 0;

	if (unlikely(budget <= 0))
		return polled;

	/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
	rcu_read_lock();
	xdp_prog = rcu_dereference(ring->xdp_prog);
	doorbell_pending = 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index'
	 */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {
		void *va;

		frags = ring->rx_info + (index << priv->log_rx_info);
		va = page_address(frags[0].page) + frags[0].page_offset;

		/* make sure we read the CQE after we read the ownership bit */
		dma_rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
			       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *)cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/* Check whether we need to drop the packet: this is needed
		 * when SRIOV is not enabled, we are not performing the
		 * selftest, and forced loopback is disabled
		 */
		if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
			const struct ethhdr *ethh = va;
			dma_addr_t dma;
			/* Get a pointer to the first fragment, since we don't
			 * have an skb yet, and cast it to an ethhdr struct
			 */
			dma = frags[0].dma + frags[0].page_offset;
			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
						DMA_FROM_DEVICE);

			if (is_multicast_ether_addr(ethh->h_dest)) {
				struct mlx4_mac_entry *entry;
				struct hlist_head *bucket;
				unsigned int mac_hash;

				/* Drop the packet, since HW loopback-ed it */
				mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_for_each_entry_rcu(entry, bucket, hlist) {
					if (ether_addr_equal_64bits(entry->mac,
								    ethh->h_source))
						goto next;
				}
			}
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, va);
			goto next;
		}

		/* Packet is OK - process it. */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;

		/* A bpf program gets first chance to drop the packet. It may
		 * read bytes but not past the end of the frag.
		 */
		if (xdp_prog) {
			struct xdp_buff xdp;
			dma_addr_t dma;
			void *orig_data;
			u32 act;

			dma = frags[0].dma + frags[0].page_offset;
			dma_sync_single_for_cpu(priv->ddev, dma,
						priv->frag_info[0].frag_size,
						DMA_FROM_DEVICE);

			xdp.data_hard_start = va - frags[0].page_offset;
			xdp.data = va;
			xdp.data_end = xdp.data + length;
			orig_data = xdp.data;

			act = bpf_prog_run_xdp(xdp_prog, &xdp);

			if (xdp.data != orig_data) {
				length = xdp.data_end - xdp.data;
				frags[0].page_offset = xdp.data -
					xdp.data_hard_start;
				va = xdp.data;
			}

			switch (act) {
			case XDP_PASS:
				break;
			case XDP_TX:
				if (likely(!mlx4_en_xmit_frame(ring, frags, dev,
							length, cq->ring,
							&doorbell_pending))) {
					frags[0].page = NULL;
					goto next;
				}
				trace_xdp_exception(dev, xdp_prog, act);
				goto xdp_drop_no_cnt; /* Drop on xmit failure */
			default:
				bpf_warn_invalid_xdp_action(act);
			case XDP_ABORTED:
				trace_xdp_exception(dev, xdp_prog, act);
			case XDP_DROP:
				ring->xdp_drop++;
xdp_drop_no_cnt:
				goto next;
			}
		}

		ring->bytes += length;
		ring->packets++;

		skb = napi_get_frags(&cq->napi);
		if (!skb)
			goto next;

		if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
			timestamp = mlx4_en_get_cqe_ts(cqe);
			mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
					       timestamp);
		}
		skb_record_rx_queue(skb, cq->ring);

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
						      MLX4_CQE_STATUS_UDP)) {
				if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
				    cqe->checksum == cpu_to_be16(0xffff)) {
					ip_summed = CHECKSUM_UNNECESSARY;
					l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
						(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
					if (l2_tunnel)
						skb->csum_level = 1;
					ring->csum_ok++;
				} else {
					goto csum_none;
				}
			} else {
				if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
				    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
							       MLX4_CQE_STATUS_IPV6))) {
					if (check_csum(cqe, skb, va, dev->features)) {
						goto csum_none;
					} else {
						ip_summed = CHECKSUM_COMPLETE;
						ring->csum_complete++;
					}
				} else {
					goto csum_none;
				}
			}
		} else {
csum_none:
			ip_summed = CHECKSUM_NONE;
			ring->csum_none++;
		}
		skb->ip_summed = ip_summed;
		if (dev->features & NETIF_F_RXHASH)
			skb_set_hash(skb,
				     be32_to_cpu(cqe->immed_rss_invalid),
				     (ip_summed == CHECKSUM_UNNECESSARY) ?
					PKT_HASH_TYPE_L4 :
					PKT_HASH_TYPE_L3);

		if ((cqe->vlan_my_qpn &
		     cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       be16_to_cpu(cqe->sl_vid));
		else if ((cqe->vlan_my_qpn &
			  cpu_to_be32(MLX4_CQE_SVLAN_PRESENT_MASK)) &&
			 (dev->features & NETIF_F_HW_VLAN_STAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
					       be16_to_cpu(cqe->sl_vid));

		nr = mlx4_en_complete_rx_desc(priv, frags, skb, length);
		if (likely(nr)) {
			skb_shinfo(skb)->nr_frags = nr;
			skb->len = length;
			skb->data_len = length;
			napi_gro_frags(&cq->napi);
		} else {
			skb->vlan_tci = 0;
			skb_clear_hash(skb);
		}
next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
		if (++polled == budget)
			break;
	}

	rcu_read_unlock();

	if (polled) {
		if (doorbell_pending)
			mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq->ring]);

		mlx4_cq_set_ci(&cq->mcq);
		wmb(); /* ensure HW sees CQ consumer before we post new buffers */
		ring->cons = cq->mcq.cons_index;
	}
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);

	if (mlx4_en_refill_rx_buffers(priv, ring))
		mlx4_en_update_rx_prod_db(ring);

	return polled;
}


void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (likely(priv->port_up))
		napi_schedule_irqoff(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}

/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int done;

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	/* If we used up all the quota - we're probably not done yet... */
	if (done == budget) {
		const struct cpumask *aff;
		struct irq_data *idata;
		int cpu_curr;

		INC_PERF_COUNTER(priv->pstats.napi_quota);

		cpu_curr = smp_processor_id();
		idata = irq_desc_get_irq_data(cq->irq_desc);
		aff = irq_data_get_affinity_mask(idata);

		if (likely(cpumask_test_cpu(cpu_curr, aff)))
			return budget;

		/* The current cpu does not match the IRQ affinity mask -
		 * the affinity probably changed. Stop this NAPI poll and
		 * restart it on the right CPU.
		 * Avoid returning a too-small value (like 0), so as not to
		 * fool net_rx_action() and its netdev_budget.
		 */
		if (done)
			done--;
	}
	/* Done for now */
	if (napi_complete_done(napi, done))
		mlx4_en_arm_cq(priv, cq);
	return done;
}

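/* Example frag layouts computed below (assuming 4K pages,
 * MLX4_EN_MAX_RX_FRAGS == 4 and no XDP program): MTU 1500 gives an
 * eff_mtu of 1522, i.e. a single 1522-byte frag with a 2048-byte stride
 * (two frames per page); MTU 9000 gives an eff_mtu of 9022, split as
 * 2048 + 2048 + 2048 + 2878 over four frags.
 */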
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
	int i = 0;

	/* bpf requires buffers to be set up as 1 packet per page.
	 * This only works when num_frags == 1.
	 */
	if (priv->tx_ring_num[TX_XDP]) {
		priv->frag_info[0].frag_size = eff_mtu;
		/* This will gain efficient xdp frame recycling at the
		 * expense of more costly truesize accounting
		 */
		priv->frag_info[0].frag_stride = PAGE_SIZE;
		priv->dma_dir = PCI_DMA_BIDIRECTIONAL;
		priv->rx_headroom = XDP_PACKET_HEADROOM;
		i = 1;
	} else {
		int frag_size_max = 2048, buf_size = 0;

		/* should not happen, right? */
		if (eff_mtu > PAGE_SIZE + (MLX4_EN_MAX_RX_FRAGS - 1) * 2048)
			frag_size_max = PAGE_SIZE;

		while (buf_size < eff_mtu) {
			int frag_stride, frag_size = eff_mtu - buf_size;
			int pad, nb;

			if (i < MLX4_EN_MAX_RX_FRAGS - 1)
				frag_size = min(frag_size, frag_size_max);

			priv->frag_info[i].frag_size = frag_size;

			frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES);
			/* We can only pack two 1536-byte frames into one 4K
			 * page, so each frame consumes more bytes (truesize)
			 */
			nb = PAGE_SIZE / frag_stride;
			pad = (PAGE_SIZE - nb * frag_stride) / nb;
			pad &= ~(SMP_CACHE_BYTES - 1);
			priv->frag_info[i].frag_stride = frag_stride + pad;

			buf_size += frag_size;
			i++;
		}
		priv->dma_dir = PCI_DMA_FROMDEVICE;
		priv->rx_headroom = 0;
	}

	priv->num_frags = i;
	priv->rx_skb_size = eff_mtu;
	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));

	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
	       eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		en_dbg(DRV,
		       priv,
		       "  frag:%d - size:%d stride:%d\n",
		       i,
		       priv->frag_info[i].frag_size,
		       priv->frag_info[i].frag_stride);
	}
}

/* RSS related functions */

static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof(*context));
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
				qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		if (priv->dev->features & NETIF_F_RXFCS)
			ring->fcs_del = 0;
		else
			ring->fcs_del = ETH_FCS_LEN;
	} else
		ring->fcs_del = 0;

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}

int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	int err;
	u32 qpn;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn,
				    MLX4_RESERVE_A0_QP);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return err;
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return err;
	}

	return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}

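/* The RSS indirection QP below spreads flows over a power-of-two block
 * of QPs starting at base_qpn: the QP context carries ilog2(rss_rings)
 * in the top byte of base_qpn, which is why the ring count is rounded
 * down to a power of two when the profile is built.
 */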
/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	int rss_rings;
	void *ptr;
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
			MLX4_RSS_TCP_IPV6);
	int i, qpn;
	int err = 0;
	int good_qps = 0;

	en_dbg(DRV, priv, "Configuring rss steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
				    priv->rx_ring_num,
				    &rss_map->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0]->cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((void *)&context) + offsetof(struct mlx4_qp_context, pri_path)
					+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss) {
		rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
		rss_context->base_qpn_udp = rss_context->default_qpn;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
		rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
	}

	rss_context->flags = rss_mask;
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) {
		rss_context->hash_fn = MLX4_RSS_HASH_XOR;
	} else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) {
		rss_context->hash_fn = MLX4_RSS_HASH_TOP;
		memcpy(rss_context->rss_key, priv->rss_key,
		       MLX4_EN_RSS_KEY_SIZE);
	} else {
		en_err(priv, "Unknown RSS hash function requested\n");
		err = -EINVAL;
		goto indir_err;
	}

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}

void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}