// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_type.h"
#include "ice_xsk.h"
#include "ice_txrx.h"
#include "ice_txrx_lib.h"
#include "ice_lib.h"

/**
 * ice_qp_reset_stats - Resets all stats for rings of given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
        memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
               sizeof(vsi->rx_rings[q_idx]->rx_stats));
        memset(&vsi->tx_rings[q_idx]->stats, 0,
               sizeof(vsi->tx_rings[q_idx]->stats));
        if (ice_is_xdp_ena_vsi(vsi))
                memset(&vsi->xdp_rings[q_idx]->stats, 0,
                       sizeof(vsi->xdp_rings[q_idx]->stats));
}

/**
 * ice_qp_clean_rings - Cleans all the rings of a given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
        ice_clean_tx_ring(vsi->tx_rings[q_idx]);
        if (ice_is_xdp_ena_vsi(vsi))
                ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
        ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}

/**
 * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
 * @vsi: VSI that has netdev
 * @q_vector: q_vector that has NAPI context
 * @enable: true for enable, false for disable
 */
static void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
                     bool enable)
{
        if (!vsi->netdev || !q_vector)
                return;

        if (enable)
                napi_enable(&q_vector->napi);
        else
                napi_disable(&q_vector->napi);
}

/**
 * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
 * @vsi: the VSI that contains queue vector being un-configured
 * @rx_ring: Rx ring that will have its IRQ disabled
 * @q_vector: queue vector
 */
static void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring,
                 struct ice_q_vector *q_vector)
{
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;
        int base = vsi->base_vector;
        u16 reg;
        u32 val;

        /* QINT_TQCTL is being cleared in ice_vsi_stop_tx_ring, so only
         * QINT_RQCTL needs to be handled here.
         */
        reg = rx_ring->reg_idx;
        val = rd32(hw, QINT_RQCTL(reg));
        val &= ~QINT_RQCTL_CAUSE_ENA_M;
        wr32(hw, QINT_RQCTL(reg), val);

        if (q_vector) {
                u16 v_idx = q_vector->v_idx;

                wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
                ice_flush(hw);
                synchronize_irq(pf->msix_entries[v_idx + base].vector);
        }
}

/**
 * ice_qvec_cfg_msix - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
        u16 reg_idx = q_vector->reg_idx;
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;
        struct ice_ring *ring;

        ice_cfg_itr(hw, q_vector);

        wr32(hw, GLINT_RATE(reg_idx),
             ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));

        ice_for_each_ring(ring, q_vector->tx)
                ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx,
                                      q_vector->tx.itr_idx);

        ice_for_each_ring(ring, q_vector->rx)
                ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx,
                                      q_vector->rx.itr_idx);

        ice_flush(hw);
}

/**
 * ice_qvec_ena_irq - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;

        ice_irq_dynamic_ena(hw, vsi, q_vector);

        ice_flush(hw);
}

/**
 * ice_qp_dis - Disables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
        struct ice_txq_meta txq_meta = { };
        struct ice_ring *tx_ring, *rx_ring;
        struct ice_q_vector *q_vector;
        int timeout = 50;
        int err;

        if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
                return -EINVAL;

        tx_ring = vsi->tx_rings[q_idx];
        rx_ring = vsi->rx_rings[q_idx];
        q_vector = rx_ring->q_vector;

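        /* Wait for any other in-progress configuration on this VSI to
         * finish; give up after roughly 50-100 ms.
         */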
        while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state)) {
                timeout--;
                if (!timeout)
                        return -EBUSY;
                usleep_range(1000, 2000);
        }
        netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

        ice_qvec_dis_irq(vsi, rx_ring, q_vector);

        ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
        err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
        if (err)
                return err;
        if (ice_is_xdp_ena_vsi(vsi)) {
                struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];

                memset(&txq_meta, 0, sizeof(txq_meta));
                ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
                err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
                                           &txq_meta);
                if (err)
                        return err;
        }
        err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
        if (err)
                return err;

        ice_qvec_toggle_napi(vsi, q_vector, false);
        ice_qp_clean_rings(vsi, q_idx);
        ice_qp_reset_stats(vsi, q_idx);

        return 0;
}

/**
 * ice_qp_ena - Enables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
        struct ice_aqc_add_tx_qgrp *qg_buf;
        struct ice_ring *tx_ring, *rx_ring;
        struct ice_q_vector *q_vector;
        u16 size;
        int err;

        if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
                return -EINVAL;

        size = struct_size(qg_buf, txqs, 1);
        qg_buf = kzalloc(size, GFP_KERNEL);
        if (!qg_buf)
                return -ENOMEM;

        qg_buf->num_txqs = 1;

        tx_ring = vsi->tx_rings[q_idx];
        rx_ring = vsi->rx_rings[q_idx];
        q_vector = rx_ring->q_vector;

        err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
        if (err)
                goto free_buf;

        if (ice_is_xdp_ena_vsi(vsi)) {
                struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];

                memset(qg_buf, 0, size);
                qg_buf->num_txqs = 1;
                err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
                if (err)
                        goto free_buf;
                ice_set_ring_xdp(xdp_ring);
                xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
        }

        err = ice_setup_rx_ctx(rx_ring);
        if (err)
                goto free_buf;

        ice_qvec_cfg_msix(vsi, q_vector);

        err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
        if (err)
                goto free_buf;

        clear_bit(__ICE_CFG_BUSY, vsi->state);
        ice_qvec_toggle_napi(vsi, q_vector, true);
        ice_qvec_ena_irq(vsi, q_vector);

        netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
free_buf:
        kfree(qg_buf);
        return err;
}

/**
 * ice_xsk_alloc_pools - allocate the array of buffer pool pointers on a VSI
 * @vsi: VSI to allocate the buffer pool pointers on
 *
 * Returns 0 on success, negative on error
 */
static int ice_xsk_alloc_pools(struct ice_vsi *vsi)
{
        if (vsi->xsk_pools)
                return 0;

        vsi->xsk_pools = kcalloc(vsi->num_xsk_pools, sizeof(*vsi->xsk_pools),
                                 GFP_KERNEL);

        if (!vsi->xsk_pools) {
                vsi->num_xsk_pools = 0;
                return -ENOMEM;
        }

        return 0;
}

/**
 * ice_xsk_remove_pool - Remove a buffer pool for a certain ring/qid
 * @vsi: VSI from which the buffer pool will be removed
 * @qid: Ring/qid associated with the buffer pool
 */
static void ice_xsk_remove_pool(struct ice_vsi *vsi, u16 qid)
{
        vsi->xsk_pools[qid] = NULL;
        vsi->num_xsk_pools_used--;

        if (vsi->num_xsk_pools_used == 0) {
                kfree(vsi->xsk_pools);
                vsi->xsk_pools = NULL;
                vsi->num_xsk_pools = 0;
        }
}

/**
 * ice_xsk_pool_disable - disable a buffer pool region
 * @vsi: Current VSI
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
        if (!vsi->xsk_pools || qid >= vsi->num_xsk_pools ||
            !vsi->xsk_pools[qid])
                return -EINVAL;

        xsk_pool_dma_unmap(vsi->xsk_pools[qid], ICE_RX_DMA_ATTR);
        ice_xsk_remove_pool(vsi, qid);

        return 0;
}

/**
 * ice_xsk_pool_enable - enable a buffer pool region
 * @vsi: Current VSI
 * @pool: pointer to a requested buffer pool region
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
        int err;

        if (vsi->type != ICE_VSI_PF)
                return -EINVAL;

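        /* Size the pool array lazily, one entry per queue pair. */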
        if (!vsi->num_xsk_pools)
                vsi->num_xsk_pools = min_t(u16, vsi->num_rxq, vsi->num_txq);
        if (qid >= vsi->num_xsk_pools)
                return -EINVAL;

        err = ice_xsk_alloc_pools(vsi);
        if (err)
                return err;

        if (vsi->xsk_pools && vsi->xsk_pools[qid])
                return -EBUSY;

        vsi->xsk_pools[qid] = pool;
        vsi->num_xsk_pools_used++;

        err = xsk_pool_dma_map(vsi->xsk_pools[qid], ice_pf_to_dev(vsi->back),
                               ICE_RX_DMA_ATTR);
        if (err)
                return err;

        return 0;
}

/**
 * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
 * @vsi: Current VSI
 * @pool: buffer pool to enable/associate to a ring, NULL to disable
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
        bool if_running, pool_present = !!pool;
        int ret = 0, pool_failure = 0;

        if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);

        if (if_running) {
                ret = ice_qp_dis(vsi, qid);
                if (ret) {
                        netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
                        goto xsk_pool_if_up;
                }
        }

        pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
                                      ice_xsk_pool_disable(vsi, qid);

xsk_pool_if_up:
        if (if_running) {
                ret = ice_qp_ena(vsi, qid);
                if (!ret && pool_present)
                        napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
                else if (ret)
                        netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
        }

        if (pool_failure) {
                netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
                           pool_present ? "en" : "dis", pool_failure);
                return pool_failure;
        }

        return ret;
}

/**
 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 *
 * Returns true if all allocations were successful, false if any fail.
 */
bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
{
        union ice_32b_rx_flex_desc *rx_desc;
        u16 ntu = rx_ring->next_to_use;
        struct ice_rx_buf *rx_buf;
        bool ok = true;
        dma_addr_t dma;

        if (!count)
                return true;

        rx_desc = ICE_RX_DESC(rx_ring, ntu);
        rx_buf = &rx_ring->rx_buf[ntu];

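        /* Post buffers until the requested count is met or the pool runs
         * out of free buffers.
         */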
        do {
                rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
                if (!rx_buf->xdp) {
                        ok = false;
                        break;
                }

                dma = xsk_buff_xdp_get_dma(rx_buf->xdp);
                rx_desc->read.pkt_addr = cpu_to_le64(dma);
                rx_desc->wb.status_error0 = 0;

                rx_desc++;
                rx_buf++;
                ntu++;

                if (unlikely(ntu == rx_ring->count)) {
                        rx_desc = ICE_RX_DESC(rx_ring, 0);
                        rx_buf = rx_ring->rx_buf;
                        ntu = 0;
                }
        } while (--count);

        if (rx_ring->next_to_use != ntu) {
                /* clear the status bits for the next_to_use descriptor */
                rx_desc->wb.status_error0 = 0;
                ice_release_rx_desc(rx_ring, ntu);
        }

        return ok;
}

/**
 * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
 * @rx_ring: Rx ring
 */
static void ice_bump_ntc(struct ice_ring *rx_ring)
{
        int ntc = rx_ring->next_to_clean + 1;

        ntc = (ntc < rx_ring->count) ? ntc : 0;
        rx_ring->next_to_clean = ntc;
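        /* Warm the cache line of the descriptor that will be cleaned next. */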
        prefetch(ICE_RX_DESC(rx_ring, ntc));
}

/**
 * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
 * @rx_ring: Rx ring
 * @rx_buf: zero-copy Rx buffer
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb on success, NULL on failure.
 */
static struct sk_buff *
ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
        unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta;
        unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data;
        unsigned int datasize_hard = rx_buf->xdp->data_end -
                                     rx_buf->xdp->data_hard_start;
        struct sk_buff *skb;

        skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
                               GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                return NULL;

        skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start);
        memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize);
        if (metasize)
                skb_metadata_set(skb, metasize);

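        /* The zero-copy buffer is not needed once its contents have been
         * copied into the skb, so return it to the pool.
         */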
        xsk_buff_free(rx_buf->xdp);
        rx_buf->xdp = NULL;
        return skb;
}

/**
 * ice_run_xdp_zc - Executes an XDP program in zero-copy path
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
{
        int err, result = ICE_XDP_PASS;
        struct bpf_prog *xdp_prog;
        struct ice_ring *xdp_ring;
        u32 act;

        rcu_read_lock();
        xdp_prog = READ_ONCE(rx_ring->xdp_prog);
        if (!xdp_prog) {
                rcu_read_unlock();
                return ICE_XDP_PASS;
        }

        act = bpf_prog_run_xdp(xdp_prog, xdp);

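        /* XDP_REDIRECT is the most common action for AF_XDP, so handle it
         * first, before falling back to the switch statement.
         */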
        if (likely(act == XDP_REDIRECT)) {
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
                result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
                rcu_read_unlock();
                return result;
        }

        switch (act) {
        case XDP_PASS:
                break;
        case XDP_TX:
                xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
                result = ice_xmit_xdp_buff(xdp, xdp_ring);
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
                result = ICE_XDP_CONSUMED;
                break;
        }

        rcu_read_unlock();
        return result;
}

/**
 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
 * @budget: NAPI budget
 *
 * Returns number of processed packets on success, remaining budget on failure.
 */
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
{
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
        unsigned int xdp_xmit = 0;
        bool failure = false;

        while (likely(total_rx_packets < (unsigned int)budget)) {
                union ice_32b_rx_flex_desc *rx_desc;
                unsigned int size, xdp_res = 0;
                struct ice_rx_buf *rx_buf;
                struct sk_buff *skb;
                u16 stat_err_bits;
                u16 vlan_tag = 0;
                u8 rx_ptype;

                rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
                if (!ice_test_staterr(rx_desc, stat_err_bits))
                        break;

                /* This memory barrier is needed to keep us from reading
                 * any other fields out of the rx_desc until we have
                 * verified the descriptor has been written back.
                 */
                dma_rmb();

                size = le16_to_cpu(rx_desc->wb.pkt_len) &
                       ICE_RX_FLX_DESC_PKT_LEN_M;
                if (!size)
                        break;

                rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
                rx_buf->xdp->data_end = rx_buf->xdp->data + size;
                xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);

                xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
                if (xdp_res) {
                        if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
                                xdp_xmit |= xdp_res;
                        else
                                xsk_buff_free(rx_buf->xdp);

                        rx_buf->xdp = NULL;
                        total_rx_bytes += size;
                        total_rx_packets++;
                        cleaned_count++;

                        ice_bump_ntc(rx_ring);
                        continue;
                }

                /* XDP_PASS path */
                skb = ice_construct_skb_zc(rx_ring, rx_buf);
                if (!skb) {
                        rx_ring->rx_stats.alloc_buf_failed++;
                        break;
                }

                cleaned_count++;
                ice_bump_ntc(rx_ring);

                if (eth_skb_pad(skb)) {
                        skb = NULL;
                        continue;
                }

                total_rx_bytes += skb->len;
                total_rx_packets++;

                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
                if (ice_test_staterr(rx_desc, stat_err_bits))
                        vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

                rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
                           ICE_RX_FLEX_DESC_PTYPE_M;

                ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
                ice_receive_skb(rx_ring, skb, vlan_tag);
        }

        if (cleaned_count >= ICE_RX_BUF_WRITE)
                failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);

        ice_finalize_xdp_rx(rx_ring, xdp_xmit);
        ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);

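        /* Tell user space whether it needs to kick the driver: set the Rx
         * need_wakeup flag when Rx has stalled (allocation failure or an
         * empty ring), clear it otherwise.
         */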
        if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
                if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
                        xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
                else
                        xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

                return (int)total_rx_packets;
        }

        return failure ? budget : (int)total_rx_packets;
}

/**
 * ice_xmit_zc - Completes AF_XDP entries, and cleans XDP entries
 * @xdp_ring: XDP Tx ring
 * @budget: max number of frames to xmit
 *
 * Returns true if cleanup/transmission is done.
 */
static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
{
        struct ice_tx_desc *tx_desc = NULL;
        bool work_done = true;
        struct xdp_desc desc;
        dma_addr_t dma;

        while (likely(budget-- > 0)) {
                struct ice_tx_buf *tx_buf;

                if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
                        xdp_ring->tx_stats.tx_busy++;
                        work_done = false;
                        break;
                }

                tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];

                if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
                        break;

                dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
                xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
                                                 desc.len);

                tx_buf->bytecount = desc.len;

                tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
                tx_desc->buf_addr = cpu_to_le64(dma);
                tx_desc->cmd_type_offset_bsz =
                        ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0);

                xdp_ring->next_to_use++;
                if (xdp_ring->next_to_use == xdp_ring->count)
                        xdp_ring->next_to_use = 0;
        }

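        /* If any descriptors were posted, bump the HW tail and let the pool
         * release the Tx entries that were just consumed.
         */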
        if (tx_desc) {
                ice_xdp_ring_update_tail(xdp_ring);
                xsk_tx_release(xdp_ring->xsk_pool);
        }

        return budget > 0 && work_done;
}

/**
 * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
 * @xdp_ring: XDP Tx ring
 * @tx_buf: Tx buffer to clean
 */
static void
ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
{
        xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
        dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
                         dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
        dma_unmap_len_set(tx_buf, len, 0);
}

/**
 * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if cleanup/transmission is done.
 */
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
{
        int total_packets = 0, total_bytes = 0;
        s16 ntc = xdp_ring->next_to_clean;
        struct ice_tx_desc *tx_desc;
        struct ice_tx_buf *tx_buf;
        u32 xsk_frames = 0;
        bool xmit_done;

        tx_desc = ICE_TX_DESC(xdp_ring, ntc);
        tx_buf = &xdp_ring->tx_buf[ntc];
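        /* Offset ntc by -count so that the ring-wrap check in the loop
         * below is a cheap !ntc test; the offset is undone after the loop.
         */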
        ntc -= xdp_ring->count;

        do {
                if (!(tx_desc->cmd_type_offset_bsz &
                      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
                        break;

                total_bytes += tx_buf->bytecount;
                total_packets++;

                if (tx_buf->raw_buf) {
                        ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
                        tx_buf->raw_buf = NULL;
                } else {
                        xsk_frames++;
                }

                tx_desc->cmd_type_offset_bsz = 0;
                tx_buf++;
                tx_desc++;
                ntc++;

                if (unlikely(!ntc)) {
                        ntc -= xdp_ring->count;
                        tx_buf = xdp_ring->tx_buf;
                        tx_desc = ICE_TX_DESC(xdp_ring, 0);
                }

                prefetch(tx_desc);

        } while (likely(--budget));

        ntc += xdp_ring->count;
        xdp_ring->next_to_clean = ntc;

        if (xsk_frames)
                xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);

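        /* For Tx, the need_wakeup flag is simply kept set whenever the
         * feature is enabled, so the application always kicks the driver.
         */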
        if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
                xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);

        ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
        xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);

        return budget > 0 && xmit_done;
}

/**
 * ice_xsk_wakeup - Implements ndo_xsk_wakeup
 * @netdev: net_device
 * @queue_id: queue to wake up
 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
 *
 * Returns negative on error, zero otherwise.
 */
int
ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
               u32 __always_unused flags)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_q_vector *q_vector;
        struct ice_vsi *vsi = np->vsi;
        struct ice_ring *ring;

        if (test_bit(__ICE_DOWN, vsi->state))
                return -ENETDOWN;

        if (!ice_is_xdp_ena_vsi(vsi))
                return -ENXIO;

        if (queue_id >= vsi->num_txq)
                return -ENXIO;

        if (!vsi->xdp_rings[queue_id]->xsk_pool)
                return -ENXIO;

        ring = vsi->xdp_rings[queue_id];

        /* The idea here is that if NAPI is running, mark a miss, so
         * it will run again. If not, trigger an interrupt and
         * schedule the NAPI from interrupt context. If NAPI would be
         * scheduled here, the interrupt affinity would not be
         * honored.
         */
        q_vector = ring->q_vector;
        if (!napi_if_scheduled_mark_missed(&q_vector->napi))
                ice_trigger_sw_intr(&vsi->back->hw, q_vector);

        return 0;
}

/**
 * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached
 * @vsi: VSI to be checked
 *
 * Returns true if any of the Rx rings has an AF_XDP buff pool attached
 */
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
        int i;

        if (!vsi->xsk_pools)
                return false;

        for (i = 0; i < vsi->num_xsk_pools; i++) {
                if (vsi->xsk_pools[i])
                        return true;
        }

        return false;
}

/**
 * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
 * @rx_ring: ring to be cleaned
 */
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
{
        u16 i;

        for (i = 0; i < rx_ring->count; i++) {
                struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

                if (!rx_buf->xdp)
                        continue;

                rx_buf->xdp = NULL;
        }
}

/**
 * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
 * @xdp_ring: XDP_Tx ring
 */
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
{
        u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
        u32 xsk_frames = 0;

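        /* Walk the not-yet-cleaned region of the ring: XDP_TX buffers are
         * unmapped and freed here, while pool-owned frames are only counted
         * and completed back to the pool below.
         */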
        while (ntc != ntu) {
                struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];

                if (tx_buf->raw_buf)
                        ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
                else
                        xsk_frames++;

                tx_buf->raw_buf = NULL;

                ntc++;
                if (ntc >= xdp_ring->count)
                        ntc = 0;
        }

        if (xsk_frames)
                xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}