// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <linux/stringify.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

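/**
 * i40e_alloc_rx_bi_zc - Allocate the zero-copy Rx buffer info array
 * @rx_ring: Rx ring
 *
 * Returns 0 on success, -ENOMEM on failure
 **/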
int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
{
	unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;

	rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
	return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
}

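/**
 * i40e_clear_rx_bi_zc - Zero out the Rx buffer info array
 * @rx_ring: Rx ring
 **/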
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi_zc, 0,
	       sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
}

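/**
 * i40e_rx_bi - Return the xdp_buff pointer slot for a given ring index
 * @rx_ring: Rx ring
 * @idx: index into the zero-copy buffer info array
 **/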
static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi_zc[idx];
}

/**
 * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
 * certain ring/qid
 * @vsi: Current VSI
 * @pool: buffer pool
 * @qid: Rx ring to associate buffer pool with
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
				struct xsk_buff_pool *pool,
				u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	bool if_running;
	int err;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;

		/* Kick start the NAPI context so that receiving will start */
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
 * certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to disassociate buffer pool from
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xsk_buff_pool *pool;
	bool if_running;
	int err;

	pool = xsk_get_pool_from_qid(netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);

	if (if_running) {
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
 * a ring/qid
 * @vsi: Current VSI
 * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate the buffer pool to/from
 *
 * This function enables or disables a buffer pool on a certain ring/qid.
 *
 * Returns 0 on success, <0 on failure
 **/
int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
			u16 qid)
{
	return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
		i40e_xsk_pool_disable(vsi, qid);
}

/**
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
 **/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	rcu_read_lock();
	/* NB! xdp_prog will always be !NULL, due to the fact that
	 * this path is enabled by setting an XDP program.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	}
	rcu_read_unlock();
	return result;
}

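/**
 * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: the number of buffers to allocate
 *
 * This function allocates buffers from the AF_XDP buffer pool and places
 * them on the Rx ring.
 *
 * Returns true for a successful allocation, false otherwise
 **/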
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct xdp_buff **bi, *xdp;
	dma_addr_t dma;
	bool ok = true;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = i40e_rx_bi(rx_ring, ntu);
	do {
		xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!xdp) {
			ok = false;
			goto no_buffers;
		}
		*bi = xdp;
		dma = xsk_buff_xdp_get_dma(xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->read.hdr_addr = 0;

		rx_desc++;
		bi++;
		ntu++;

		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = i40e_rx_bi(rx_ring, 0);
			ntu = 0;
		}

		count--;
	} while (count);

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return ok;
}

/**
 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
 * @rx_ring: Rx ring
 * @xdp: xdp_buff
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb, or NULL on failure.
 **/
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	xsk_buff_free(xdp);
	return skb;
}

/**
 * i40e_inc_ntc - Advance the next_to_clean index
 * @rx_ring: Rx ring
 **/
static void i40e_inc_ntc(struct i40e_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
}

/**
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns amount of work completed
 **/
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union i40e_rx_desc *rx_desc;
		struct xdp_buff **bi;
		unsigned int size;
		u64 qword;

		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring,
						      rx_desc->raw.qword[0],
						      qword);
			bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
			xsk_buff_free(*bi);
			*bi = NULL;
			cleaned_count++;
			i40e_inc_ntc(rx_ring);
			continue;
		}

		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
		(*bi)->data_end = (*bi)->data + size;
		xsk_buff_dma_sync_for_cpu(*bi, rx_ring->xsk_pool);

		xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
		if (xdp_res) {
			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR))
				xdp_xmit |= xdp_res;
			else
				xsk_buff_free(*bi);

			*bi = NULL;
			total_rx_bytes += size;
			total_rx_packets++;

			cleaned_count++;
			i40e_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */

		/* NB! We are not checking for errors using
		 * i40e_test_staterr() with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because SBP is
		 * *not* set in PRT_SBPVSI (it is not set by default).
		 */
		skb = i40e_construct_skb_zc(rx_ring, *bi);
		*bi = NULL;
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			break;
		}

		cleaned_count++;
		i40e_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
	}

	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
		failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

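/**
 * i40e_xmit_pkt - Fill a single Tx descriptor from an AF_XDP descriptor
 * @xdp_ring: XDP Tx ring
 * @desc: AF_XDP descriptor carrying the buffer address and length
 * @total_bytes: running byte count for the Tx stats update
 **/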
static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
			  unsigned int *total_bytes)
{
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP,
						  0, desc->len, 0);

	*total_bytes += desc->len;
}

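/**
 * i40e_xmit_pkt_batch - Fill PKTS_PER_BATCH Tx descriptors in one go
 * @xdp_ring: XDP Tx ring
 * @desc: array of at least PKTS_PER_BATCH AF_XDP descriptors
 * @total_bytes: running byte count for the Tx stats update
 **/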
static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
				unsigned int *total_bytes)
{
	u16 ntu = xdp_ring->next_to_use;
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;
	u32 i;

	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);

		tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC |
							  I40E_TX_DESC_CMD_EOP,
							  0, desc[i].len, 0);

		*total_bytes += desc[i].len;
	}

	xdp_ring->next_to_use = ntu;
}

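/**
 * i40e_fill_tx_hw_ring - Fill the Tx HW ring with AF_XDP descriptors
 * @xdp_ring: XDP Tx ring
 * @descs: AF_XDP descriptors to transmit
 * @nb_pkts: number of descriptors in @descs
 * @total_bytes: running byte count for the Tx stats update
 *
 * Processes full batches of PKTS_PER_BATCH descriptors first, then the
 * leftover descriptors one by one.
 **/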
static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts,
				 unsigned int *total_bytes)
{
	u32 batched, leftover, i;

	batched = nb_pkts & ~(PKTS_PER_BATCH - 1);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		i40e_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
	for (i = batched; i < batched + leftover; i++)
		i40e_xmit_pkt(xdp_ring, &descs[i], total_bytes);
}

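/**
 * i40e_set_rs_bit - Set the RS bit on the last produced Tx descriptor
 * @xdp_ring: XDP Tx ring
 *
 * The RS (Report Status) bit makes the hardware report back once the
 * preceding descriptors have been transmitted.
 **/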
static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
{
	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
	struct i40e_tx_desc *tx_desc;

	tx_desc = I40E_TX_DESC(xdp_ring, ntu);
	tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
}

/**
 * i40e_xmit_zc - Performs zero-copy AF_XDP Tx
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if the work is finished.
 **/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
	struct xdp_desc *descs = xdp_ring->xsk_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;

	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
	if (!nb_pkts)
		return false;

	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		i40e_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
			     &total_bytes);

	/* Request an interrupt for the last frame and bump tail ptr. */
	i40e_set_rs_bit(xdp_ring);
	i40e_xdp_ring_update_tail(xdp_ring);

	i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);

	return true;
}

/**
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
 **/
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	tx_ring->xdp_tx_active--;
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

/**
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 *
 * Returns true if cleanup/transmission is done.
 **/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	u32 i, completed_frames, xsk_frames = 0;
	u32 head_idx = i40e_get_head(tx_ring);
	struct i40e_tx_buffer *tx_bi;
	unsigned int ntc;

	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	completed_frames = head_idx - tx_ring->next_to_clean;

	if (completed_frames == 0)
		goto out_xmit;

	if (likely(!tx_ring->xdp_tx_active)) {
		xsk_frames = completed_frames;
		goto skip;
	}

	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf) {
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
			tx_bi->xdpf = NULL;
		} else {
			xsk_frames++;
		}

		if (++ntc >= tx_ring->count)
			ntc = 0;
	}

skip:
	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, completed_frames);

out_xmit:
	if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
		xsk_set_tx_need_wakeup(tx_ring->xsk_pool);

	return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}

/**
 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup netdev op
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
 *
 * Returns <0 for errors, 0 otherwise.
 **/
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -EAGAIN;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -ENXIO;

	if (queue_id >= vsi->num_queue_pairs)
		return -ENXIO;

	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return -ENXIO;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	return 0;
}

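/**
 * i40e_xsk_clean_rx_ring - Clean the Rx ring on shutdown
 * @rx_ring: Rx ring
 *
 * Frees all xdp_buffs that are still held in the buffer info array.
 **/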
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, i);

		if (!rx_bi)
			continue;

		xsk_buff_free(rx_bi);
		*i40e_rx_bi(rx_ring, i) = NULL;
	}
}

/**
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
 **/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	struct i40e_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc >= tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);
}

/**
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
 * buffer pool attached
 * @vsi: Current VSI
 *
 * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
 **/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (xsk_get_pool_from_qid(netdev, i))
			return true;
	}

	return false;
}