// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
{
	unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;

	rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
	return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
}

void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi_zc, 0,
	       sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
}

static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi_zc[idx];
}

/**
 * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid
 * @vsi: Current VSI
 * @umem: UMEM
 * @qid: Rx ring to associate UMEM to
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
				u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	bool if_running;
	int err;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_buff_dma_map(umem, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;

		/* Kick start the NAPI context so that receiving will start */
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_umem_disable - Disassociate a UMEM from a certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to disassociate UMEM from
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xdp_umem *umem;
	bool if_running;
	int err;

	umem = xdp_get_umem_from_qid(netdev, qid);
	if (!umem)
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_buff_dma_unmap(umem, I40E_RX_DMA_ATTR);

	if (if_running) {
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_umem_setup - Enable/disable a UMEM to/from a ring/qid
 * @vsi: Current VSI
 * @umem: UMEM to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate UMEM to/from
 *
 * This function enables or disables a UMEM to/from a certain ring.
 *
 * Returns 0 on success, <0 on failure
 **/
int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
			u16 qid)
{
	return umem ? i40e_xsk_umem_enable(vsi, umem, qid) :
		i40e_xsk_umem_disable(vsi, qid);
}

/**
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
 **/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	rcu_read_lock();
	/* NB! xdp_prog will always be non-NULL, since this path is
	 * only taken when an XDP program is set.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	}
	rcu_read_unlock();
	return result;
}

bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct xdp_buff **bi, *xdp;
	dma_addr_t dma;
	bool ok = true;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = i40e_rx_bi(rx_ring, ntu);
	do {
		xdp = xsk_buff_alloc(rx_ring->xsk_umem);
		if (!xdp) {
			ok = false;
			goto no_buffers;
		}
		*bi = xdp;
		dma = xsk_buff_xdp_get_dma(xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->read.hdr_addr = 0;

		rx_desc++;
		bi++;
		ntu++;

		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = i40e_rx_bi(rx_ring, 0);
			ntu = 0;
		}

		count--;
	} while (count);

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return ok;
}

/**
 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
 * @rx_ring: Rx ring
 * @xdp: xdp_buff
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb, or NULL on failure.
 **/
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	xsk_buff_free(xdp);
	return skb;
}

/**
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns amount of work completed
 **/
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union i40e_rx_desc *rx_desc;
		struct xdp_buff **bi;
		unsigned int size;
		u64 qword;

		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			failure = failure ||
				  !i40e_alloc_rx_buffers_zc(rx_ring,
							    cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring,
						      rx_desc->raw.qword[0],
						      qword);
			bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
			xsk_buff_free(*bi);
			*bi = NULL;
			cleaned_count++;
			i40e_inc_ntc(rx_ring);
			continue;
		}

		bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		(*bi)->data_end = (*bi)->data + size;
		xsk_buff_dma_sync_for_cpu(*bi);

		xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
		if (xdp_res) {
			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR))
				xdp_xmit |= xdp_res;
			else
				xsk_buff_free(*bi);

			*bi = NULL;
			total_rx_bytes += size;
			total_rx_packets++;

			cleaned_count++;
			i40e_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */

		/* NB! We are not checking for errors using
		 * i40e_test_staterr with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because SBP is
		 * *not* set in PRT_SBPVSI (it is not set by default).
		 */
		skb = i40e_construct_skb_zc(rx_ring, *bi);
		*bi = NULL;
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			break;
		}

		cleaned_count++;
		i40e_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
	}

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

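	/* If the user-space application relies on the need_wakeup
	 * feature, tell it to kick the driver when we either failed
	 * to allocate buffers or emptied the ring; otherwise clear
	 * the flag.
	 */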
	if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

/**
 * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if the work is finished.
 **/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
	struct i40e_tx_desc *tx_desc = NULL;
	struct i40e_tx_buffer *tx_bi;
	bool work_done = true;
	struct xdp_desc desc;
	dma_addr_t dma;

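	/* Pull descriptors posted by user space off the AF_XDP Tx ring
	 * and copy them to the HW Tx ring, as long as the budget and
	 * the free HW descriptors allow.
	 */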
	while (budget-- > 0) {
		if (unlikely(!I40E_DESC_UNUSED(xdp_ring))) {
			xdp_ring->tx_stats.tx_busy++;
			work_done = false;
			break;
		}

		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
			break;

		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
						 desc.len);

		tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
		tx_bi->bytecount = desc.len;

		tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz =
			build_ctob(I40E_TX_DESC_CMD_ICRC
				   | I40E_TX_DESC_CMD_EOP,
				   0, desc.len, 0);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		/* Request an interrupt for the last frame and bump tail ptr. */
		tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS <<
						 I40E_TXD_QW1_CMD_SHIFT);
		i40e_xdp_ring_update_tail(xdp_ring);

		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
	}

	return !!budget && work_done;
}

/**
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
 **/
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

/**
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 * @napi_budget: NAPI budget
 *
 * Returns true if cleanup/transmission is done.
 **/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
			   struct i40e_ring *tx_ring, int napi_budget)
{
	unsigned int ntc, total_bytes = 0, budget = vsi->work_limit;
	u32 i, completed_frames, frames_ready, xsk_frames = 0;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	u32 head_idx = i40e_get_head(tx_ring);
	bool work_done = true, xmit_done;
	struct i40e_tx_buffer *tx_bi;

	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	frames_ready = head_idx - tx_ring->next_to_clean;

	if (frames_ready == 0) {
		goto out_xmit;
	} else if (frames_ready > budget) {
		completed_frames = budget;
		work_done = false;
	} else {
		completed_frames = frames_ready;
	}

	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;
		total_bytes += tx_bi->bytecount;

		if (++ntc >= tx_ring->count)
			ntc = 0;
	}

	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, budget);
	i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);

out_xmit:
	if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
		xsk_set_tx_need_wakeup(tx_ring->xsk_umem);

	xmit_done = i40e_xmit_zc(tx_ring, budget);

	return work_done && xmit_done;
}

/**
 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
 *
 * Returns <0 for errors, 0 otherwise.
 **/
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -EAGAIN;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -ENXIO;

	if (queue_id >= vsi->num_queue_pairs)
		return -ENXIO;

	if (!vsi->xdp_rings[queue_id]->xsk_umem)
		return -ENXIO;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	return 0;
}

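/**
 * i40e_xsk_clean_rx_ring - Clean the zero-copy Rx ring on shutdown
 * @rx_ring: Rx ring
 **/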
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, i);

		if (!rx_bi)
			continue;

		xsk_buff_free(rx_bi);
		*i40e_rx_bi(rx_ring, i) = NULL;
	}
}

/**
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
 **/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	struct i40e_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc >= tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);
}

/**
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached
 * @vsi: Current VSI
 *
 * Returns true if any of the Rx rings has an AF_XDP UMEM attached
 **/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (xdp_get_umem_from_qid(netdev, i))
			return true;
	}

	return false;
}