// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_type.h"
#include "ice_xsk.h"
#include "ice_txrx.h"
#include "ice_txrx_lib.h"
#include "ice_lib.h"

/**
 * ice_qp_reset_stats - Resets all stats for rings of given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
	memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
	       sizeof(vsi->rx_rings[q_idx]->rx_stats));
	memset(&vsi->tx_rings[q_idx]->stats, 0,
	       sizeof(vsi->tx_rings[q_idx]->stats));
	if (ice_is_xdp_ena_vsi(vsi))
		memset(&vsi->xdp_rings[q_idx]->stats, 0,
		       sizeof(vsi->xdp_rings[q_idx]->stats));
}

/**
 * ice_qp_clean_rings - Cleans all the rings of a given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
	if (ice_is_xdp_ena_vsi(vsi))
		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}

/**
 * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
 * @vsi: VSI that has netdev
 * @q_vector: q_vector that has NAPI context
 * @enable: true for enable, false for disable
 */
static void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
		     bool enable)
{
	if (!vsi->netdev || !q_vector)
		return;

	if (enable)
		napi_enable(&q_vector->napi);
	else
		napi_disable(&q_vector->napi);
}

/**
 * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
 * @vsi: the VSI that contains queue vector being un-configured
 * @rx_ring: Rx ring that will have its IRQ disabled
 * @q_vector: queue vector
 */
static void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring,
		 struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u16 reg;
	u32 val;

	/* QINT_TQCTL is being cleared in ice_vsi_stop_tx_ring, so handle
	 * here only QINT_RQCTL
	 */
	reg = rx_ring->reg_idx;
	val = rd32(hw, QINT_RQCTL(reg));

	val &= ~QINT_RQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_RQCTL(reg), val);

	if (q_vector) {
		u16 v_idx = q_vector->v_idx;

		wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
		ice_flush(hw);
		synchronize_irq(pf->msix_entries[v_idx + base].vector);
	}
}

/**
 * ice_qvec_cfg_msix - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	u16 reg_idx = q_vector->reg_idx;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_ring *ring;

	ice_cfg_itr(hw, q_vector);

	wr32(hw, GLINT_RATE(reg_idx),
	     ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));

	ice_for_each_ring(ring, q_vector->tx)
		ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx,
				      q_vector->tx.itr_idx);

	ice_for_each_ring(ring, q_vector->rx)
		ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx,
				      q_vector->rx.itr_idx);

	ice_flush(hw);
}

/**
 * ice_qvec_ena_irq - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	ice_irq_dynamic_ena(hw, vsi, q_vector);

	ice_flush(hw);
}

/**
 * ice_qp_dis - Disables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_txq_meta txq_meta = { };
	struct ice_ring *tx_ring, *rx_ring;
	struct ice_q_vector *q_vector;
	int timeout = 50;
	int err;

	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

	while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}
	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

	ice_qvec_dis_irq(vsi, rx_ring, q_vector);

	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
	if (err)
		return err;
	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(&txq_meta, 0, sizeof(txq_meta));
		ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
		err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
					   &txq_meta);
		if (err)
			return err;
	}
	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
	if (err)
		return err;

	ice_qvec_toggle_napi(vsi, q_vector, false);
	ice_qp_clean_rings(vsi, q_idx);
	ice_qp_reset_stats(vsi, q_idx);

	return 0;
}

/**
 * ice_qp_ena - Enables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	struct ice_ring *tx_ring, *rx_ring;
	struct ice_q_vector *q_vector;
	u16 size;
	int err;

	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	size = struct_size(qg_buf, txqs, 1);
	qg_buf = kzalloc(size, GFP_KERNEL);
	if (!qg_buf)
		return -ENOMEM;

	qg_buf->num_txqs = 1;

	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

	err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
	if (err)
		goto free_buf;

	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(qg_buf, 0, size);
		qg_buf->num_txqs = 1;
		err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
		if (err)
			goto free_buf;
		ice_set_ring_xdp(xdp_ring);
		xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
	}

	err = ice_setup_rx_ctx(rx_ring);
	if (err)
		goto free_buf;

	ice_qvec_cfg_msix(vsi, q_vector);

	err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
	if (err)
		goto free_buf;

	clear_bit(__ICE_CFG_BUSY, vsi->state);
	ice_qvec_toggle_napi(vsi, q_vector, true);
	ice_qvec_ena_irq(vsi, q_vector);

	netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
free_buf:
	kfree(qg_buf);
	return err;
}

/**
 * ice_xsk_alloc_pools - allocate a buffer pool for an XDP socket
 * @vsi: VSI to allocate the buffer pool on
 *
 * Returns 0 on success, negative on error
 */
static int ice_xsk_alloc_pools(struct ice_vsi *vsi)
{
	if (vsi->xsk_pools)
		return 0;

	vsi->xsk_pools = kcalloc(vsi->num_xsk_pools, sizeof(*vsi->xsk_pools),
				 GFP_KERNEL);

	if (!vsi->xsk_pools) {
		vsi->num_xsk_pools = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * ice_xsk_remove_pool - Remove a buffer pool for a certain ring/qid
 * @vsi: VSI from which the buffer pool will be removed
 * @qid: Ring/qid associated with the buffer pool
 */
static void ice_xsk_remove_pool(struct ice_vsi *vsi, u16 qid)
{
	vsi->xsk_pools[qid] = NULL;
	vsi->num_xsk_pools_used--;

	if (vsi->num_xsk_pools_used == 0) {
		kfree(vsi->xsk_pools);
		vsi->xsk_pools = NULL;
		vsi->num_xsk_pools = 0;
	}
}

/**
 * ice_xsk_pool_disable - disable a buffer pool region
 * @vsi: Current VSI
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
	if (!vsi->xsk_pools || qid >= vsi->num_xsk_pools ||
	    !vsi->xsk_pools[qid])
		return -EINVAL;

	xsk_pool_dma_unmap(vsi->xsk_pools[qid], ICE_RX_DMA_ATTR);
	ice_xsk_remove_pool(vsi, qid);

	return 0;
}

/**
 * ice_xsk_pool_enable - enable a buffer pool region
 * @vsi: Current VSI
 * @pool: pointer to a requested buffer pool region
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	int err;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	if (!vsi->num_xsk_pools)
		vsi->num_xsk_pools = min_t(u16, vsi->num_rxq, vsi->num_txq);
	if (qid >= vsi->num_xsk_pools)
		return -EINVAL;

	err = ice_xsk_alloc_pools(vsi);
	if (err)
		return err;

	if (vsi->xsk_pools && vsi->xsk_pools[qid])
		return -EBUSY;

	vsi->xsk_pools[qid] = pool;
	vsi->num_xsk_pools_used++;

	err = xsk_pool_dma_map(vsi->xsk_pools[qid], ice_pf_to_dev(vsi->back),
			       ICE_RX_DMA_ATTR);
	if (err)
		return err;

	return 0;
}

/**
 * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
 * @vsi: Current VSI
 * @pool: buffer pool to enable/associate to a ring, NULL to disable
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	bool if_running, pool_present = !!pool;
	int ret = 0, pool_failure = 0;

	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);

	if (if_running) {
		ret = ice_qp_dis(vsi, qid);
		if (ret) {
			netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
			goto xsk_pool_if_up;
		}
	}

	pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
				      ice_xsk_pool_disable(vsi, qid);

xsk_pool_if_up:
	if (if_running) {
		ret = ice_qp_ena(vsi, qid);
		if (!ret && pool_present)
			napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
		else if (ret)
			netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
	}

	if (pool_failure) {
		netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
			   pool_present ? "en" : "dis", pool_failure);
		return pool_failure;
	}

	return ret;
}

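/* A note on call context (an assumption based on the mainline ice driver,
 * not spelled out in this file): ice_xsk_pool_setup() is reached from the
 * driver's ndo_bpf handler when user space binds or unbinds an AF_XDP
 * socket to a queue (XDP_SETUP_XSK_POOL). The sequence above quiesces the
 * queue pair, maps or unmaps the pool for DMA, and brings the pair back up
 * so the hot path picks up the new xsk_pool pointer.
 */
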
/**
 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 *
 * Returns true if all allocations were successful, false if any fail.
 */
bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *rx_buf;
	bool ok = true;
	dma_addr_t dma;

	if (!count)
		return true;

	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	rx_buf = &rx_ring->rx_buf[ntu];

	do {
		rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!rx_buf->xdp) {
			ok = false;
			break;
		}

		dma = xsk_buff_xdp_get_dma(rx_buf->xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->wb.status_error0 = 0;

		rx_desc++;
		rx_buf++;
		ntu++;

		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			rx_buf = rx_ring->rx_buf;
			ntu = 0;
		}
	} while (--count);

	if (rx_ring->next_to_use != ntu) {
		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;
		ice_release_rx_desc(rx_ring, ntu);
	}

	return ok;
}

/**
 * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
 * @rx_ring: Rx ring
 */
static void ice_bump_ntc(struct ice_ring *rx_ring)
{
	int ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(ICE_RX_DESC(rx_ring, ntc));
}

/**
 * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
 * @rx_ring: Rx ring
 * @rx_buf: zero-copy Rx buffer
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb on success, NULL on failure.
 */
static struct sk_buff *
ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta;
	unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data;
	unsigned int datasize_hard = rx_buf->xdp->data_end -
				     rx_buf->xdp->data_hard_start;
	struct sk_buff *skb;

	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	xsk_buff_free(rx_buf->xdp);
	rx_buf->xdp = NULL;
	return skb;
}

/**
 * ice_run_xdp_zc - Executes an XDP program in zero-copy path
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
{
	int err, result = ICE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct ice_ring *xdp_ring;
	u32 act;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	if (!xdp_prog) {
		rcu_read_unlock();
		return ICE_XDP_PASS;
	}

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
		rcu_read_unlock();
		return result;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
		result = ice_xmit_xdp_buff(xdp, xdp_ring);
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		result = ICE_XDP_CONSUMED;
		break;
	}

	rcu_read_unlock();
	return result;
}

/**
 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
 * @budget: NAPI budget
 *
 * Returns number of processed packets on success, remaining budget on failure.
 */
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	unsigned int xdp_xmit = 0;
	bool failure = false;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		unsigned int size, xdp_res = 0;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u8 rx_ptype;

		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
				   ICE_RX_FLX_DESC_PKT_LEN_M;
		if (!size)
			break;

		rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
		rx_buf->xdp->data_end = rx_buf->xdp->data + size;
		xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);

		xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
		if (xdp_res) {
			if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
				xdp_xmit |= xdp_res;
			else
				xsk_buff_free(rx_buf->xdp);

			rx_buf->xdp = NULL;
			total_rx_bytes += size;
			total_rx_packets++;
			cleaned_count++;

			ice_bump_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */
		skb = ice_construct_skb_zc(rx_ring, rx_buf);
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			break;
		}

		cleaned_count++;
		ice_bump_ntc(rx_ring);

		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		total_rx_bytes += skb->len;
		total_rx_packets++;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
		if (ice_test_staterr(rx_desc, stat_err_bits))
			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
				       ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
		ice_receive_skb(rx_ring, skb, vlan_tag);
	}

	if (cleaned_count >= ICE_RX_BUF_WRITE)
		failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);

	ice_finalize_xdp_rx(rx_ring, xdp_xmit);
	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}

	return failure ? budget : (int)total_rx_packets;
}

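/* The tail of ice_clean_rx_irq_zc() implements the AF_XDP need_wakeup
 * handshake: if the driver ran dry (an allocation failure or a fully
 * consumed ring), the Rx wakeup flag is set so user space knows it must
 * kick the kernel (e.g. via poll()) before expecting more packets;
 * otherwise the flag is cleared and user space may busy-poll the rings
 * without issuing syscalls.
 */
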
/**
 * ice_xmit_zc - Completes AF_XDP entries, and cleans XDP entries
 * @xdp_ring: XDP Tx ring
 * @budget: max number of frames to xmit
 *
 * Returns true if cleanup/transmission is done.
 */
static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
{
	struct ice_tx_desc *tx_desc = NULL;
	bool work_done = true;
	struct xdp_desc desc;
	dma_addr_t dma;

	while (likely(budget-- > 0)) {
		struct ice_tx_buf *tx_buf;

		if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
			xdp_ring->tx_stats.tx_busy++;
			work_done = false;
			break;
		}

		tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];

		if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
			break;

		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
						 desc.len);

		tx_buf->bytecount = desc.len;

		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz =
			ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		ice_xdp_ring_update_tail(xdp_ring);
		xsk_tx_release(xdp_ring->xsk_pool);
	}

	return budget > 0 && work_done;
}

/**
 * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
 * @xdp_ring: XDP Tx ring
 * @tx_buf: Tx buffer to clean
 */
static void
ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
{
	xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
	dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_buf, len, 0);
}

/**
 * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if cleanup/transmission is done.
 */
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
{
	int total_packets = 0, total_bytes = 0;
	s16 ntc = xdp_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	u32 xsk_frames = 0;
	bool xmit_done;

	tx_desc = ICE_TX_DESC(xdp_ring, ntc);
	tx_buf = &xdp_ring->tx_buf[ntc];
	/* track ntc as a negative offset so the wrap test is a zero check */
	ntc -= xdp_ring->count;

	do {
		if (!(tx_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		total_bytes += tx_buf->bytecount;
		total_packets++;

		if (tx_buf->raw_buf) {
			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
			tx_buf->raw_buf = NULL;
		} else {
			xsk_frames++;
		}

		tx_desc->cmd_type_offset_bsz = 0;
		tx_buf++;
		tx_desc++;
		ntc++;

		if (unlikely(!ntc)) {
			ntc -= xdp_ring->count;
			tx_buf = xdp_ring->tx_buf;
			tx_desc = ICE_TX_DESC(xdp_ring, 0);
		}

		prefetch(tx_desc);

	} while (likely(--budget));

	ntc += xdp_ring->count;
	xdp_ring->next_to_clean = ntc;

	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);

	if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
		xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);

	ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
	xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);

	return budget > 0 && xmit_done;
}

/**
 * ice_xsk_wakeup - Implements ndo_xsk_wakeup
 * @netdev: net_device
 * @queue_id: queue to wake up
 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
 *
 * Returns negative on error, zero otherwise.
 */
int
ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
	       u32 __always_unused flags)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_q_vector *q_vector;
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *ring;

	if (test_bit(__ICE_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -ENXIO;

	if (queue_id >= vsi->num_txq)
		return -ENXIO;

	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return -ENXIO;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	q_vector = ring->q_vector;
	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
		ice_trigger_sw_intr(&vsi->back->hw, q_vector);

	return 0;
}

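/* For illustration, a minimal sketch of the user-space side that lands in
 * ice_xsk_wakeup() (assumes a libbpf xsk_socket set up elsewhere; not part
 * of the original file):
 *
 *	if (xsk_ring_prod__needs_wakeup(&tx_ring))
 *		sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
 *
 * The resulting syscall invokes ndo_xsk_wakeup, which marks a NAPI miss if
 * NAPI is already running or triggers a software interrupt otherwise, as
 * the comment above describes.
 */
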
/**
 * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached
 * @vsi: VSI to be checked
 *
 * Returns true if any of the Rx rings has an AF_XDP buff pool attached
 */
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->xsk_pools)
		return false;

	for (i = 0; i < vsi->num_xsk_pools; i++) {
		if (vsi->xsk_pools[i])
			return true;
	}

	return false;
}

/**
 * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
 * @rx_ring: ring to be cleaned
 */
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
{
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (!rx_buf->xdp)
			continue;

		rx_buf->xdp = NULL;
	}
}

/**
 * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
 * @xdp_ring: XDP_Tx ring
 */
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
{
	u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];

		if (tx_buf->raw_buf)
			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
		else
			xsk_frames++;

		tx_buf->raw_buf = NULL;

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}