// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>

#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
{
	unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;

	rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
	return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
}
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi_zc, 0,
	       sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
}
static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi_zc[idx];
}
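/* The helpers above manage rx_bi_zc, a software shadow of the hardware Rx
 * descriptor ring: one xdp_buff pointer per descriptor, so the buffer
 * backing a completed descriptor can be found again at clean time.
 */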
/**
 * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
 * certain ring/qid
 * @vsi: Current VSI
 * @pool: buffer pool
 * @qid: Rx ring to associate buffer pool with
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
				struct xsk_buff_pool *pool,
				u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	bool if_running;
	int err;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;

		/* Kick start the NAPI context so that receiving will start */
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}
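/* Note the ordering above: the pool is DMA-mapped first, then the queue
 * pair is torn down and brought back up so the Rx ring is reprogrammed
 * for zero-copy, and finally NAPI is kicked so the ring starts filling
 * from the newly attached pool.
 */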
/**
 * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
 * certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to disassociate buffer pool from
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xsk_buff_pool *pool;
	bool if_running;
	int err;

	pool = xsk_get_pool_from_qid(netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);

	if (if_running) {
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}
/**
 * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
 * a ring/qid
 * @vsi: Current VSI
 * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate buffer pool (from)/to
 *
 * This function enables or disables a buffer pool on a certain ring.
 *
 * Returns 0 on success, <0 on failure
 **/
int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
			u16 qid)
{
	return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
		i40e_xsk_pool_disable(vsi, qid);
}
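/* For context: this is the XDP_SETUP_XSK_POOL leg of the driver's ndo_bpf
 * callback. A minimal sketch of the dispatch, which in this driver lives
 * in the i40e_xdp() handler in i40e_main.c:
 *
 *	case XDP_SETUP_XSK_POOL:
 *		return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
 *					   xdp->xsk.queue_id);
 */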
/**
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
 **/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	/* NB! xdp_prog will always be !NULL, due to the fact that
	 * this path is enabled by setting an XDP program.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	}
	return result;
}
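/* I40E_XDP_TX and I40E_XDP_REDIR are reported back to the Rx cleaning
 * loop so the buffer is not freed here: ownership moved to the XDP Tx
 * ring or to xdp_do_redirect(). Redirected frames only leave the driver
 * once xdp_do_flush() runs, via i40e_finalize_xdp_rx().
 */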
/**
 * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * This function allocates buffers from the AF_XDP buffer pool and places
 * them on the Rx ring.
 *
 * Returns true for a successful allocation, false otherwise
 **/
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct xdp_buff **bi, *xdp;
	dma_addr_t dma;
	bool ok = true;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = i40e_rx_bi(rx_ring, ntu);
	do {
		xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!xdp) {
			ok = false;
			goto no_buffers;
		}

		*bi = xdp;
		dma = xsk_buff_xdp_get_dma(xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->read.hdr_addr = 0;

		rx_desc++;
		bi++;
		ntu++;

		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = i40e_rx_bi(rx_ring, 0);
			ntu = 0;
		}

		count--;
	} while (count);

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return ok;
}
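/* i40e_release_rx_desc() publishes the refilled descriptors: it stores
 * the new next_to_use and writes the ring's tail register, which is what
 * makes the buffers visible to the hardware.
 */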
/**
 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
 * @rx_ring: Rx ring
 * @xdp: xdp_buff
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb, or NULL on failure.
 **/
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	xsk_buff_free(xdp);
	return skb;
}
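/* XDP_PASS is the one path that is not zero-copy: the payload is copied
 * into a freshly allocated skb and the pool buffer is returned right away
 * via xsk_buff_free(), so the stack never references user-mapped memory.
 */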
/**
 * i40e_inc_ntc: Advance the next_to_clean index
 * @rx_ring: Rx ring
 **/
static void i40e_inc_ntc(struct i40e_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
}
/**
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns amount of work completed
 **/
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union i40e_rx_desc *rx_desc;
		struct xdp_buff **bi;
		unsigned int size;
		u64 qword;

		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring,
						      rx_desc->raw.qword[0],
						      qword);
			bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
			xsk_buff_free(*bi);
			*bi = NULL;
			cleaned_count++;
			i40e_inc_ntc(rx_ring);
			continue;
		}

		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
		(*bi)->data_end = (*bi)->data + size;
		xsk_buff_dma_sync_for_cpu(*bi, rx_ring->xsk_pool);

		xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
		if (xdp_res) {
			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR))
				xdp_xmit |= xdp_res;
			else
				xsk_buff_free(*bi);

			*bi = NULL;
			total_rx_bytes += size;
			total_rx_packets++;

			cleaned_count++;
			i40e_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS: copy into an skb and hand to the stack.
		 *
		 * NB! We are not checking for errors using
		 * i40e_test_staterr with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that
		 * SBP is *not* set in PRT_SBPVSI (default not set).
		 */
		skb = i40e_construct_skb_zc(rx_ring, *bi);
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			break;
		}

		*bi = NULL;
		cleaned_count++;
		i40e_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
	}

	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
		failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}
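/* With need_wakeup in use, returning the packet count even on allocation
 * failure lets NAPI go idle; userspace is told to kick Rx via the flag
 * instead. Without it, returning the full budget on failure keeps NAPI
 * polling so the driver retries the allocation itself.
 */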
/**
 * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if the work is finished.
 **/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
	unsigned int sent_frames = 0, total_bytes = 0;
	struct i40e_tx_desc *tx_desc = NULL;
	struct i40e_tx_buffer *tx_bi;
	struct xdp_desc desc;
	dma_addr_t dma;

	while (budget-- > 0) {
		if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
			break;

		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
						 desc.len);

		tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
		tx_bi->bytecount = desc.len;

		tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz =
			build_ctob(I40E_TX_DESC_CMD_ICRC
				   | I40E_TX_DESC_CMD_EOP,
				   0, desc.len, 0);

		sent_frames++;
		total_bytes += tx_bi->bytecount;

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		/* Request an interrupt for the last frame and bump tail ptr. */
		tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS <<
						 I40E_TXD_QW1_CMD_SHIFT);
		i40e_xdp_ring_update_tail(xdp_ring);

		xsk_tx_release(xdp_ring->xsk_pool);
		i40e_update_tx_stats(xdp_ring, sent_frames, total_bytes);
	}

	return !!budget;
}
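/* Only the final descriptor of the burst carries the RS bit, so the
 * hardware reports completion at most once per burst rather than once
 * per frame; completions are then harvested in bulk via the head
 * writeback read in i40e_clean_xdp_tx_irq().
 */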
/**
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
 **/
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	tx_ring->xdp_tx_active--;
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}
/**
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 *
 * Returns true if cleanup/transmission is done.
 **/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	u32 i, completed_frames, xsk_frames = 0;
	u32 head_idx = i40e_get_head(tx_ring);
	struct i40e_tx_buffer *tx_bi;
	unsigned int ntc;

	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	completed_frames = head_idx - tx_ring->next_to_clean;

	if (completed_frames == 0)
		goto out_xmit;

	if (likely(!tx_ring->xdp_tx_active)) {
		xsk_frames = completed_frames;
		goto skip;
	}

	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf) {
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
			tx_bi->xdpf = NULL;
		} else {
			xsk_frames++;
		}

		if (++ntc >= tx_ring->count)
			ntc = 0;
	}

skip:
	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, completed_frames);

out_xmit:
	if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
		xsk_set_tx_need_wakeup(tx_ring->xsk_pool);

	return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}
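/* The XDP Tx ring is shared by two producers: AF_XDP zero-copy frames
 * and XDP_REDIRECTed xdp_frames from other interfaces. Entries with a
 * non-NULL xdpf belong to the latter and need an unmap and free, while
 * plain zero-copy entries are merely counted and returned to the pool in
 * one xsk_tx_completed() call. When xdp_tx_active is zero, the per-entry
 * scan can be skipped entirely.
 */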
/**
 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
 *
 * Returns <0 for errors, 0 otherwise.
 **/
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -EAGAIN;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -ENXIO;

	if (queue_id >= vsi->num_queue_pairs)
		return -ENXIO;

	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return -ENXIO;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	return 0;
}
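/* Userspace counterpart, for illustration: with XDP_USE_NEED_WAKEUP set
 * on the socket, the application only issues a syscall when the kernel
 * has flagged it, e.g. (libbpf xsk.h helpers, minimal sketch with a
 * hypothetical xsk context struct):
 *
 *	if (xsk_ring_prod__needs_wakeup(&xsk->tx))
 *		sendto(xsk_socket__fd(xsk->socket), NULL, 0,
 *		       MSG_DONTWAIT, NULL, 0);
 *
 * That sendto() reaches this function via ndo_xsk_wakeup.
 */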
/**
 * i40e_xsk_clean_rx_ring - Clean the zero-copy enabled Rx ring
 * @rx_ring: ring to be cleaned
 **/
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, i);

		if (!rx_bi)
			continue;

		xsk_buff_free(rx_bi);
		*i40e_rx_bi(rx_ring, i) = NULL;
	}
}
/**
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
 **/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	struct i40e_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc >= tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);
}
/**
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
 * buffer pool attached
 * @vsi: vsi
 *
 * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
 **/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (xsk_get_pool_from_qid(netdev, i))
			return true;
	}

	return false;
}