/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include <net/page_pool.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"
DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
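/* Program one tx BD at the current producer index and advance the
 * producer.  Returns the software tx_buf slot so the caller can record
 * per-packet state (XDP action, saved rx producer, unmap info).
 */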
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
                                   struct bnxt_tx_ring_info *txr,
                                   dma_addr_t mapping, u32 len)
{
        struct bnxt_sw_tx_bd *tx_buf;
        struct tx_bd *txbd;
        u32 flags;
        u16 prod;

        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[prod];

        txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
        flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
                TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
        txbd->tx_bd_opaque = prod;
        txbd->tx_bd_haddr = cpu_to_le64(mapping);

        prod = NEXT_TX(prod);
        txr->tx_prod = prod;
        return tx_buf;
}
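/* Queue an XDP_TX packet: the rx buffer's DMA mapping is reused for
 * transmit, and the rx producer is saved so it can be rung back to the
 * NIC when the transmit completes.
 */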
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
                            dma_addr_t mapping, u32 len, u16 rx_prod)
{
        struct bnxt_sw_tx_bd *tx_buf;

        tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
        tx_buf->rx_prod = rx_prod;
        tx_buf->action = XDP_TX;
}
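/* Queue an xdp_frame handed to the driver through ndo_xdp_xmit
 * (XDP_REDIRECT).  The frame was DMA-mapped by bnxt_xdp_xmit(), so the
 * unmap info is stashed for the tx completion path.
 */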
static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
                                     struct bnxt_tx_ring_info *txr,
                                     dma_addr_t mapping, u32 len,
                                     struct xdp_frame *xdpf)
{
        struct bnxt_sw_tx_bd *tx_buf;

        tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
        tx_buf->action = XDP_REDIRECT;
        tx_buf->xdpf = xdpf;
        dma_unmap_addr_set(tx_buf, mapping, mapping);
        dma_unmap_len_set(tx_buf, len, 0);
}
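/* Tx completion handler for the XDP ring: unmap and free redirected
 * frames, and for XDP_TX packets ring the rx doorbell once for the
 * last completed packet so its recycled rx buffer becomes visible.
 */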
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        bool rx_doorbell_needed = false;
        struct bnxt_sw_tx_bd *tx_buf;
        u16 tx_cons = txr->tx_cons;
        u16 last_tx_cons = tx_cons;
        int i;

        for (i = 0; i < nr_pkts; i++) {
                tx_buf = &txr->tx_buf_ring[tx_cons];

                if (tx_buf->action == XDP_REDIRECT) {
                        struct pci_dev *pdev = bp->pdev;

                        dma_unmap_single(&pdev->dev,
                                         dma_unmap_addr(tx_buf, mapping),
                                         dma_unmap_len(tx_buf, len),
                                         DMA_TO_DEVICE);
                        xdp_return_frame(tx_buf->xdpf);
                        tx_buf->action = 0;
                        tx_buf->xdpf = NULL;
                } else if (tx_buf->action == XDP_TX) {
                        rx_doorbell_needed = true;
                        last_tx_cons = tx_cons;
                }
                tx_cons = NEXT_TX(tx_cons);
        }
        txr->tx_cons = tx_cons;
        if (rx_doorbell_needed) {
                tx_buf = &txr->tx_buf_ring[last_tx_cons];
                bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
        }
}
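/* Main rx-path XDP hook: run the attached program on the received
 * buffer and carry out its verdict (pass, tx, redirect, or drop).
 */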
/* returns the following:
 * true    - packet consumed by XDP and new buffer is allocated.
 * false   - packet should be passed to the stack.
 */
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
                 struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
{
        struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_rx_bd *rx_buf;
        struct pci_dev *pdev;
        struct xdp_buff xdp;
        dma_addr_t mapping;
        void *orig_data;
        u32 tx_avail;
        u32 offset;
        u32 act;

        if (!xdp_prog)
                return false;

        pdev = bp->pdev;
        rx_buf = &rxr->rx_buf_ring[cons];
        offset = bp->rx_offset;

        mapping = rx_buf->mapping - bp->rx_dma_offset;
        dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);

        txr = rxr->bnapi->tx_ring;
        /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
        xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);
        xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false);
        orig_data = xdp.data;

        act = bpf_prog_run_xdp(xdp_prog, &xdp);

        tx_avail = bnxt_tx_avail(bp, txr);
        /* If the tx ring is not full, we must not update the rx producer yet
         * because we may still be transmitting on some BDs.
         */
        if (tx_avail != bp->tx_ring_size)
                *event &= ~BNXT_RX_EVENT;

        *len = xdp.data_end - xdp.data;
        if (orig_data != xdp.data) {
                offset = xdp.data - xdp.data_hard_start;
                *data_ptr = xdp.data_hard_start + offset;
        }

        switch (act) {
        case XDP_PASS:
                return false;

        case XDP_TX:
                if (tx_avail < 1) {
                        trace_xdp_exception(bp->dev, xdp_prog, act);
                        bnxt_reuse_rx_data(rxr, cons, page);
                        return true;
                }

                *event = BNXT_TX_EVENT;
                dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
                                           bp->rx_dir);
                __bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
                                NEXT_RX(rxr->rx_prod));
                bnxt_reuse_rx_data(rxr, cons, page);
                return true;
        case XDP_REDIRECT:
                /* if we are calling this here then we know that the
                 * redirect is coming from a frame received by the
                 * bnxt_en driver.
                 */
                dma_unmap_page_attrs(&pdev->dev, mapping,
                                     PAGE_SIZE, bp->rx_dir,
                                     DMA_ATTR_WEAK_ORDERING);

                /* if we are unable to allocate a new buffer, abort and reuse */
                if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
                        trace_xdp_exception(bp->dev, xdp_prog, act);
                        bnxt_reuse_rx_data(rxr, cons, page);
                        return true;
                }

                if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
                        trace_xdp_exception(bp->dev, xdp_prog, act);
                        page_pool_recycle_direct(rxr->page_pool, page);
                        return true;
                }

                *event |= BNXT_REDIRECT_EVENT;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(bp->dev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
                bnxt_reuse_rx_data(rxr, cons, page);
                break;
        }
        return true;
}
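/* .ndo_xdp_xmit handler: map frames redirected to this device and
 * queue them on one of the dedicated XDP tx rings, ringing the tx
 * doorbell when XDP_XMIT_FLUSH is set.  Returns the number of frames
 * queued.
 */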
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
                  struct xdp_frame **frames, u32 flags)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_tx_ring_info *txr;
        dma_addr_t mapping;
        int nxmit = 0;
        int ring;
        int i;

        if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
            !bp->tx_nr_rings_xdp ||
            !xdp_prog)
                return -EINVAL;

        ring = smp_processor_id() % bp->tx_nr_rings_xdp;
        txr = &bp->tx_ring[ring];

        if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
                return -EINVAL;

        if (static_branch_unlikely(&bnxt_xdp_locking_key))
                spin_lock(&txr->xdp_tx_lock);

        for (i = 0; i < num_frames; i++) {
                struct xdp_frame *xdp = frames[i];

                if (!bnxt_tx_avail(bp, txr))
                        break;

                mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
                                         DMA_TO_DEVICE);

                if (dma_mapping_error(&pdev->dev, mapping))
                        break;

                __bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
                nxmit++;
        }

        if (flags & XDP_XMIT_FLUSH) {
                /* Sync BD data before updating doorbell */
                wmb();
                bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
        }

        if (static_branch_unlikely(&bnxt_xdp_locking_key))
                spin_unlock(&txr->xdp_tx_lock);

        return nxmit;
}
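/* Install or remove an XDP program.  MTU, ring and feature
 * constraints are validated before the NIC is (re)configured, and the
 * device is closed and reopened around the change if it is running.
 */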
/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
        struct net_device *dev = bp->dev;
        int tx_xdp = 0, rc, tc;
        struct bpf_prog *old;

        if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
                netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
                            bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
                return -EOPNOTSUPP;
        }
        if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
                netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
                return -EOPNOTSUPP;
        }
        if (prog)
                tx_xdp = bp->rx_nr_rings;

        tc = netdev_get_num_tc(dev);
        if (!tc)
                tc = 1;
        rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
                              true, tc, tx_xdp);
        if (rc) {
                netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
                return rc;
        }
        if (netif_running(dev))
                bnxt_close_nic(bp, true, false);

        old = xchg(&bp->xdp_prog, prog);
        if (old)
                bpf_prog_put(old);

        if (prog) {
                bnxt_set_rx_skb_mode(bp, true);
        } else {
                int rx, tx;

                bnxt_set_rx_skb_mode(bp, false);
                bnxt_get_max_rings(bp, &rx, &tx, true);
                if (rx > 1) {
                        bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
                        bp->dev->hw_features |= NETIF_F_LRO;
                }
        }
        bp->tx_nr_rings_xdp = tx_xdp;
        bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
        bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
        bnxt_set_tpa_flags(bp);
        bnxt_set_ring_params(bp);

        if (netif_running(dev))
                return bnxt_open_nic(bp, true, false);

        return 0;
}
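/* .ndo_bpf entry point: dispatch netdev_bpf commands to the driver. */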
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
        struct bnxt *bp = netdev_priv(dev);
        int rc;

        switch (xdp->command) {
        case XDP_SETUP_PROG:
                rc = bnxt_xdp_set(bp, xdp->prog);
                break;
        default:
                rc = -EINVAL;
                break;
        }
        return rc;
}