/*
 * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/bpf_trace.h>
#include "en/xdp.h"
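
/* XDP_TX path: transmit an xdp_buff on the RQ's paired XDP SQ. The
 * xdp_buff is converted in place to an xdp_frame living in the same
 * page, so the existing RX DMA mapping is reused and only a dma sync
 * for the device is needed before queueing the frame.
 */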
static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
		    struct xdp_buff *xdp)
{
	struct mlx5e_xdp_info xdpi;

	xdpi.xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpi.xdpf))
		return false;
	xdpi.dma_addr = di->addr + (xdpi.xdpf->data - (void *)xdpi.xdpf);
	dma_sync_single_for_device(sq->pdev, xdpi.dma_addr,
				   xdpi.xdpf->len, PCI_DMA_TODEVICE);
	xdpi.di = *di;

	return sq->xmit_xdp_frame(sq, &xdpi);
}

/* returns true if packet was consumed by xdp */
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
		      void *va, u16 *rx_headroom, u32 *len)
{
	struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
	struct xdp_buff xdp;
	u32 act;
	int err;

	if (!prog)
		return false;

	xdp.data = va + *rx_headroom;
	xdp_set_data_meta_invalid(&xdp);
	xdp.data_end = xdp.data + *len;
	xdp.data_hard_start = va;
	xdp.rxq = &rq->xdp_rxq;
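
	/* Run the program and act on its verdict. XDP_PASS returns the
	 * (possibly adjusted) headroom and length to the regular RX path;
	 * XDP_TX and XDP_REDIRECT consume the page, so the caller must not
	 * release it when this function returns true.
	 */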
	act = bpf_prog_run_xdp(prog, &xdp);
	switch (act) {
	case XDP_PASS:
		*rx_headroom = xdp.data - xdp.data_hard_start;
		*len = xdp.data_end - xdp.data;
		return false;
	case XDP_TX:
		if (unlikely(!mlx5e_xmit_xdp_buff(&rq->xdpsq, di, &xdp)))
			goto xdp_abort;
		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
		return true;
	case XDP_REDIRECT:
		/* When XDP enabled then page-refcnt==1 here */
		err = xdp_do_redirect(rq->netdev, &xdp, prog);
		if (unlikely(err))
			goto xdp_abort;
		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
		rq->xdpsq.redirect_flush = true;
		mlx5e_page_dma_unmap(rq, di);
		rq->stats->xdp_redirect++;
		return true;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
xdp_abort:
		trace_xdp_exception(rq->netdev, prog, act);
		/* fall through */
	case XDP_DROP:
		rq->stats->xdp_drop++;
		return true;
	}
}
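
/* An MPWQE session packs multiple XDP transmissions into a single
 * ENHANCED_MPSW WQE, amortizing control-segment and doorbell overhead
 * across packets. A session stays open until its DS budget is exhausted
 * or the RX poll cycle completes.
 */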
static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 wqebbs;
	u16 pi;

	mlx5e_xdpsq_fetch_wqe(sq, &session->wqe);

	prefetchw(session->wqe->data);
	session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

/* The product of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
 * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment.
 * We use a bound lower than MLX5_SEND_WQE_MAX_WQEBBS to let a
 * full-session WQE be cache-aligned.
 */
#if L1_CACHE_BYTES < 128
#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
#else
#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
#endif

	wqebbs = min_t(u16, mlx5_wq_cyc_get_contig_wqebbs(wq, pi),
		       MLX5E_XDP_MPW_MAX_WQEBBS);

	session->max_ds_count = MLX5_SEND_WQEBB_NUM_DS * wqebbs;
}
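
/* Close the current MPWQE session: fill in the control segment with the
 * accumulated DS count, record how many WQEBBs the session consumed, and
 * stage the doorbell for the next flush.
 */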
static void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
	struct mlx5_wqe_ctrl_seg *cseg = &session->wqe->ctrl;
	u16 ds_count = session->ds_count;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];

	cseg->opmod_idx_opcode =
		cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);

	wi->num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS);
	wi->num_ds     = ds_count - MLX5E_XDP_TX_EMPTY_DS_COUNT;

	sq->pc += wi->num_wqebbs;

	sq->doorbell_cseg = cseg;

	session->wqe = NULL; /* Close session */
}
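
/* MPWQE transmit path: open a session if none is active, append the
 * frame as a data segment, and close the session early once it reaches
 * max_ds_count.
 */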
static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
				       struct mlx5e_xdp_info *xdpi)
{
	struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
	struct mlx5e_xdpsq_stats *stats = sq->stats;

	dma_addr_t dma_addr    = xdpi->dma_addr;
	struct xdp_frame *xdpf = xdpi->xdpf;
	unsigned int dma_len   = xdpf->len;

	if (unlikely(sq->hw_mtu < dma_len)) {
		stats->err++;
		return false;
	}

	if (unlikely(!session->wqe)) {
		if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
						     MLX5_SEND_WQE_MAX_WQEBBS))) {
			/* SQ is full, ring doorbell */
			mlx5e_xmit_xdp_doorbell(sq);
			stats->full++;
			return false;
		}

		mlx5e_xdp_mpwqe_session_start(sq);
	}

	mlx5e_xdp_mpwqe_add_dseg(sq, dma_addr, dma_len);

	if (unlikely(session->ds_count == session->max_ds_count))
		mlx5e_xdp_mpwqe_complete(sq);

	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
	stats->xmit++;
	return true;
}
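
/* Legacy transmit path: one SEND WQE per frame. When the SQ's minimal
 * inline mode requires it, the first MLX5E_XDP_MIN_INLINE bytes of the
 * packet are copied into the ethernet segment and the DMA segment is
 * shrunk accordingly.
 */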
static bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
	struct mlx5_wqe_data_seg *dseg = wqe->data;

	struct xdp_frame *xdpf = xdpi->xdpf;
	dma_addr_t dma_addr  = xdpi->dma_addr;
	unsigned int dma_len = xdpf->len;

	struct mlx5e_xdpsq_stats *stats = sq->stats;

	prefetchw(wqe);

	if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
		stats->err++;
		return false;
	}

	if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
		/* SQ is full, ring doorbell */
		mlx5e_xmit_xdp_doorbell(sq);
		stats->full++;
		return false;
	}

	cseg->fm_ce_se = 0;

	/* copy the inline part if required */
	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
		memcpy(eseg->inline_hdr.start, xdpf->data, MLX5E_XDP_MIN_INLINE);
		eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
		dma_len  -= MLX5E_XDP_MIN_INLINE;
		dma_addr += MLX5E_XDP_MIN_INLINE;
		dseg++;
	}

	/* write the dma part */
	dseg->addr       = cpu_to_be64(dma_addr);
	dseg->byte_count = cpu_to_be32(dma_len);

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);

	sq->pc++;

	sq->doorbell_cseg = cseg;

	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
	stats->xmit++;
	return true;
}
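
/* Poll the XDP SQ's completion queue. For redirected frames (rq == NULL)
 * the DMA mapping created in mlx5e_xdp_xmit() is torn down and the frame
 * returned; for XDP_TX completions the RX page is recycled instead.
 */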
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
{
	struct mlx5e_xdp_info_fifo *xdpi_fifo;
	struct mlx5e_xdpsq *sq;
	struct mlx5_cqe64 *cqe;
	bool is_redirect;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_xdpsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	is_redirect = !rq;
	xdpi_fifo = &sq->db.xdpi_fifo;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	i = 0;
	do {
		u16 wqe_counter;
		bool last_wqe;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ))
			netdev_WARN_ONCE(sq->channel->netdev,
					 "Bad OP in XDPSQ CQE: 0x%x\n",
					 get_cqe_opcode(cqe));

		do {
			struct mlx5e_xdp_wqe_info *wi;
			u16 ci;
			int j;

			last_wqe = (sqcc == wqe_counter);
			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			wi = &sq->db.wqe_info[ci];

			sqcc += wi->num_wqebbs;

			for (j = 0; j < wi->num_ds; j++) {
				struct mlx5e_xdp_info xdpi =
					mlx5e_xdpi_fifo_pop(xdpi_fifo);

				if (is_redirect) {
					xdp_return_frame(xdpi.xdpf);
					dma_unmap_single(sq->pdev, xdpi.dma_addr,
							 xdpi.xdpf->len, DMA_TO_DEVICE);
				} else {
					/* Recycle RX page */
					mlx5e_page_release(rq, &xdpi.di, true);
				}
			}
		} while (!last_wqe);
	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	sq->stats->cqes += i;

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->cc = sqcc;
	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
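
/* Drain descriptors still in flight when the SQ is being torn down and
 * no more completions will be processed.
 */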
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
{
	struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
	bool is_redirect = !rq;

	while (sq->cc != sq->pc) {
		struct mlx5e_xdp_wqe_info *wi;
		u16 ci;
		int i;

		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
		wi = &sq->db.wqe_info[ci];

		sq->cc += wi->num_wqebbs;

		for (i = 0; i < wi->num_ds; i++) {
			struct mlx5e_xdp_info xdpi =
				mlx5e_xdpi_fifo_pop(xdpi_fifo);

			if (is_redirect) {
				xdp_return_frame(xdpi.xdpf);
				dma_unmap_single(sq->pdev, xdpi.dma_addr,
						 xdpi.xdpf->len, DMA_TO_DEVICE);
			} else {
				/* Recycle RX page */
				mlx5e_page_release(rq, &xdpi.di, false);
			}
		}
	}
}
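
/* .ndo_xdp_xmit callback: transmit frames redirected here from another
 * device. Each frame gets its own DMA mapping, which is released on
 * completion in mlx5e_poll_xdpsq_cq().
 */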
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		   u32 flags)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_xdpsq *sq;
	int drops = 0;
	int sq_num;
	int i;

	/* this flag is sufficient, no need to test internal sq state */
	if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	sq_num = smp_processor_id();

	if (unlikely(sq_num >= priv->channels.num))
		return -ENXIO;

	sq = &priv->channels.c[sq_num]->xdpsq;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		struct mlx5e_xdp_info xdpi;

		xdpi.dma_addr = dma_map_single(sq->pdev, xdpf->data, xdpf->len,
					       DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, xdpi.dma_addr))) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
			continue;
		}

		xdpi.xdpf = xdpf;

		if (unlikely(!sq->xmit_xdp_frame(sq, &xdpi))) {
			dma_unmap_single(sq->pdev, xdpi.dma_addr,
					 xdpf->len, DMA_TO_DEVICE);
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}
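
	/* Only kick the hardware when the caller requests a flush: close any
	 * open MPWQE session and ring the doorbell once for the whole batch.
	 */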
	if (flags & XDP_XMIT_FLUSH) {
		if (sq->mpwqe.wqe)
			mlx5e_xdp_mpwqe_complete(sq);
		mlx5e_xmit_xdp_doorbell(sq);
	}

	return n - drops;
}
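
/* Called at the end of an RX napi poll: close any open MPWQE session,
 * ring the doorbell for XDP_TX frames queued during the poll, and flush
 * pending redirects.
 */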
void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
{
	struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;

	if (xdpsq->mpwqe.wqe)
		mlx5e_xdp_mpwqe_complete(xdpsq);

	mlx5e_xmit_xdp_doorbell(xdpsq);

	if (xdpsq->redirect_flush) {
		xdp_do_flush_map();
		xdpsq->redirect_flush = false;
	}
}
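
/* Switch between the MPWQE and legacy WQE transmit paths at runtime. */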
void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw)
{
	sq->xmit_xdp_frame = is_mpw ?
		mlx5e_xmit_xdp_frame_mpwqe : mlx5e_xmit_xdp_frame;
}