/* drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
 * From mirror_ubuntu-hirsute-kernel.git, commit "net/mlx5e: Add XSK zero-copy support".
 */
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3
4 #include "tx.h"
5 #include "umem.h"
6 #include "en/xdp.h"
7 #include "en/params.h"
8 #include <net/xdp_sock.h>
9
10 int mlx5e_xsk_async_xmit(struct net_device *dev, u32 qid)
11 {
12 struct mlx5e_priv *priv = netdev_priv(dev);
13 struct mlx5e_params *params = &priv->channels.params;
14 struct mlx5e_channel *c;
15 u16 ix;
16
17 if (unlikely(!mlx5e_xdp_is_open(priv)))
18 return -ENETDOWN;
19
20 if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))
21 return -EINVAL;
22
23 c = priv->channels.c[ix];
24
25 if (unlikely(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)))
26 return -ENXIO;
27
28 if (!napi_if_scheduled_mark_missed(&c->napi)) {
29 spin_lock(&c->xskicosq_lock);
30 mlx5e_trigger_irq(&c->xskicosq);
31 spin_unlock(&c->xskicosq_lock);
32 }
33
34 return 0;
35 }
36
37 /* When TX fails (because of the size of the packet), we need to get completions
38 * in order, so post a NOP to get a CQE. Since AF_XDP doesn't distinguish
39 * between successful TX and errors, handling in mlx5e_poll_xdpsq_cq is the
40 * same.
41 */
42 static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
43 struct mlx5e_xdp_info *xdpi)
44 {
45 u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
46 struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];
47 struct mlx5e_tx_wqe *nopwqe;
48
49 wi->num_wqebbs = 1;
50 wi->num_pkts = 1;
51
52 nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
53 mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
54 sq->doorbell_cseg = &nopwqe->ctrl;
55 }
56
/* Transmit up to @budget frames from the AF_XDP umem TX ring bound to @sq.
 *
 * Returns true when the caller should poll again (the budget was fully used,
 * or the send-queue check failed mid-way); returns false only when the TX
 * ring was drained with budget to spare.
 */
bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
{
	struct xdp_umem *umem = sq->umem;
	struct mlx5e_xdp_info xdpi;
	struct mlx5e_xdp_xmit_data xdptxd;
	bool work_done = true;
	bool flush = false;

	/* Mark frames as XSK so completion handling can tell them apart from
	 * other XDP transmit modes.
	 */
	xdpi.mode = MLX5E_XDP_XMIT_MODE_XSK;

	for (; budget; budget--) {
		/* Check SQ room before consuming a descriptor, so nothing is
		 * taken from the ring that cannot be posted.
		 */
		int check_result = sq->xmit_xdp_frame_check(sq);
		struct xdp_desc desc;

		if (unlikely(check_result < 0)) {
			work_done = false;
			break;
		}

		if (!xsk_umem_consume_tx(umem, &desc)) {
			/* TX will get stuck until something wakes it up by
			 * triggering NAPI. Currently it's expected that the
			 * application calls sendto() if there are consumed, but
			 * not completed frames.
			 */
			break;
		}

		xdptxd.dma_addr = xdp_umem_get_dma(umem, desc.addr);
		xdptxd.data = xdp_umem_get_data(umem, desc.addr);
		xdptxd.len = desc.len;

		/* Flush CPU writes to the frame before handing it to the
		 * device; the umem pages are mapped DMA_BIDIRECTIONAL.
		 */
		dma_sync_single_for_device(sq->pdev, xdptxd.dma_addr,
					   xdptxd.len, DMA_BIDIRECTIONAL);

		if (unlikely(!sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, check_result))) {
			/* Close any open MPWQE session first, then post a NOP
			 * in place of the failed frame so its completion still
			 * arrives in order (see mlx5e_xsk_tx_post_err).
			 */
			if (sq->mpwqe.wqe)
				mlx5e_xdp_mpwqe_complete(sq);

			mlx5e_xsk_tx_post_err(sq, &xdpi);
		}

		/* At least one descriptor was consumed from the ring. */
		flush = true;
	}

	if (flush) {
		/* Finish a pending MPWQE session, ring the doorbell once for
		 * the whole batch, and acknowledge the consumed TX entries
		 * back to the umem.
		 */
		if (sq->mpwqe.wqe)
			mlx5e_xdp_mpwqe_complete(sq);
		mlx5e_xmit_xdp_doorbell(sq);

		xsk_umem_consume_tx_done(umem);
	}

	return !(budget && work_done);
}