/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_EN_TXRX_H___
#define __MLX5_EN_TXRX_H___

#include "en.h"

#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)

#ifndef CONFIG_MLX5_EN_TLS
#define MLX5E_SQ_TLS_ROOM (0)
#else
/* TLS offload requires additional stop_room for:
 *  - a resync SKB.
 */
#define MLX5E_SQ_TLS_ROOM  \
	(MLX5_SEND_WQE_MAX_WQEBBS)
#endif

#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

/* True when at least @n WQEBBs are free between the consumer counter @cc
 * and the producer counter @pc of the cyclic work queue.
 */
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}

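/* Illustrative sketch (editor's addition, not part of the upstream file):
 * the xmit path typically combines the stop-room budget defined above with
 * this helper and stops the netdev queue when room runs short. The sq->cc,
 * sq->pc and sq->txq fields are assumed from struct mlx5e_txqsq in en.h.
 */
static inline void mlx5e_example_check_stop_room(struct mlx5e_txqsq *sq)
{
	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
					     MLX5E_SQ_STOP_ROOM +
					     MLX5E_SQ_TLS_ROOM)))
		netif_tx_stop_queue(sq->txq);
}
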
/* Fetch a zeroed @size-byte WQE at the current producer position and
 * report its WQ index through @pi.
 */
static inline void *
mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq, size_t size, u16 *pi)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	void *wqe;

	*pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
	memset(wqe, 0, size);

	return wqe;
}

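/* Illustrative sketch (editor's addition): a regular TX descriptor is a
 * control segment followed by an eth segment, so callers usually fetch
 * sizeof(struct mlx5e_tx_wqe) bytes and keep the returned index for their
 * completion bookkeeping. struct mlx5e_tx_wqe is assumed from en.h.
 */
static inline struct mlx5e_tx_wqe *
mlx5e_example_fetch_tx_wqe(struct mlx5e_txqsq *sq, u16 *pi)
{
	return mlx5e_sq_fetch_wqe(sq, sizeof(struct mlx5e_tx_wqe), pi);
}
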
/* Post a single NOP WQEBB at the current producer position and advance
 * the producer counter.
 */
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);

	(*pc)++;

	return wqe;
}

/* Like mlx5e_post_nop(), but with the small initiator fence set in the
 * control segment, so the NOP is ordered after previously posted WQEs.
 */
static inline struct mlx5e_tx_wqe *
mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);
	cseg->fm_ce_se         = MLX5_FENCE_MODE_INITIATOR_SMALL;

	(*pc)++;

	return wqe;
}

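/* Illustrative sketch (editor's addition): a caller that needs new work
 * ordered after everything already posted could emit a fenced NOP like
 * this. The wqe_info bookkeeping mirrors mlx5e_fill_sq_frag_edge() below;
 * field names are taken from their uses in this file.
 */
static inline void mlx5e_example_post_fenced_nop(struct mlx5e_txqsq *sq)
{
	u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	wi->skb = NULL;
	wi->num_wqebbs = 1;
	mlx5e_post_nop_fence(&sq->wq, sq->sqn, &sq->pc);
}
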
static inline void
mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
			u16 pi, u16 nnops)
{
	struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];

	edge_wi = wi + nnops;

	/* fill sq frag edge with nops to avoid wqe wrapping two pages */
	for (; wi < edge_wi; wi++) {
		wi->skb = NULL;
		wi->num_wqebbs = 1;
		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
	}
	sq->stats->nop += nnops;
}

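/* Illustrative sketch (editor's addition): before fetching a multi-WQEBB
 * WQE, the xmit path typically checks whether it would wrap the cyclic WQ
 * frag and, if so, pads up to the edge with NOPs. The helper
 * mlx5_wq_cyc_get_contig_wqebbs() is assumed from wq.h.
 */
static inline u16
mlx5e_example_wqe_align(struct mlx5e_txqsq *sq, u16 num_wqebbs)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	u16 contig = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);

	if (unlikely(contig < num_wqebbs)) {
		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig);
		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}
	return pi;
}
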
static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
		struct mlx5_wqe_ctrl_seg *ctrl)
{
	ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();

	*wq->db = cpu_to_be32(pc);

	/* ensure doorbell record is visible to device before ringing the
	 * doorbell
	 */
	wmb();

	mlx5_write64((__be32 *)ctrl, uar_map);
}

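/* Illustrative sketch (editor's addition): after building a WQE and
 * advancing sq->pc, the doorbell is typically rung only when no further
 * packets are pending. sq->uar_map and sq->txq are assumed from
 * struct mlx5e_txqsq in en.h.
 */
static inline void
mlx5e_example_ring_doorbell(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe *wqe,
			    bool xmit_more)
{
	if (!xmit_more || netif_xmit_stopped(sq->txq))
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
}
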
static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5e_tx_wqe *wqe)
{
	return !!wqe->ctrl.tisn;
}

static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

/* Record a DMA mapping in the SQ's DMA fifo so the completion path can
 * unmap it later.
 */
static inline void
mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
	       enum mlx5e_dma_map_type map_type)
{
	struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

	dma->addr = addr;
	dma->size = size;
	dma->type = map_type;
}

/* Release a mapping previously recorded by mlx5e_dma_push(). */
static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

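/* Illustrative sketch (editor's addition): the completion path walks the
 * DMA fifo in the same order the xmit path pushed entries. The
 * sq->dma_fifo_cc counter and sq->pdev are assumed from
 * struct mlx5e_txqsq in en.h.
 */
static inline void
mlx5e_example_unmap_tx_dma(struct mlx5e_txqsq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *dma =
			mlx5e_dma_get(sq, sq->dma_fifo_cc++);

		mlx5e_tx_dma_unmap(sq->pdev, dma);
	}
}
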
/* SW parser related functions */

struct mlx5e_swp_spec {
	__be16 l3_proto;
	u8 l4_proto;
	u8 is_tun;
	__be16 tun_l3_proto;
	u8 tun_l4_proto;
};

static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
{
	/* SWP offsets are in 2-byte words */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
	if (swp_spec->l4_proto) {
		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
		if (swp_spec->l4_proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
	}

	if (swp_spec->is_tun) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
	}
	switch (swp_spec->tun_l4_proto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		/* fall through */
	case IPPROTO_TCP:
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		break;
	}
}

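/* Illustrative sketch (editor's addition): a hypothetical caller offloading
 * a TCP flow carried inside a UDP-encapsulated IPv4 tunnel might describe
 * the packet like this; the exact protocol fields depend on the offload
 * (IPsec, UDP tunnel, ...).
 */
static inline void
mlx5e_example_swp_udp_tunnel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
	struct mlx5e_swp_spec swp_spec = {
		.l3_proto	= skb->protocol,
		.l4_proto	= IPPROTO_UDP,
		.is_tun		= 1,
		.tun_l3_proto	= htons(ETH_P_IP),
		.tun_l4_proto	= IPPROTO_TCP,
	};

	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
}
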
#endif