1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015-2019 Mellanox Technologies, Ltd
4 */
5
6 #include <stdint.h>
7 #include <string.h>
8 #include <stdlib.h>
9
10 /* Verbs header. */
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
12 #ifdef PEDANTIC
13 #pragma GCC diagnostic ignored "-Wpedantic"
14 #endif
15 #include <infiniband/verbs.h>
16 #include <infiniband/mlx5dv.h>
17 #ifdef PEDANTIC
18 #pragma GCC diagnostic error "-Wpedantic"
19 #endif
20
21 #include <rte_mbuf.h>
22 #include <rte_mempool.h>
23 #include <rte_prefetch.h>
24 #include <rte_common.h>
25 #include <rte_branch_prediction.h>
26 #include <rte_ether.h>
27 #include <rte_cycles.h>
28 #include <rte_flow.h>
29
30 #include <mlx5_devx_cmds.h>
31 #include <mlx5_prm.h>
32 #include <mlx5_common.h>
33
34 #include "mlx5_defs.h"
35 #include "mlx5.h"
36 #include "mlx5_mr.h"
37 #include "mlx5_utils.h"
38 #include "mlx5_rxtx.h"
39 #include "mlx5_autoconf.h"
40
41 /* TX burst subroutines return codes. */
42 enum mlx5_txcmp_code {
43 MLX5_TXCMP_CODE_EXIT = 0,
44 MLX5_TXCMP_CODE_ERROR,
45 MLX5_TXCMP_CODE_SINGLE,
46 MLX5_TXCMP_CODE_MULTI,
47 MLX5_TXCMP_CODE_TSO,
48 MLX5_TXCMP_CODE_EMPW,
49 };
50
51 /*
52 * These defines are used to configure Tx burst routine option set
53 * supported at compile time. Options that are not specified are optimized
54 * out because the 'if' conditions can be evaluated at compile time.
55 * The offloads with a bigger runtime check overhead (more CPU cycles to
56 * skip) should have the bigger index - this is needed to select the
57 * better matching routine when there is no exact match and some
58 * offloads are not actually requested.
59 */
60 #define MLX5_TXOFF_CONFIG_MULTI (1u << 0) /* Multi-segment packets.*/
61 #define MLX5_TXOFF_CONFIG_TSO (1u << 1) /* TCP send offload supported.*/
62 #define MLX5_TXOFF_CONFIG_SWP (1u << 2) /* Tunnels/SW Parser offloads.*/
63 #define MLX5_TXOFF_CONFIG_CSUM (1u << 3) /* Check Sums offloaded. */
64 #define MLX5_TXOFF_CONFIG_INLINE (1u << 4) /* Data inlining supported. */
65 #define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
66 #define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
67 #define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
68 #define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
69
70 /* The most common offloads groups. */
71 #define MLX5_TXOFF_CONFIG_NONE 0
72 #define MLX5_TXOFF_CONFIG_FULL (MLX5_TXOFF_CONFIG_MULTI | \
73 MLX5_TXOFF_CONFIG_TSO | \
74 MLX5_TXOFF_CONFIG_SWP | \
75 MLX5_TXOFF_CONFIG_CSUM | \
76 MLX5_TXOFF_CONFIG_INLINE | \
77 MLX5_TXOFF_CONFIG_VLAN | \
78 MLX5_TXOFF_CONFIG_METADATA)
79
80 #define MLX5_TXOFF_CONFIG(mask) (olx & MLX5_TXOFF_CONFIG_##mask)
81
82 #define MLX5_TXOFF_DECL(func, olx) \
83 static uint16_t mlx5_tx_burst_##func(void *txq, \
84 struct rte_mbuf **pkts, \
85 uint16_t pkts_n) \
86 { \
87 return mlx5_tx_burst_tmpl((struct mlx5_txq_data *)txq, \
88 pkts, pkts_n, (olx)); \
89 }
90
91 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
92
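/*
 * A hypothetical usage sketch (the routine name "csum_vlan_example" is made
 * up for illustration, it is not declared elsewhere in this file): declaring
 * a Tx burst routine specialized for checksum and VLAN insertion offloads
 * only, and producing the matching entry for the table of burst routines.
 * Because "olx" is a compile-time constant, every MLX5_TXOFF_CONFIG(...)
 * test inside mlx5_tx_burst_tmpl() folds to a constant and the unused
 * offload paths are optimized out, as described above.
 *
 *   MLX5_TXOFF_DECL(csum_vlan_example,
 *                   MLX5_TXOFF_CONFIG_CSUM | MLX5_TXOFF_CONFIG_VLAN)
 *
 *   MLX5_TXOFF_INFO(csum_vlan_example,
 *                   MLX5_TXOFF_CONFIG_CSUM | MLX5_TXOFF_CONFIG_VLAN)
 */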
93 static __rte_always_inline uint32_t
94 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
95
96 static __rte_always_inline int
97 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
98 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
99
100 static __rte_always_inline uint32_t
101 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
102
103 static __rte_always_inline void
104 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
105 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);
106
107 static __rte_always_inline void
108 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
109 const unsigned int strd_n);
110
111 static int
112 mlx5_queue_state_modify(struct rte_eth_dev *dev,
113 struct mlx5_mp_arg_queue_state_modify *sm);
114
115 static inline void
116 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
117 volatile struct mlx5_cqe *restrict cqe,
118 uint32_t phcsum);
119
120 static inline void
121 mlx5_lro_update_hdr(uint8_t *restrict padd,
122 volatile struct mlx5_cqe *restrict cqe,
123 uint32_t len);
124
125 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
126 [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
127 };
128
129 uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
130 uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
131
132 uint64_t rte_net_mlx5_dynf_inline_mask;
133 #define PKT_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
134
135 /**
136 * Build a table to translate Rx completion flags to packet type.
137 *
138 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
139 */
140 void
141 mlx5_set_ptype_table(void)
142 {
143 unsigned int i;
144 uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
145
146 /* Last entry must not be overwritten, reserved for errored packet. */
147 for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
148 (*p)[i] = RTE_PTYPE_UNKNOWN;
149 /*
150 * The index to the array should have:
151 * bit[1:0] = l3_hdr_type
152 * bit[4:2] = l4_hdr_type
153 * bit[5] = ip_frag
154 * bit[6] = tunneled
155 * bit[7] = outer_l3_type
156 */
157 /* L2 */
158 (*p)[0x00] = RTE_PTYPE_L2_ETHER;
159 /* L3 */
160 (*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
161 RTE_PTYPE_L4_NONFRAG;
162 (*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
163 RTE_PTYPE_L4_NONFRAG;
164 /* Fragmented */
165 (*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
166 RTE_PTYPE_L4_FRAG;
167 (*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
168 RTE_PTYPE_L4_FRAG;
169 /* TCP */
170 (*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
171 RTE_PTYPE_L4_TCP;
172 (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
173 RTE_PTYPE_L4_TCP;
174 (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
175 RTE_PTYPE_L4_TCP;
176 (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
177 RTE_PTYPE_L4_TCP;
178 (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
179 RTE_PTYPE_L4_TCP;
180 (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
181 RTE_PTYPE_L4_TCP;
182 /* UDP */
183 (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
184 RTE_PTYPE_L4_UDP;
185 (*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
186 RTE_PTYPE_L4_UDP;
187 /* Repeat with outer_l3_type being set. Just in case. */
188 (*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
189 RTE_PTYPE_L4_NONFRAG;
190 (*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
191 RTE_PTYPE_L4_NONFRAG;
192 (*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
193 RTE_PTYPE_L4_FRAG;
194 (*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
195 RTE_PTYPE_L4_FRAG;
196 (*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
197 RTE_PTYPE_L4_TCP;
198 (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
199 RTE_PTYPE_L4_TCP;
200 (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
201 RTE_PTYPE_L4_TCP;
202 (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
203 RTE_PTYPE_L4_TCP;
204 (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
205 RTE_PTYPE_L4_TCP;
206 (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
207 RTE_PTYPE_L4_TCP;
208 (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
209 RTE_PTYPE_L4_UDP;
210 (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
211 RTE_PTYPE_L4_UDP;
212 /* Tunneled - L3 */
213 (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
214 (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
215 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
216 RTE_PTYPE_INNER_L4_NONFRAG;
217 (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
218 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
219 RTE_PTYPE_INNER_L4_NONFRAG;
220 (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
221 (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
222 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
223 RTE_PTYPE_INNER_L4_NONFRAG;
224 (*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
225 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
226 RTE_PTYPE_INNER_L4_NONFRAG;
227 /* Tunneled - Fragmented */
228 (*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
229 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
230 RTE_PTYPE_INNER_L4_FRAG;
231 (*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
232 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
233 RTE_PTYPE_INNER_L4_FRAG;
234 (*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
235 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
236 RTE_PTYPE_INNER_L4_FRAG;
237 (*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
238 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
239 RTE_PTYPE_INNER_L4_FRAG;
240 /* Tunneled - TCP */
241 (*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
242 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
243 RTE_PTYPE_INNER_L4_TCP;
244 (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
245 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
246 RTE_PTYPE_INNER_L4_TCP;
247 (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
248 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
249 RTE_PTYPE_INNER_L4_TCP;
250 (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
251 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
252 RTE_PTYPE_INNER_L4_TCP;
253 (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
254 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
255 RTE_PTYPE_INNER_L4_TCP;
256 (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
257 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
258 RTE_PTYPE_INNER_L4_TCP;
259 (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
260 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
261 RTE_PTYPE_INNER_L4_TCP;
262 (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
263 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
264 RTE_PTYPE_INNER_L4_TCP;
265 (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
266 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
267 RTE_PTYPE_INNER_L4_TCP;
268 (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
269 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
270 RTE_PTYPE_INNER_L4_TCP;
271 (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
272 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
273 RTE_PTYPE_INNER_L4_TCP;
274 (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
275 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
276 RTE_PTYPE_INNER_L4_TCP;
277 /* Tunneled - UDP */
278 (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
279 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
280 RTE_PTYPE_INNER_L4_UDP;
281 (*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
282 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
283 RTE_PTYPE_INNER_L4_UDP;
284 (*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
285 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
286 RTE_PTYPE_INNER_L4_UDP;
287 (*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
288 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
289 RTE_PTYPE_INNER_L4_UDP;
290 }
291
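/*
 * A worked example of the index layout described above, using only values
 * already present in the table: for a plain TCP over IPv4 packet the CQE
 * reports l3_hdr_type = 2 and l4_hdr_type = 1 (codes the table maps to IPv4
 * and TCP respectively), with no fragment, tunnel or outer L3 bits, so
 *
 *   idx = (0 << 7) | (0 << 6) | (0 << 5) | (1 << 2) | 2 = 0x06
 *
 * which is exactly the entry initialized to
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP.
 */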
292 /**
293 * Build a table to translate packet to checksum type of Verbs.
294 */
295 void
296 mlx5_set_cksum_table(void)
297 {
298 unsigned int i;
299 uint8_t v;
300
301 /*
302 * The index should have:
303 * bit[0] = PKT_TX_TCP_SEG
304 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
305 * bit[4] = PKT_TX_IP_CKSUM
306 * bit[8] = PKT_TX_OUTER_IP_CKSUM
307 * bit[9] = tunnel
308 */
309 for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
310 v = 0;
311 if (i & (1 << 9)) {
312 /* Tunneled packet. */
313 if (i & (1 << 8)) /* Outer IP. */
314 v |= MLX5_ETH_WQE_L3_CSUM;
315 if (i & (1 << 4)) /* Inner IP. */
316 v |= MLX5_ETH_WQE_L3_INNER_CSUM;
317 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
318 v |= MLX5_ETH_WQE_L4_INNER_CSUM;
319 } else {
320 /* No tunnel. */
321 if (i & (1 << 4)) /* IP. */
322 v |= MLX5_ETH_WQE_L3_CSUM;
323 if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
324 v |= MLX5_ETH_WQE_L4_CSUM;
325 }
326 mlx5_cksum_table[i] = v;
327 }
328 }
329
330 /**
331 * Build a table to translate packet type of mbuf to SWP type of Verbs.
332 */
333 void
334 mlx5_set_swp_types_table(void)
335 {
336 unsigned int i;
337 uint8_t v;
338
339 /*
340 * The index should have:
341 * bit[0:1] = PKT_TX_L4_MASK
342 * bit[4] = PKT_TX_IPV6
343 * bit[8] = PKT_TX_OUTER_IPV6
344 * bit[9] = PKT_TX_OUTER_UDP
345 */
346 for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
347 v = 0;
348 if (i & (1 << 8))
349 v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
350 if (i & (1 << 9))
351 v |= MLX5_ETH_WQE_L4_OUTER_UDP;
352 if (i & (1 << 4))
353 v |= MLX5_ETH_WQE_L3_INNER_IPV6;
354 if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
355 v |= MLX5_ETH_WQE_L4_INNER_UDP;
356 mlx5_swp_types_table[i] = v;
357 }
358 }
359
360 /**
361 * Set Software Parser flags and offsets in Ethernet Segment of WQE.
362 * Flags must be preliminary initialized to zero.
363 *
364 * @param loc
365 * Pointer to burst routine local context.
366 * @param swp_flags
367 * Pointer to store Software Parser flags
368 * @param olx
369 * Configured Tx offloads mask. It is fully defined at
370 * compile time and may be used for optimization.
371 *
372 * @return
373 * Software Parser offsets packed in dword.
374 * Software Parser flags are set by pointer.
375 */
376 static __rte_always_inline uint32_t
377 txq_mbuf_to_swp(struct mlx5_txq_local *restrict loc,
378 uint8_t *swp_flags,
379 unsigned int olx)
380 {
381 uint64_t ol, tunnel;
382 unsigned int idx, off;
383 uint32_t set;
384
385 if (!MLX5_TXOFF_CONFIG(SWP))
386 return 0;
387 ol = loc->mbuf->ol_flags;
388 tunnel = ol & PKT_TX_TUNNEL_MASK;
389 /*
390 * Check whether Software Parser is required.
391 * Only customized tunnels may ask for it.
392 */
393 if (likely(tunnel != PKT_TX_TUNNEL_UDP && tunnel != PKT_TX_TUNNEL_IP))
394 return 0;
395 /*
396 * The index should have:
397 * bit[0:1] = PKT_TX_L4_MASK
398 * bit[4] = PKT_TX_IPV6
399 * bit[8] = PKT_TX_OUTER_IPV6
400 * bit[9] = PKT_TX_OUTER_UDP
401 */
402 idx = (ol & (PKT_TX_L4_MASK | PKT_TX_IPV6 | PKT_TX_OUTER_IPV6)) >> 52;
403 idx |= (tunnel == PKT_TX_TUNNEL_UDP) ? (1 << 9) : 0;
404 *swp_flags = mlx5_swp_types_table[idx];
405 /*
406 * Set offsets for SW parser. Since ConnectX-5, SW parser just
407 * complements HW parser. SW parser starts to engage only if HW parser
408 * can't reach a header. For the older devices, HW parser will not kick
409 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
410 * should be set regardless of HW offload.
411 */
412 off = loc->mbuf->outer_l2_len;
413 if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
414 off += sizeof(struct rte_vlan_hdr);
415 set = (off >> 1) << 8; /* Outer L3 offset. */
416 off += loc->mbuf->outer_l3_len;
417 if (tunnel == PKT_TX_TUNNEL_UDP)
418 set |= off >> 1; /* Outer L4 offset. */
419 if (ol & (PKT_TX_IPV4 | PKT_TX_IPV6)) { /* Inner IP. */
420 const uint64_t csum = ol & PKT_TX_L4_MASK;
421 off += loc->mbuf->l2_len;
422 set |= (off >> 1) << 24; /* Inner L3 offset. */
423 if (csum == PKT_TX_TCP_CKSUM ||
424 csum == PKT_TX_UDP_CKSUM ||
425 (MLX5_TXOFF_CONFIG(TSO) && ol & PKT_TX_TCP_SEG)) {
426 off += loc->mbuf->l3_len;
427 set |= (off >> 1) << 16; /* Inner L4 offset. */
428 }
429 }
430 set = rte_cpu_to_le_32(set);
431 return set;
432 }
433
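/*
 * A minimal worked example of the returned dword, assuming a generic UDP
 * tunnel (PKT_TX_TUNNEL_UDP) with made-up but plausible header lengths:
 * outer_l2_len = 14, outer_l3_len = 20, l2_len = 30 (UDP + tunnel header +
 * inner Ethernet), l3_len = 20, and inner PKT_TX_TCP_CKSUM requested.
 * Offsets are packed as 2-byte units:
 *
 *   outer L3 offset = 14 / 2 = 7   -> bits [15:8]
 *   outer L4 offset = 34 / 2 = 17  -> bits [7:0]  (UDP tunnel only)
 *   inner L3 offset = 64 / 2 = 32  -> bits [31:24]
 *   inner L4 offset = 84 / 2 = 42  -> bits [23:16]
 *
 *   set = (32 << 24) | (42 << 16) | (7 << 8) | 17 = 0x202a0711
 *
 * before the final conversion to little-endian.
 */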
434 /**
435 * Convert the Checksum offloads to Verbs.
436 *
437 * @param buf
438 * Pointer to the mbuf.
439 *
440 * @return
441 * Converted checksum flags.
442 */
443 static __rte_always_inline uint8_t
444 txq_ol_cksum_to_cs(struct rte_mbuf *buf)
445 {
446 uint32_t idx;
447 uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
448 const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
449 PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
450
451 /*
452 * The index should have:
453 * bit[0] = PKT_TX_TCP_SEG
454 * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
455 * bit[4] = PKT_TX_IP_CKSUM
456 * bit[8] = PKT_TX_OUTER_IP_CKSUM
457 * bit[9] = tunnel
458 */
459 idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9);
460 return mlx5_cksum_table[idx];
461 }
462
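/*
 * A short worked example of the index computation above, using only flags
 * already referenced in this file: for an mbuf with
 * ol_flags = PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM and no tunnel flag set,
 * shifting the masked flags right by 50 leaves bit[4] (IP checksum) and
 * bit[2] (TCP checksum) set, i.e. idx = 0x14, and mlx5_set_cksum_table()
 * stores MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM at that index.
 */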
463 /**
464 * Internal function to compute the number of used descriptors in an RX queue
465 *
466 * @param rxq
467 * The Rx queue.
468 *
469 * @return
470 * The number of used Rx descriptors.
471 */
472 static uint32_t
473 rx_queue_count(struct mlx5_rxq_data *rxq)
474 {
475 struct rxq_zip *zip = &rxq->zip;
476 volatile struct mlx5_cqe *cqe;
477 const unsigned int cqe_n = (1 << rxq->cqe_n);
478 const unsigned int cqe_cnt = cqe_n - 1;
479 unsigned int cq_ci;
480 unsigned int used;
481
482 /* if we are processing a compressed cqe */
483 if (zip->ai) {
484 used = zip->cqe_cnt - zip->ca;
485 cq_ci = zip->cq_ci;
486 } else {
487 used = 0;
488 cq_ci = rxq->cq_ci;
489 }
490 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
491 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
492 int8_t op_own;
493 unsigned int n;
494
495 op_own = cqe->op_own;
496 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
497 n = rte_be_to_cpu_32(cqe->byte_cnt);
498 else
499 n = 1;
500 cq_ci += n;
501 used += n;
502 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
503 }
504 used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
505 return used;
506 }
507
508 /**
509 * DPDK callback to check the status of an Rx descriptor.
510 *
511 * @param rx_queue
512 * The Rx queue.
513 * @param[in] offset
514 * The index of the descriptor in the ring.
515 *
516 * @return
517 * The status of the Rx descriptor.
518 */
519 int
520 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
521 {
522 struct mlx5_rxq_data *rxq = rx_queue;
523 struct mlx5_rxq_ctrl *rxq_ctrl =
524 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
525 struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
526
527 if (dev->rx_pkt_burst != mlx5_rx_burst) {
528 rte_errno = ENOTSUP;
529 return -rte_errno;
530 }
531 if (offset >= (1 << rxq->elts_n)) {
532 rte_errno = EINVAL;
533 return -rte_errno;
534 }
535 if (offset < rx_queue_count(rxq))
536 return RTE_ETH_RX_DESC_DONE;
537 return RTE_ETH_RX_DESC_AVAIL;
538 }
539
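/*
 * A minimal usage sketch from the application side, assuming "port_id" and
 * "queue_id" refer to a started mlx5 port and queue: the generic ethdev
 * query below dispatches to mlx5_rx_descriptor_status() above.
 *
 *   int st = rte_eth_rx_descriptor_status(port_id, queue_id, 0);
 *
 *   RTE_ETH_RX_DESC_DONE  - descriptor 0 holds a packet not yet retrieved
 *   RTE_ETH_RX_DESC_AVAIL - descriptor 0 is still owned by the device
 *   a negative value      - e.g. -ENOTSUP when the active Rx burst routine
 *                           is not mlx5_rx_burst (see the check above)
 */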
540 /**
541 * DPDK callback to get the RX queue information
542 *
543 * @param dev
544 * Pointer to the device structure.
545 *
546 * @param rx_queue_id
547 * Rx queue identifier.
548 *
549 * @param qinfo
550 * Pointer to the RX queue information structure.
551 *
552 * @return
553 * None.
554 */
555
556 void
557 mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
558 struct rte_eth_rxq_info *qinfo)
559 {
560 struct mlx5_priv *priv = dev->data->dev_private;
561 struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
562 struct mlx5_rxq_ctrl *rxq_ctrl =
563 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
564
565 if (!rxq)
566 return;
567 qinfo->mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
568 rxq->mprq_mp : rxq->mp;
569 qinfo->conf.rx_thresh.pthresh = 0;
570 qinfo->conf.rx_thresh.hthresh = 0;
571 qinfo->conf.rx_thresh.wthresh = 0;
572 qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
573 qinfo->conf.rx_drop_en = 1;
574 qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1;
575 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
576 qinfo->scattered_rx = dev->data->scattered_rx;
577 qinfo->nb_desc = 1 << rxq->elts_n;
578 }
579
580 /**
581 * DPDK callback to get the RX packet burst mode information
582 *
583 * @param dev
584 * Pointer to the device structure.
585 *
586 * @param rx_queue_id
587 * Rx queue identifier.
588 *
589 * @param mode
590 * Pointer to the burst mode information.
591 *
592 * @return
593 * 0 on success, -EINVAL on failure.
594 */
595
596 int
597 mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
598 uint16_t rx_queue_id __rte_unused,
599 struct rte_eth_burst_mode *mode)
600 {
601 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
602
603 if (pkt_burst == mlx5_rx_burst) {
604 snprintf(mode->info, sizeof(mode->info), "%s", "Scalar");
605 } else if (pkt_burst == mlx5_rx_burst_mprq) {
606 snprintf(mode->info, sizeof(mode->info), "%s", "Multi-Packet RQ");
607 } else if (pkt_burst == mlx5_rx_burst_vec) {
608 #if defined RTE_ARCH_X86_64
609 snprintf(mode->info, sizeof(mode->info), "%s", "Vector SSE");
610 #elif defined RTE_ARCH_ARM64
611 snprintf(mode->info, sizeof(mode->info), "%s", "Vector Neon");
612 #elif defined RTE_ARCH_PPC_64
613 snprintf(mode->info, sizeof(mode->info), "%s", "Vector AltiVec");
614 #else
615 return -EINVAL;
616 #endif
617 } else {
618 return -EINVAL;
619 }
620 return 0;
621 }
622
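/*
 * A usage sketch (application side, "port_id" and "queue_id" assumed): the
 * generic ethdev query below ends up in mlx5_rx_burst_mode_get() and returns
 * one of the strings filled in above ("Scalar", "Multi-Packet RQ",
 * "Vector SSE", ...).
 *
 *   struct rte_eth_burst_mode bm;
 *
 *   if (rte_eth_rx_burst_mode_get(port_id, queue_id, &bm) == 0)
 *       printf("Rx burst mode: %s\n", bm.info);
 */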
623 /**
624 * DPDK callback to get the number of used descriptors in a RX queue
625 *
626 * @param dev
627 * Pointer to the device structure.
628 *
629 * @param rx_queue_id
630 * The Rx queue.
631 *
632 * @return
633 * The number of used Rx descriptors,
634 * or -EINVAL if the queue is invalid.
635 */
636 uint32_t
637 mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
638 {
639 struct mlx5_priv *priv = dev->data->dev_private;
640 struct mlx5_rxq_data *rxq;
641
642 if (dev->rx_pkt_burst != mlx5_rx_burst) {
643 rte_errno = ENOTSUP;
644 return -rte_errno;
645 }
646 rxq = (*priv->rxqs)[rx_queue_id];
647 if (!rxq) {
648 rte_errno = EINVAL;
649 return -rte_errno;
650 }
651 return rx_queue_count(rxq);
652 }
653
654 #define MLX5_SYSTEM_LOG_DIR "/var/log"
655 /**
656 * Dump debug information to log file.
657 *
658 * @param fname
659 * The file name.
660 * @param hex_title
661 * If not NULL this string is printed as a header to the output
662 * and the output will be in hexadecimal view.
663 * @param buf
664 * This is the buffer address to print out.
665 * @param hex_len
666 * The number of bytes to dump out.
667 */
668 void
669 mlx5_dump_debug_information(const char *fname, const char *hex_title,
670 const void *buf, unsigned int hex_len)
671 {
672 FILE *fd;
673
674 MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
675 fd = fopen(path, "a+");
676 if (!fd) {
677 DRV_LOG(WARNING, "cannot open %s for debug dump", path);
678 MKSTR(path2, "./%s", fname);
679 fd = fopen(path2, "a+");
680 if (!fd) {
681 DRV_LOG(ERR, "cannot open %s for debug dump", path2);
682 return;
683 }
684 DRV_LOG(INFO, "New debug dump in file %s", path2);
685 } else {
686 DRV_LOG(INFO, "New debug dump in file %s", path);
687 }
688 if (hex_title)
689 rte_hexdump(fd, hex_title, buf, hex_len);
690 else
691 fprintf(fd, "%s", (const char *)buf);
692 fprintf(fd, "\n\n\n");
693 fclose(fd);
694 }
695
696 /**
697 * Move QP from error state to running state and initialize indexes.
698 *
699 * @param txq_ctrl
700 * Pointer to TX queue control structure.
701 *
702 * @return
703 * 0 on success, else -1.
704 */
705 static int
706 tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl)
707 {
708 struct mlx5_mp_arg_queue_state_modify sm = {
709 .is_wq = 0,
710 .queue_id = txq_ctrl->txq.idx,
711 };
712
713 if (mlx5_queue_state_modify(ETH_DEV(txq_ctrl->priv), &sm))
714 return -1;
715 txq_ctrl->txq.wqe_ci = 0;
716 txq_ctrl->txq.wqe_pi = 0;
717 txq_ctrl->txq.elts_comp = 0;
718 return 0;
719 }
720
721 /* Return 1 if the error CQE is already signed, otherwise sign it and return 0. */
722 static int
723 check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe)
724 {
725 static const uint8_t magic[] = "seen";
726 int ret = 1;
727 unsigned int i;
728
729 for (i = 0; i < sizeof(magic); ++i)
730 if (!ret || err_cqe->rsvd1[i] != magic[i]) {
731 ret = 0;
732 err_cqe->rsvd1[i] = magic[i];
733 }
734 return ret;
735 }
736
737 /**
738 * Handle error CQE.
739 *
740 * @param txq
741 * Pointer to TX queue structure.
742 * @param error_cqe
743 * Pointer to the error CQE.
744 *
745 * @return
746 * Negative value if queue recovery failed, otherwise 0 when
747 * the error completion entry has been handled successfully.
748 */
749 static int
750 mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq,
751 volatile struct mlx5_err_cqe *err_cqe)
752 {
753 if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
754 const uint16_t wqe_m = ((1 << txq->wqe_n) - 1);
755 struct mlx5_txq_ctrl *txq_ctrl =
756 container_of(txq, struct mlx5_txq_ctrl, txq);
757 uint16_t new_wqe_pi = rte_be_to_cpu_16(err_cqe->wqe_counter);
758 int seen = check_err_cqe_seen(err_cqe);
759
760 if (!seen && txq_ctrl->dump_file_n <
761 txq_ctrl->priv->config.max_dump_files_num) {
762 MKSTR(err_str, "Unexpected CQE error syndrome "
763 "0x%02x CQN = %u SQN = %u wqe_counter = %u "
764 "wq_ci = %u cq_ci = %u", err_cqe->syndrome,
765 txq->cqe_s, txq->qp_num_8s >> 8,
766 rte_be_to_cpu_16(err_cqe->wqe_counter),
767 txq->wqe_ci, txq->cq_ci);
768 MKSTR(name, "dpdk_mlx5_port_%u_txq_%u_index_%u_%u",
769 PORT_ID(txq_ctrl->priv), txq->idx,
770 txq_ctrl->dump_file_n, (uint32_t)rte_rdtsc());
771 mlx5_dump_debug_information(name, NULL, err_str, 0);
772 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
773 (const void *)((uintptr_t)
774 txq->cqes),
775 sizeof(*err_cqe) *
776 (1 << txq->cqe_n));
777 mlx5_dump_debug_information(name, "MLX5 Error SQ:",
778 (const void *)((uintptr_t)
779 txq->wqes),
780 MLX5_WQE_SIZE *
781 (1 << txq->wqe_n));
782 txq_ctrl->dump_file_n++;
783 }
784 if (!seen)
785 /*
786 * Count errors in WQEs units.
787 * Later it can be improved to count error packets,
788 * for example, by SQ parsing to find how many packets
789 * should be counted for each WQE.
790 */
791 txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
792 new_wqe_pi) & wqe_m;
793 if (tx_recover_qp(txq_ctrl)) {
794 /* Recovering failed - retry later on the same WQE. */
795 return -1;
796 }
797 /* Release all the remaining buffers. */
798 txq_free_elts(txq_ctrl);
799 }
800 return 0;
801 }
802
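/*
 * A numeric illustration of the error accounting above, with made-up ring
 * values: for a 256-WQE SQ (wqe_m = 255), wqe_ci & wqe_m = 44 and an error
 * CQE reporting wqe_counter (new_wqe_pi) = 250 give
 *
 *   ((44 - 250) & 255) = 50
 *
 * i.e. the 50 WQEs from the reported counter up to (but not including) the
 * current producer index, wrapping around the ring, are counted as errored.
 */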
803 /**
804 * Translate RX completion flags to packet type.
805 *
806 * @param[in] rxq
807 * Pointer to RX queue structure.
808 * @param[in] cqe
809 * Pointer to CQE.
810 *
811 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
812 *
813 * @return
814 * Packet type for struct rte_mbuf.
815 */
816 static inline uint32_t
817 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
818 {
819 uint8_t idx;
820 uint8_t pinfo = cqe->pkt_info;
821 uint16_t ptype = cqe->hdr_type_etc;
822
823 /*
824 * The index to the array should have:
825 * bit[1:0] = l3_hdr_type
826 * bit[4:2] = l4_hdr_type
827 * bit[5] = ip_frag
828 * bit[6] = tunneled
829 * bit[7] = outer_l3_type
830 */
831 idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
832 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
833 }
834
835 /**
836 * Initialize Rx WQ and indexes.
837 *
838 * @param[in] rxq
839 * Pointer to RX queue structure.
840 */
841 void
842 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
843 {
844 const unsigned int wqe_n = 1 << rxq->elts_n;
845 unsigned int i;
846
847 for (i = 0; (i != wqe_n); ++i) {
848 volatile struct mlx5_wqe_data_seg *scat;
849 uintptr_t addr;
850 uint32_t byte_count;
851
852 if (mlx5_rxq_mprq_enabled(rxq)) {
853 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
854
855 scat = &((volatile struct mlx5_wqe_mprq *)
856 rxq->wqes)[i].dseg;
857 addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
858 1 << rxq->strd_num_n);
859 byte_count = (1 << rxq->strd_sz_n) *
860 (1 << rxq->strd_num_n);
861 } else {
862 struct rte_mbuf *buf = (*rxq->elts)[i];
863
864 scat = &((volatile struct mlx5_wqe_data_seg *)
865 rxq->wqes)[i];
866 addr = rte_pktmbuf_mtod(buf, uintptr_t);
867 byte_count = DATA_LEN(buf);
868 }
869 /* scat->addr must be able to store a pointer. */
870 MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
871 *scat = (struct mlx5_wqe_data_seg){
872 .addr = rte_cpu_to_be_64(addr),
873 .byte_count = rte_cpu_to_be_32(byte_count),
874 .lkey = mlx5_rx_addr2mr(rxq, addr),
875 };
876 }
877 rxq->consumed_strd = 0;
878 rxq->decompressed = 0;
879 rxq->rq_pi = 0;
880 rxq->zip = (struct rxq_zip){
881 .ai = 0,
882 };
883 /* Update doorbell counter. */
884 rxq->rq_ci = wqe_n >> rxq->sges_n;
885 rte_cio_wmb();
886 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
887 }
888
889 /**
890 * Modify a Verbs/DevX queue state.
891 * This must be called from the primary process.
892 *
893 * @param dev
894 * Pointer to Ethernet device.
895 * @param sm
896 * State modify request parameters.
897 *
898 * @return
899 * 0 in case of success else non-zero value and rte_errno is set.
900 */
901 int
902 mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
903 const struct mlx5_mp_arg_queue_state_modify *sm)
904 {
905 int ret;
906 struct mlx5_priv *priv = dev->data->dev_private;
907
908 if (sm->is_wq) {
909 struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
910 struct mlx5_rxq_ctrl *rxq_ctrl =
911 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
912
913 if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
914 struct ibv_wq_attr mod = {
915 .attr_mask = IBV_WQ_ATTR_STATE,
916 .wq_state = sm->state,
917 };
918
919 ret = mlx5_glue->modify_wq(rxq_ctrl->obj->wq, &mod);
920 } else { /* rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
921 struct mlx5_devx_modify_rq_attr rq_attr;
922
923 memset(&rq_attr, 0, sizeof(rq_attr));
924 if (sm->state == IBV_WQS_RESET) {
925 rq_attr.rq_state = MLX5_RQC_STATE_ERR;
926 rq_attr.state = MLX5_RQC_STATE_RST;
927 } else if (sm->state == IBV_WQS_RDY) {
928 rq_attr.rq_state = MLX5_RQC_STATE_RST;
929 rq_attr.state = MLX5_RQC_STATE_RDY;
930 } else if (sm->state == IBV_WQS_ERR) {
931 rq_attr.rq_state = MLX5_RQC_STATE_RDY;
932 rq_attr.state = MLX5_RQC_STATE_ERR;
933 }
934 ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq,
935 &rq_attr);
936 }
937 if (ret) {
938 DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
939 sm->state, strerror(errno));
940 rte_errno = errno;
941 return ret;
942 }
943 } else {
944 struct mlx5_txq_data *txq = (*priv->txqs)[sm->queue_id];
945 struct mlx5_txq_ctrl *txq_ctrl =
946 container_of(txq, struct mlx5_txq_ctrl, txq);
947 struct ibv_qp_attr mod = {
948 .qp_state = IBV_QPS_RESET,
949 .port_num = (uint8_t)priv->ibv_port,
950 };
951 struct ibv_qp *qp = txq_ctrl->obj->qp;
952
953 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
954 if (ret) {
955 DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
956 "%s", strerror(errno));
957 rte_errno = errno;
958 return ret;
959 }
960 mod.qp_state = IBV_QPS_INIT;
961 ret = mlx5_glue->modify_qp(qp, &mod,
962 (IBV_QP_STATE | IBV_QP_PORT));
963 if (ret) {
964 DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
965 strerror(errno));
966 rte_errno = errno;
967 return ret;
968 }
969 mod.qp_state = IBV_QPS_RTR;
970 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
971 if (ret) {
972 DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
973 strerror(errno));
974 rte_errno = errno;
975 return ret;
976 }
977 mod.qp_state = IBV_QPS_RTS;
978 ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
979 if (ret) {
980 DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
981 strerror(errno));
982 rte_errno = errno;
983 return ret;
984 }
985 }
986 return 0;
987 }
988
989 /**
990 * Modify a Verbs queue state.
991 *
992 * @param dev
993 * Pointer to Ethernet device.
994 * @param sm
995 * State modify request parameters.
996 *
997 * @return
998 * 0 in case of success else non-zero value.
999 */
1000 static int
1001 mlx5_queue_state_modify(struct rte_eth_dev *dev,
1002 struct mlx5_mp_arg_queue_state_modify *sm)
1003 {
1004 struct mlx5_priv *priv = dev->data->dev_private;
1005 int ret = 0;
1006
1007 switch (rte_eal_process_type()) {
1008 case RTE_PROC_PRIMARY:
1009 ret = mlx5_queue_state_modify_primary(dev, sm);
1010 break;
1011 case RTE_PROC_SECONDARY:
1012 ret = mlx5_mp_req_queue_state_modify(&priv->mp_id, sm);
1013 break;
1014 default:
1015 break;
1016 }
1017 return ret;
1018 }
1019
1020 /**
1021 * Handle a Rx error.
1022 * The function sets the RQ state to RESET when the first error CQE is
1023 * seen, then the CQ is drained by the caller's loop. When the CQ is empty,
1024 * it moves the RQ state to READY and re-initializes the RQ.
1025 * Identifying the next CQE and counting errors are the caller's responsibility.
1026 *
1027 * @param[in] rxq
1028 * Pointer to RX queue structure.
1029 * @param[in] vec
1030 * 1 when called from vectorized Rx burst, need to prepare mbufs for the RQ.
1031 * 0 when called from non-vectorized Rx burst.
1032 *
1033 * @return
1034 * -1 in case of recovery error, otherwise the CQE status.
1035 */
1036 int
1037 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
1038 {
1039 const uint16_t cqe_n = 1 << rxq->cqe_n;
1040 const uint16_t cqe_mask = cqe_n - 1;
1041 const unsigned int wqe_n = 1 << rxq->elts_n;
1042 struct mlx5_rxq_ctrl *rxq_ctrl =
1043 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
1044 union {
1045 volatile struct mlx5_cqe *cqe;
1046 volatile struct mlx5_err_cqe *err_cqe;
1047 } u = {
1048 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
1049 };
1050 struct mlx5_mp_arg_queue_state_modify sm;
1051 int ret;
1052
1053 switch (rxq->err_state) {
1054 case MLX5_RXQ_ERR_STATE_NO_ERROR:
1055 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
1056 /* Fall-through */
1057 case MLX5_RXQ_ERR_STATE_NEED_RESET:
1058 sm.is_wq = 1;
1059 sm.queue_id = rxq->idx;
1060 sm.state = IBV_WQS_RESET;
1061 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
1062 return -1;
1063 if (rxq_ctrl->dump_file_n <
1064 rxq_ctrl->priv->config.max_dump_files_num) {
1065 MKSTR(err_str, "Unexpected CQE error syndrome "
1066 "0x%02x CQN = %u RQN = %u wqe_counter = %u"
1067 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
1068 rxq->cqn, rxq_ctrl->wqn,
1069 rte_be_to_cpu_16(u.err_cqe->wqe_counter),
1070 rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
1071 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
1072 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
1073 mlx5_dump_debug_information(name, NULL, err_str, 0);
1074 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
1075 (const void *)((uintptr_t)
1076 rxq->cqes),
1077 sizeof(*u.cqe) * cqe_n);
1078 mlx5_dump_debug_information(name, "MLX5 Error RQ:",
1079 (const void *)((uintptr_t)
1080 rxq->wqes),
1081 16 * wqe_n);
1082 rxq_ctrl->dump_file_n++;
1083 }
1084 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
1085 /* Fall-through */
1086 case MLX5_RXQ_ERR_STATE_NEED_READY:
1087 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
1088 if (ret == MLX5_CQE_STATUS_HW_OWN) {
1089 rte_cio_wmb();
1090 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1091 rte_cio_wmb();
1092 /*
1093 * The RQ consumer index must be zeroed while moving
1094 * from RESET state to RDY state.
1095 */
1096 *rxq->rq_db = rte_cpu_to_be_32(0);
1097 rte_cio_wmb();
1098 sm.is_wq = 1;
1099 sm.queue_id = rxq->idx;
1100 sm.state = IBV_WQS_RDY;
1101 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
1102 &sm))
1103 return -1;
1104 if (vec) {
1105 const uint16_t q_mask = wqe_n - 1;
1106 uint16_t elt_idx;
1107 struct rte_mbuf **elt;
1108 int i;
1109 unsigned int n = wqe_n - (rxq->rq_ci -
1110 rxq->rq_pi);
1111
1112 for (i = 0; i < (int)n; ++i) {
1113 elt_idx = (rxq->rq_ci + i) & q_mask;
1114 elt = &(*rxq->elts)[elt_idx];
1115 *elt = rte_mbuf_raw_alloc(rxq->mp);
1116 if (!*elt) {
1117 for (i--; i >= 0; --i) {
1118 elt_idx = (rxq->rq_ci +
1119 i) & q_mask;
1120 elt = &(*rxq->elts)
1121 [elt_idx];
1122 rte_pktmbuf_free_seg
1123 (*elt);
1124 }
1125 return -1;
1126 }
1127 }
1128 for (i = 0; i < (int)wqe_n; ++i) {
1129 elt = &(*rxq->elts)[i];
1130 DATA_LEN(*elt) =
1131 (uint16_t)((*elt)->buf_len -
1132 rte_pktmbuf_headroom(*elt));
1133 }
1134 /* Padding with a fake mbuf for vec Rx. */
1135 for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
1136 (*rxq->elts)[wqe_n + i] =
1137 &rxq->fake_mbuf;
1138 }
1139 mlx5_rxq_initialize(rxq);
1140 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
1141 }
1142 return ret;
1143 default:
1144 return -1;
1145 }
1146 }
1147
1148 /**
1149 * Get size of the next packet for a given CQE. For compressed CQEs, the
1150 * consumer index is updated only once all packets of the current one have
1151 * been processed.
1152 *
1153 * @param rxq
1154 * Pointer to RX queue.
1155 * @param cqe
1156 * CQE to process.
1157 * @param[out] mcqe
1158 * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
1159 * written.
1160 *
1161 * @return
1162 * 0 in case of empty CQE, otherwise the packet size in bytes.
1163 */
1164 static inline int
1165 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1166 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
1167 {
1168 struct rxq_zip *zip = &rxq->zip;
1169 uint16_t cqe_n = cqe_cnt + 1;
1170 int len;
1171 uint16_t idx, end;
1172
1173 do {
1174 len = 0;
1175 /* Process compressed data in the CQE and mini arrays. */
1176 if (zip->ai) {
1177 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1178 (volatile struct mlx5_mini_cqe8 (*)[8])
1179 (uintptr_t)(&(*rxq->cqes)[zip->ca &
1180 cqe_cnt].pkt_info);
1181
1182 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
1183 *mcqe = &(*mc)[zip->ai & 7];
1184 if ((++zip->ai & 7) == 0) {
1185 /* Invalidate consumed CQEs */
1186 idx = zip->ca;
1187 end = zip->na;
1188 while (idx != end) {
1189 (*rxq->cqes)[idx & cqe_cnt].op_own =
1190 MLX5_CQE_INVALIDATE;
1191 ++idx;
1192 }
1193 /*
1194 * Increment consumer index to skip the number
1195 * of CQEs consumed. Hardware leaves holes in
1196 * the CQ ring for software use.
1197 */
1198 zip->ca = zip->na;
1199 zip->na += 8;
1200 }
1201 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1202 /* Invalidate the rest */
1203 idx = zip->ca;
1204 end = zip->cq_ci;
1205
1206 while (idx != end) {
1207 (*rxq->cqes)[idx & cqe_cnt].op_own =
1208 MLX5_CQE_INVALIDATE;
1209 ++idx;
1210 }
1211 rxq->cq_ci = zip->cq_ci;
1212 zip->ai = 0;
1213 }
1214 /*
1215 * No compressed data, get next CQE and verify if it is
1216 * compressed.
1217 */
1218 } else {
1219 int ret;
1220 int8_t op_own;
1221
1222 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1223 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
1224 if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
1225 rxq->err_state)) {
1226 ret = mlx5_rx_err_handle(rxq, 0);
1227 if (ret == MLX5_CQE_STATUS_HW_OWN ||
1228 ret == -1)
1229 return 0;
1230 } else {
1231 return 0;
1232 }
1233 }
1234 ++rxq->cq_ci;
1235 op_own = cqe->op_own;
1236 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1237 volatile struct mlx5_mini_cqe8 (*mc)[8] =
1238 (volatile struct mlx5_mini_cqe8 (*)[8])
1239 (uintptr_t)(&(*rxq->cqes)
1240 [rxq->cq_ci &
1241 cqe_cnt].pkt_info);
1242
1243 /* Fix endianness. */
1244 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1245 /*
1246 * Current mini array position is the one
1247 * returned by check_cqe64().
1248 *
1249 * If completion comprises several mini arrays,
1250 * as a special case the second one is located
1251 * 7 CQEs after the initial CQE instead of 8
1252 * for subsequent ones.
1253 */
1254 zip->ca = rxq->cq_ci;
1255 zip->na = zip->ca + 7;
1256 /* Compute the next non compressed CQE. */
1257 --rxq->cq_ci;
1258 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1259 /* Get packet size to return. */
1260 len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
1261 *mcqe = &(*mc)[0];
1262 zip->ai = 1;
1263 /* Prefetch all to be invalidated */
1264 idx = zip->ca;
1265 end = zip->cq_ci;
1266 while (idx != end) {
1267 rte_prefetch0(&(*rxq->cqes)[(idx) &
1268 cqe_cnt]);
1269 ++idx;
1270 }
1271 } else {
1272 len = rte_be_to_cpu_32(cqe->byte_cnt);
1273 }
1274 }
1275 if (unlikely(rxq->err_state)) {
1276 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1277 ++rxq->stats.idropped;
1278 } else {
1279 return len;
1280 }
1281 } while (1);
1282 }
1283
1284 /**
1285 * Translate RX completion flags to offload flags.
1286 *
1287 * @param[in] cqe
1288 * Pointer to CQE.
1289 *
1290 * @return
1291 * Offload flags (ol_flags) for struct rte_mbuf.
1292 */
1293 static inline uint32_t
1294 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
1295 {
1296 uint32_t ol_flags = 0;
1297 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1298
1299 ol_flags =
1300 TRANSPOSE(flags,
1301 MLX5_CQE_RX_L3_HDR_VALID,
1302 PKT_RX_IP_CKSUM_GOOD) |
1303 TRANSPOSE(flags,
1304 MLX5_CQE_RX_L4_HDR_VALID,
1305 PKT_RX_L4_CKSUM_GOOD);
1306 return ol_flags;
1307 }
1308
1309 /**
1310 * Fill in mbuf fields from RX completion flags.
1311 * Note that pkt->ol_flags should be initialized outside of this function.
1312 *
1313 * @param rxq
1314 * Pointer to RX queue.
1315 * @param pkt
1316 * mbuf to fill.
1317 * @param cqe
1318 * CQE to process.
1319 * @param rss_hash_res
1320 * Packet RSS Hash result.
1321 */
1322 static inline void
1323 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
1324 volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res)
1325 {
1326 /* Update packet information. */
1327 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
1328 if (rss_hash_res && rxq->rss_hash) {
1329 pkt->hash.rss = rss_hash_res;
1330 pkt->ol_flags |= PKT_RX_RSS_HASH;
1331 }
1332 if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
1333 pkt->ol_flags |= PKT_RX_FDIR;
1334 if (cqe->sop_drop_qpn !=
1335 rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
1336 uint32_t mark = cqe->sop_drop_qpn;
1337
1338 pkt->ol_flags |= PKT_RX_FDIR_ID;
1339 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
1340 }
1341 }
1342 if (rxq->dynf_meta && cqe->flow_table_metadata) {
1343 pkt->ol_flags |= rxq->flow_meta_mask;
1344 *RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset, uint32_t *) =
1345 cqe->flow_table_metadata;
1346 }
1347 if (rxq->csum)
1348 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
1349 if (rxq->vlan_strip &&
1350 (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
1351 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1352 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
1353 }
1354 if (rxq->hw_timestamp) {
1355 pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp);
1356 pkt->ol_flags |= PKT_RX_TIMESTAMP;
1357 }
1358 }
1359
1360 /**
1361 * DPDK callback for RX.
1362 *
1363 * @param dpdk_rxq
1364 * Generic pointer to RX queue structure.
1365 * @param[out] pkts
1366 * Array to store received packets.
1367 * @param pkts_n
1368 * Maximum number of packets in array.
1369 *
1370 * @return
1371 * Number of packets successfully received (<= pkts_n).
1372 */
1373 uint16_t
1374 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1375 {
1376 struct mlx5_rxq_data *rxq = dpdk_rxq;
1377 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1378 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1379 const unsigned int sges_n = rxq->sges_n;
1380 struct rte_mbuf *pkt = NULL;
1381 struct rte_mbuf *seg = NULL;
1382 volatile struct mlx5_cqe *cqe =
1383 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1384 unsigned int i = 0;
1385 unsigned int rq_ci = rxq->rq_ci << sges_n;
1386 int len = 0; /* keep its value across iterations. */
1387
1388 while (pkts_n) {
1389 unsigned int idx = rq_ci & wqe_cnt;
1390 volatile struct mlx5_wqe_data_seg *wqe =
1391 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
1392 struct rte_mbuf *rep = (*rxq->elts)[idx];
1393 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1394 uint32_t rss_hash_res;
1395
1396 if (pkt)
1397 NEXT(seg) = rep;
1398 seg = rep;
1399 rte_prefetch0(seg);
1400 rte_prefetch0(cqe);
1401 rte_prefetch0(wqe);
1402 rep = rte_mbuf_raw_alloc(rxq->mp);
1403 if (unlikely(rep == NULL)) {
1404 ++rxq->stats.rx_nombuf;
1405 if (!pkt) {
1406 /*
1407 * no buffers before we even started,
1408 * bail out silently.
1409 */
1410 break;
1411 }
1412 while (pkt != seg) {
1413 MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
1414 rep = NEXT(pkt);
1415 NEXT(pkt) = NULL;
1416 NB_SEGS(pkt) = 1;
1417 rte_mbuf_raw_free(pkt);
1418 pkt = rep;
1419 }
1420 break;
1421 }
1422 if (!pkt) {
1423 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1424 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
1425 if (!len) {
1426 rte_mbuf_raw_free(rep);
1427 break;
1428 }
1429 pkt = seg;
1430 MLX5_ASSERT(len >= (rxq->crc_present << 2));
1431 pkt->ol_flags &= EXT_ATTACHED_MBUF;
1432 /* If compressed, take hash result from mini-CQE. */
1433 rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
1434 cqe->rx_hash_res :
1435 mcqe->rx_hash_result);
1436 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1437 if (rxq->crc_present)
1438 len -= RTE_ETHER_CRC_LEN;
1439 PKT_LEN(pkt) = len;
1440 if (cqe->lro_num_seg > 1) {
1441 mlx5_lro_update_hdr
1442 (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
1443 len);
1444 pkt->ol_flags |= PKT_RX_LRO;
1445 pkt->tso_segsz = len / cqe->lro_num_seg;
1446 }
1447 }
1448 DATA_LEN(rep) = DATA_LEN(seg);
1449 PKT_LEN(rep) = PKT_LEN(seg);
1450 SET_DATA_OFF(rep, DATA_OFF(seg));
1451 PORT(rep) = PORT(seg);
1452 (*rxq->elts)[idx] = rep;
1453 /*
1454 * Fill NIC descriptor with the new buffer. The lkey and size
1455 * of the buffers are already known, only the buffer address
1456 * changes.
1457 */
1458 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1459 /* If there's only one MR, no need to replace LKey in WQE. */
1460 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1461 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
1462 if (len > DATA_LEN(seg)) {
1463 len -= DATA_LEN(seg);
1464 ++NB_SEGS(pkt);
1465 ++rq_ci;
1466 continue;
1467 }
1468 DATA_LEN(seg) = len;
1469 #ifdef MLX5_PMD_SOFT_COUNTERS
1470 /* Increment bytes counter. */
1471 rxq->stats.ibytes += PKT_LEN(pkt);
1472 #endif
1473 /* Return packet. */
1474 *(pkts++) = pkt;
1475 pkt = NULL;
1476 --pkts_n;
1477 ++i;
1478 /* Align consumer index to the next stride. */
1479 rq_ci >>= sges_n;
1480 ++rq_ci;
1481 rq_ci <<= sges_n;
1482 }
1483 if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1484 return 0;
1485 /* Update the consumer index. */
1486 rxq->rq_ci = rq_ci >> sges_n;
1487 rte_cio_wmb();
1488 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1489 rte_cio_wmb();
1490 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1491 #ifdef MLX5_PMD_SOFT_COUNTERS
1492 /* Increment packets counter. */
1493 rxq->stats.ipackets += i;
1494 #endif
1495 return i;
1496 }
1497
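/*
 * A minimal application-side polling sketch, with assumed "port_id",
 * "queue_id" and burst size: rte_eth_rx_burst() reaches the routine above
 * (or one of its MPRQ/vectorized siblings) through the device's
 * rx_pkt_burst pointer.
 *
 *   struct rte_mbuf *burst[32];
 *   uint16_t nb, j;
 *
 *   nb = rte_eth_rx_burst(port_id, queue_id, burst, RTE_DIM(burst));
 *   for (j = 0; j < nb; ++j)
 *       rte_pktmbuf_free(burst[j]);
 *
 * A real application would inspect each mbuf before freeing it.
 */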
1498 /**
1499 * Update LRO packet TCP header.
1500 * The HW LRO feature doesn't update the TCP header after coalescing the
1501 * TCP segments, but it supplies information in the CQE so SW can fill it in.
1502 *
1503 * @param tcp
1504 * Pointer to the TCP header.
1505 * @param cqe
1506 * Pointer to the completion entry.
1507 * @param phcsum
1508 * The L3 pseudo-header checksum.
1509 */
1510 static inline void
1511 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
1512 volatile struct mlx5_cqe *restrict cqe,
1513 uint32_t phcsum)
1514 {
1515 uint8_t l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
1516 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1517 /*
1518 * The HW calculates only the TCP payload checksum, need to complete
1519 * the TCP header checksum and the L3 pseudo-header checksum.
1520 */
1521 uint32_t csum = phcsum + cqe->csum;
1522
1523 if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
1524 l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
1525 tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
1526 tcp->recv_ack = cqe->lro_ack_seq_num;
1527 tcp->rx_win = cqe->lro_tcp_win;
1528 }
1529 if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
1530 tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
1531 tcp->cksum = 0;
1532 csum += rte_raw_cksum(tcp, (tcp->data_off & 0xF) * 4);
1533 csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
1534 csum = (~csum) & 0xffff;
1535 if (csum == 0)
1536 csum = 0xffff;
1537 tcp->cksum = csum;
1538 }
1539
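/*
 * A small arithmetic example of the checksum completion above, with an
 * arbitrary intermediate value: if the 32-bit accumulator ends up as
 * csum = 0x1a2b3 after adding the pseudo-header checksum, the CQE checksum
 * and the raw checksum of the TCP header, then
 *
 *   fold:   (0x1a2b3 >> 16) + (0x1a2b3 & 0xffff) = 0x1 + 0xa2b3 = 0xa2b4
 *   invert: (~0xa2b4) & 0xffff                   = 0x5d4b
 *
 * and 0x5d4b is written to tcp->cksum (a result of 0 would be replaced by
 * 0xffff).
 */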
1540 /**
1541 * Update LRO packet headers.
1542 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
1543 * TCP segments, but it supplies information in the CQE so SW can fill them in.
1544 *
1545 * @param padd
1546 * The packet address.
1547 * @param cqe
1548 * Pointer to the completion entry.
1549 * @param len
1550 * The packet length.
1551 */
1552 static inline void
1553 mlx5_lro_update_hdr(uint8_t *restrict padd,
1554 volatile struct mlx5_cqe *restrict cqe,
1555 uint32_t len)
1556 {
1557 union {
1558 struct rte_ether_hdr *eth;
1559 struct rte_vlan_hdr *vlan;
1560 struct rte_ipv4_hdr *ipv4;
1561 struct rte_ipv6_hdr *ipv6;
1562 struct rte_tcp_hdr *tcp;
1563 uint8_t *hdr;
1564 } h = {
1565 .hdr = padd,
1566 };
1567 uint16_t proto = h.eth->ether_type;
1568 uint32_t phcsum;
1569
1570 h.eth++;
1571 while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
1572 proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
1573 proto = h.vlan->eth_proto;
1574 h.vlan++;
1575 }
1576 if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
1577 h.ipv4->time_to_live = cqe->lro_min_ttl;
1578 h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
1579 h.ipv4->hdr_checksum = 0;
1580 h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
1581 phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
1582 h.ipv4++;
1583 } else {
1584 h.ipv6->hop_limits = cqe->lro_min_ttl;
1585 h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
1586 sizeof(*h.ipv6));
1587 phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
1588 h.ipv6++;
1589 }
1590 mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum);
1591 }
1592
1593 void
1594 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
1595 {
1596 struct mlx5_mprq_buf *buf = opaque;
1597
1598 if (rte_atomic16_read(&buf->refcnt) == 1) {
1599 rte_mempool_put(buf->mp, buf);
1600 } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
1601 rte_atomic16_set(&buf->refcnt, 1);
1602 rte_mempool_put(buf->mp, buf);
1603 }
1604 }
1605
1606 void
1607 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
1608 {
1609 mlx5_mprq_buf_free_cb(NULL, buf);
1610 }
1611
1612 static inline void
1613 mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx,
1614 const unsigned int strd_n)
1615 {
1616 struct mlx5_mprq_buf *rep = rxq->mprq_repl;
1617 volatile struct mlx5_wqe_data_seg *wqe =
1618 &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
1619 void *addr;
1620
1621 MLX5_ASSERT(rep != NULL);
1622 /* Replace MPRQ buf. */
1623 (*rxq->mprq_bufs)[rq_idx] = rep;
1624 /* Replace WQE. */
1625 addr = mlx5_mprq_buf_addr(rep, strd_n);
1626 wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
1627 /* If there's only one MR, no need to replace LKey in WQE. */
1628 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
1629 wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
1630 /* Stash a mbuf for next replacement. */
1631 if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
1632 rxq->mprq_repl = rep;
1633 else
1634 rxq->mprq_repl = NULL;
1635 }
1636
1637 /**
1638 * DPDK callback for RX with Multi-Packet RQ support.
1639 *
1640 * @param dpdk_rxq
1641 * Generic pointer to RX queue structure.
1642 * @param[out] pkts
1643 * Array to store received packets.
1644 * @param pkts_n
1645 * Maximum number of packets in array.
1646 *
1647 * @return
1648 * Number of packets successfully received (<= pkts_n).
1649 */
1650 uint16_t
1651 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1652 {
1653 struct mlx5_rxq_data *rxq = dpdk_rxq;
1654 const unsigned int strd_n = 1 << rxq->strd_num_n;
1655 const unsigned int strd_sz = 1 << rxq->strd_sz_n;
1656 const unsigned int strd_shift =
1657 MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
1658 const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
1659 const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
1660 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1661 unsigned int i = 0;
1662 uint32_t rq_ci = rxq->rq_ci;
1663 uint16_t consumed_strd = rxq->consumed_strd;
1664 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1665
1666 while (i < pkts_n) {
1667 struct rte_mbuf *pkt;
1668 void *addr;
1669 int ret;
1670 uint32_t len;
1671 uint16_t strd_cnt;
1672 uint16_t strd_idx;
1673 uint32_t offset;
1674 uint32_t byte_cnt;
1675 int32_t hdrm_overlap;
1676 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1677 uint32_t rss_hash_res = 0;
1678
1679 if (consumed_strd == strd_n) {
1680 /* Replace WQE only if the buffer is still in use. */
1681 if (rte_atomic16_read(&buf->refcnt) > 1) {
1682 mprq_buf_replace(rxq, rq_ci & wq_mask, strd_n);
1683 /* Release the old buffer. */
1684 mlx5_mprq_buf_free(buf);
1685 } else if (unlikely(rxq->mprq_repl == NULL)) {
1686 struct mlx5_mprq_buf *rep;
1687
1688 /*
1689 * Currently, the MPRQ mempool is out of buffers
1690 * and memcpy is done regardless of the size of the
1691 * Rx packet. Retry allocation to get back to
1692 * normal.
1693 */
1694 if (!rte_mempool_get(rxq->mprq_mp,
1695 (void **)&rep))
1696 rxq->mprq_repl = rep;
1697 }
1698 /* Advance to the next WQE. */
1699 consumed_strd = 0;
1700 ++rq_ci;
1701 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1702 }
1703 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1704 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
1705 if (!ret)
1706 break;
1707 byte_cnt = ret;
1708 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
1709 MLX5_MPRQ_STRIDE_NUM_SHIFT;
1710 MLX5_ASSERT(strd_cnt);
1711 consumed_strd += strd_cnt;
1712 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
1713 continue;
1714 if (mcqe == NULL) {
1715 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
1716 strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
1717 } else {
1718 /* mini-CQE for MPRQ doesn't have hash result. */
1719 strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
1720 }
1721 MLX5_ASSERT(strd_idx < strd_n);
1722 MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
1723 wq_mask));
1724 pkt = rte_pktmbuf_alloc(rxq->mp);
1725 if (unlikely(pkt == NULL)) {
1726 ++rxq->stats.rx_nombuf;
1727 break;
1728 }
1729 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1730 MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
1731 if (rxq->crc_present)
1732 len -= RTE_ETHER_CRC_LEN;
1733 offset = strd_idx * strd_sz + strd_shift;
1734 addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);
1735 hdrm_overlap = len + RTE_PKTMBUF_HEADROOM - strd_cnt * strd_sz;
1736 /*
1737 * Memcpy packets to the target mbuf if:
1738 * - The size of packet is smaller than mprq_max_memcpy_len.
1739 * - The Mempool for Multi-Packet RQ is out of buffers.
1740 * - The packet's stride overlaps a headroom and scatter is off.
1741 */
1742 if (len <= rxq->mprq_max_memcpy_len ||
1743 rxq->mprq_repl == NULL ||
1744 (hdrm_overlap > 0 && !rxq->strd_scatter_en)) {
1745 if (likely(rte_pktmbuf_tailroom(pkt) >= len)) {
1746 rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
1747 addr, len);
1748 DATA_LEN(pkt) = len;
1749 } else if (rxq->strd_scatter_en) {
1750 struct rte_mbuf *prev = pkt;
1751 uint32_t seg_len =
1752 RTE_MIN(rte_pktmbuf_tailroom(pkt), len);
1753 uint32_t rem_len = len - seg_len;
1754
1755 rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
1756 addr, seg_len);
1757 DATA_LEN(pkt) = seg_len;
1758 while (rem_len) {
1759 struct rte_mbuf *next =
1760 rte_pktmbuf_alloc(rxq->mp);
1761
1762 if (unlikely(next == NULL)) {
1763 rte_pktmbuf_free(pkt);
1764 ++rxq->stats.rx_nombuf;
1765 goto out;
1766 }
1767 NEXT(prev) = next;
1768 SET_DATA_OFF(next, 0);
1769 addr = RTE_PTR_ADD(addr, seg_len);
1770 seg_len = RTE_MIN
1771 (rte_pktmbuf_tailroom(next),
1772 rem_len);
1773 rte_memcpy
1774 (rte_pktmbuf_mtod(next, void *),
1775 addr, seg_len);
1776 DATA_LEN(next) = seg_len;
1777 rem_len -= seg_len;
1778 prev = next;
1779 ++NB_SEGS(pkt);
1780 }
1781 } else {
1782 rte_pktmbuf_free_seg(pkt);
1783 ++rxq->stats.idropped;
1784 continue;
1785 }
1786 } else {
1787 rte_iova_t buf_iova;
1788 struct rte_mbuf_ext_shared_info *shinfo;
1789 uint16_t buf_len = strd_cnt * strd_sz;
1790 void *buf_addr;
1791
1792 /* Increment the refcnt of the whole chunk. */
1793 rte_atomic16_add_return(&buf->refcnt, 1);
1794 MLX5_ASSERT((uint16_t)rte_atomic16_read(&buf->refcnt) <=
1795 strd_n + 1);
1796 buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
1797 /*
1798 * MLX5 device doesn't use iova but it is necessary in a
1799 * case where the Rx packet is transmitted via a
1800 * different PMD.
1801 */
1802 buf_iova = rte_mempool_virt2iova(buf) +
1803 RTE_PTR_DIFF(buf_addr, buf);
1804 shinfo = &buf->shinfos[strd_idx];
1805 rte_mbuf_ext_refcnt_set(shinfo, 1);
1806 /*
1807 * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when
1808 * attaching the stride to mbuf and more offload flags
1809 * will be added below by calling rxq_cq_to_mbuf().
1810 * Other fields will be overwritten.
1811 */
1812 rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
1813 buf_len, shinfo);
1814 /* Set mbuf head-room. */
1815 SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
1816 MLX5_ASSERT(pkt->ol_flags == EXT_ATTACHED_MBUF);
1817 MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >=
1818 len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
1819 DATA_LEN(pkt) = len;
1820 /*
1821 * Copy the last fragment of a packet (up to headroom
1822 * size bytes) in case there is a stride overlap with
1823 * a next packet's headroom. Allocate a separate mbuf
1824 * to store this fragment and link it. Scatter is on.
1825 */
1826 if (hdrm_overlap > 0) {
1827 MLX5_ASSERT(rxq->strd_scatter_en);
1828 struct rte_mbuf *seg =
1829 rte_pktmbuf_alloc(rxq->mp);
1830
1831 if (unlikely(seg == NULL)) {
1832 rte_pktmbuf_free_seg(pkt);
1833 ++rxq->stats.rx_nombuf;
1834 break;
1835 }
1836 SET_DATA_OFF(seg, 0);
1837 rte_memcpy(rte_pktmbuf_mtod(seg, void *),
1838 RTE_PTR_ADD(addr, len - hdrm_overlap),
1839 hdrm_overlap);
1840 DATA_LEN(seg) = hdrm_overlap;
1841 DATA_LEN(pkt) = len - hdrm_overlap;
1842 NEXT(pkt) = seg;
1843 NB_SEGS(pkt) = 2;
1844 }
1845 }
1846 rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
1847 if (cqe->lro_num_seg > 1) {
1848 mlx5_lro_update_hdr(addr, cqe, len);
1849 pkt->ol_flags |= PKT_RX_LRO;
1850 pkt->tso_segsz = len / cqe->lro_num_seg;
1851 }
1852 PKT_LEN(pkt) = len;
1853 PORT(pkt) = rxq->port_id;
1854 #ifdef MLX5_PMD_SOFT_COUNTERS
1855 /* Increment bytes counter. */
1856 rxq->stats.ibytes += PKT_LEN(pkt);
1857 #endif
1858 /* Return packet. */
1859 *(pkts++) = pkt;
1860 ++i;
1861 }
1862 out:
1863 /* Update the consumer indexes. */
1864 rxq->consumed_strd = consumed_strd;
1865 rte_cio_wmb();
1866 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1867 if (rq_ci != rxq->rq_ci) {
1868 rxq->rq_ci = rq_ci;
1869 rte_cio_wmb();
1870 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1871 }
1872 #ifdef MLX5_PMD_SOFT_COUNTERS
1873 /* Increment packets counter. */
1874 rxq->stats.ipackets += i;
1875 #endif
1876 return i;
1877 }
1878
1879 /**
1880 * Dummy DPDK callback for TX.
1881 *
1882 * This function is used to temporarily replace the real callback during
1883 * unsafe control operations on the queue, or in case of error.
1884 *
1885 * @param dpdk_txq
1886 * Generic pointer to TX queue structure.
1887 * @param[in] pkts
1888 * Packets to transmit.
1889 * @param pkts_n
1890 * Number of packets in array.
1891 *
1892 * @return
1893 * Number of packets successfully transmitted (<= pkts_n).
1894 */
1895 uint16_t
1896 removed_tx_burst(void *dpdk_txq __rte_unused,
1897 struct rte_mbuf **pkts __rte_unused,
1898 uint16_t pkts_n __rte_unused)
1899 {
1900 rte_mb();
1901 return 0;
1902 }
1903
1904 /**
1905 * Dummy DPDK callback for RX.
1906 *
1907 * This function is used to temporarily replace the real callback during
1908 * unsafe control operations on the queue, or in case of error.
1909 *
1910 * @param dpdk_rxq
1911 * Generic pointer to RX queue structure.
1912 * @param[out] pkts
1913 * Array to store received packets.
1914 * @param pkts_n
1915 * Maximum number of packets in array.
1916 *
1917 * @return
1918 * Number of packets successfully received (<= pkts_n).
1919 */
1920 uint16_t
1921 removed_rx_burst(void *dpdk_rxq __rte_unused,
1922 struct rte_mbuf **pkts __rte_unused,
1923 uint16_t pkts_n __rte_unused)
1924 {
1925 rte_mb();
1926 return 0;
1927 }
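
/*
 * A minimal usage sketch, assuming the conventional DPDK PMD pattern of
 * swapping the burst callbacks on the control path (the function name
 * below is hypothetical, not part of this driver):
 *
 *	static void hypothetical_queue_reconfig(struct rte_eth_dev *dev)
 *	{
 *		dev->rx_pkt_burst = removed_rx_burst;
 *		dev->tx_pkt_burst = removed_tx_burst;
 *		rte_mb();
 *		.. release and rebuild the queue resources here ..
 *	}
 */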
1928
1929 /*
1930 * Vectorized Rx/Tx routines are not compiled in when the required vector
1931 * instructions are not supported on the target architecture. The following
1932 * weak null stubs are needed for linkage when the vectorized implementations
1933 * (e.g. mlx5_rxtx_vec_sse.c for x86) are not built into the PMD.
1934 */
1935
1936 __rte_weak uint16_t
1937 mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
1938 struct rte_mbuf **pkts __rte_unused,
1939 uint16_t pkts_n __rte_unused)
1940 {
1941 return 0;
1942 }
1943
1944 __rte_weak int
1945 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
1946 {
1947 return -ENOTSUP;
1948 }
1949
1950 __rte_weak int
1951 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
1952 {
1953 return -ENOTSUP;
1954 }
1955
1956 /**
1957 * Free the mbufs from the linear array of pointers.
1958 *
1959 * @param pkts
1960 * Pointer to the array of packets to be freed.
1961 * @param pkts_n
1962 * Number of packets to be freed.
1963 * @param olx
1964 * Configured Tx offloads mask. It is fully defined at
1965 * compile time and may be used for optimization.
1966 */
1967 static __rte_always_inline void
1968 mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts,
1969 unsigned int pkts_n,
1970 unsigned int olx __rte_unused)
1971 {
1972 struct rte_mempool *pool = NULL;
1973 struct rte_mbuf **p_free = NULL;
1974 struct rte_mbuf *mbuf;
1975 unsigned int n_free = 0;
1976
1977 /*
1978 * The implemented algorithm eliminates
1979 * copying pointers to temporary array
1980 * for rte_mempool_put_bulk() calls.
1981 */
1982 MLX5_ASSERT(pkts);
1983 MLX5_ASSERT(pkts_n);
1984 for (;;) {
1985 for (;;) {
1986 /*
1987 * Decrement mbuf reference counter, detach
1988 * indirect and external buffers if needed.
1989 */
1990 mbuf = rte_pktmbuf_prefree_seg(*pkts);
1991 if (likely(mbuf != NULL)) {
1992 MLX5_ASSERT(mbuf == *pkts);
1993 if (likely(n_free != 0)) {
1994 if (unlikely(pool != mbuf->pool))
1995 /* From different pool. */
1996 break;
1997 } else {
1998 /* Start new scan array. */
1999 pool = mbuf->pool;
2000 p_free = pkts;
2001 }
2002 ++n_free;
2003 ++pkts;
2004 --pkts_n;
2005 if (unlikely(pkts_n == 0)) {
2006 mbuf = NULL;
2007 break;
2008 }
2009 } else {
2010 /*
2011 * This happens if mbuf is still referenced.
2012 * We can't put it back to the pool, skip.
2013 */
2014 ++pkts;
2015 --pkts_n;
2016 if (unlikely(n_free != 0))
2017 /* There is some array to free.*/
2018 break;
2019 if (unlikely(pkts_n == 0))
2020 /* Last mbuf, nothing to free. */
2021 return;
2022 }
2023 }
2024 for (;;) {
2025 /*
2026 * This loop is implemented to avoid multiple
2027 * inlining of rte_mempool_put_bulk().
2028 */
2029 MLX5_ASSERT(pool);
2030 MLX5_ASSERT(p_free);
2031 MLX5_ASSERT(n_free);
2032 /*
2033 * Free the array of pre-freed mbufs
2034 * belonging to the same memory pool.
2035 */
2036 rte_mempool_put_bulk(pool, (void *)p_free, n_free);
2037 if (unlikely(mbuf != NULL)) {
2038 /* There is the request to start new scan. */
2039 pool = mbuf->pool;
2040 p_free = pkts++;
2041 n_free = 1;
2042 --pkts_n;
2043 if (likely(pkts_n != 0))
2044 break;
2045 /*
2046 * This is the last mbuf to be freed.
2047 * Do one more loop iteration to complete.
2048 * This is rare case of the last unique mbuf.
2049 */
2050 mbuf = NULL;
2051 continue;
2052 }
2053 if (likely(pkts_n == 0))
2054 return;
2055 n_free = 0;
2056 break;
2057 }
2058 }
2059 }
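
/*
 * A minimal sketch of what the routine above achieves, assuming every
 * mbuf is uniquely owned (the real code batches same-pool mbufs into a
 * single rte_mempool_put_bulk() call instead of freeing one by one):
 *
 *	for (unsigned int i = 0; i < pkts_n; ++i) {
 *		struct rte_mbuf *m = rte_pktmbuf_prefree_seg(pkts[i]);
 *
 *		if (m != NULL)
 *			rte_mempool_put(m->pool, m);
 *	}
 */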
2060
2061 /**
2062 * Free the mbufs from the elts ring buffer up to the new tail.
2063 *
2064 * @param txq
2065 * Pointer to Tx queue structure.
2066 * @param tail
2067 * Index in elts to free up to, becomes new elts tail.
2068 * @param olx
2069 * Configured Tx offloads mask. It is fully defined at
2070 * compile time and may be used for optimization.
2071 */
2072 static __rte_always_inline void
2073 mlx5_tx_free_elts(struct mlx5_txq_data *restrict txq,
2074 uint16_t tail,
2075 unsigned int olx __rte_unused)
2076 {
2077 uint16_t n_elts = tail - txq->elts_tail;
2078
2079 MLX5_ASSERT(n_elts);
2080 MLX5_ASSERT(n_elts <= txq->elts_s);
2081 /*
2082 * Implement a loop to support ring buffer wraparound
2083 * with single inlining of mlx5_tx_free_mbuf().
2084 */
2085 do {
2086 unsigned int part;
2087
2088 part = txq->elts_s - (txq->elts_tail & txq->elts_m);
2089 part = RTE_MIN(part, n_elts);
2090 MLX5_ASSERT(part);
2091 MLX5_ASSERT(part <= txq->elts_s);
2092 mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
2093 part, olx);
2094 txq->elts_tail += part;
2095 n_elts -= part;
2096 } while (n_elts);
2097 }
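
/*
 * A worked example of the wraparound split above, with assumed values
 * elts_s = 256 (elts_m = 255), elts_tail = 250 and tail = 266:
 *
 *	n_elts = 266 - 250 = 16
 *	1st pass: part = 256 - (250 & 255) = 6  -> free elts[250..255]
 *	2nd pass: part = min(256 - 0, 10) = 10  -> free elts[0..9]
 *
 * elts_tail and tail are free-running 16-bit counters, only the masked
 * value indexes the ring.
 */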
2098
2099 /**
2100 * Store the mbuf being sent into elts ring buffer.
2101 * On Tx completion these mbufs will be freed.
2102 *
2103 * @param txq
2104 * Pointer to Tx queue structure.
2105 * @param pkts
2106 * Pointer to array of packets to be stored.
2107 * @param pkts_n
2108 * Number of packets to be stored.
2109 * @param olx
2110 * Configured Tx offloads mask. It is fully defined at
2111 * compile time and may be used for optimization.
2112 */
2113 static __rte_always_inline void
2114 mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq,
2115 struct rte_mbuf **restrict pkts,
2116 unsigned int pkts_n,
2117 unsigned int olx __rte_unused)
2118 {
2119 unsigned int part;
2120 struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
2121
2122 MLX5_ASSERT(pkts);
2123 MLX5_ASSERT(pkts_n);
2124 part = txq->elts_s - (txq->elts_head & txq->elts_m);
2125 MLX5_ASSERT(part);
2126 MLX5_ASSERT(part <= txq->elts_s);
2127 /* This code is a good candidate for vectorizing with SIMD. */
2128 rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
2129 (void *)pkts,
2130 RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
2131 txq->elts_head += pkts_n;
2132 if (unlikely(part < pkts_n))
2133 /* The copy is wrapping around the elts array. */
2134 rte_memcpy((void *)elts, (void *)(pkts + part),
2135 (pkts_n - part) * sizeof(struct rte_mbuf *));
2136 }
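
/*
 * A worked example of the two-step copy above, with assumed values
 * elts_s = 256 (elts_m = 255), elts_head & elts_m = 250, pkts_n = 10:
 *
 *	part = 256 - 250 = 6    -> copy pkts[0..5] to elts[250..255]
 *	part < pkts_n           -> copy pkts[6..9] to elts[0..3]
 *	elts_head += 10
 */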
2137
2138 /**
2139 * Update completion queue consuming index via doorbell
2140 * and flush the completed data buffers.
2141 *
2142 * @param txq
2143 * Pointer to TX queue structure.
2144 * @param last_cqe
2145 * Pointer to the last valid CQE; if not NULL, update txq->wqe_pi and flush the buffers.
2146 * @param olx
2147 * Configured Tx offloads mask. It is fully defined at
2148 * compile time and may be used for optimization.
2149 */
2150 static __rte_always_inline void
2151 mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq,
2152 volatile struct mlx5_cqe *last_cqe,
2153 unsigned int olx __rte_unused)
2154 {
2155 if (likely(last_cqe != NULL)) {
2156 uint16_t tail;
2157
2158 txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
2159 tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
2160 if (likely(tail != txq->elts_tail)) {
2161 mlx5_tx_free_elts(txq, tail, olx);
2162 MLX5_ASSERT(tail == txq->elts_tail);
2163 }
2164 }
2165 }
2166
2167 /**
2168 * Manage TX completions. This routine checks the CQ for
2169 * arrived CQEs, deduces the last accomplished WQE in SQ,
2170 * updates SQ producing index and frees all completed mbufs.
2171 *
2172 * @param txq
2173 * Pointer to TX queue structure.
2174 * @param olx
2175 * Configured Tx offloads mask. It is fully defined at
2176 * compile time and may be used for optimization.
2177 *
2178 * NOTE: not inlined intentionally, it makes the tx_burst
2179 * routine smaller, simpler and faster - from experiments.
2180 */
2181 static void
2182 mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq,
2183 unsigned int olx __rte_unused)
2184 {
2185 unsigned int count = MLX5_TX_COMP_MAX_CQE;
2186 volatile struct mlx5_cqe *last_cqe = NULL;
2187 bool ring_doorbell = false;
2188 int ret;
2189
2190 static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
2191 static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
2192 do {
2193 volatile struct mlx5_cqe *cqe;
2194
2195 cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
2196 ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
2197 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
2198 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
2199 /* No new CQEs in completion queue. */
2200 MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
2201 break;
2202 }
2203 /*
2204 * Some error occurred, try to restart.
2205 * There is no barrier after the WQE-related doorbell
2206 * is written, make sure all writes are completed
2207 * here before we might perform the SQ reset.
2208 */
2209 rte_wmb();
2210 ret = mlx5_tx_error_cqe_handle
2211 (txq, (volatile struct mlx5_err_cqe *)cqe);
2212 if (unlikely(ret < 0)) {
2213 /*
2214 * Some error occurred on queue error
2215 * handling, we do not advance the index
2216 * here, allowing to retry on next call.
2217 */
2218 return;
2219 }
2220 /*
2221 * We are going to fetch all entries with
2222 * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
2223 * The send queue is supposed to be empty.
2224 */
2225 ring_doorbell = true;
2226 ++txq->cq_ci;
2227 txq->cq_pi = txq->cq_ci;
2228 last_cqe = NULL;
2229 continue;
2230 }
2231 /* Normal transmit completion. */
2232 MLX5_ASSERT(txq->cq_ci != txq->cq_pi);
2233 MLX5_ASSERT((txq->fcqs[txq->cq_ci & txq->cqe_m] >> 16) ==
2234 cqe->wqe_counter);
2235 ring_doorbell = true;
2236 ++txq->cq_ci;
2237 last_cqe = cqe;
2238 /*
2239 * We have to restrict the amount of processed CQEs
2240 * in one tx_burst routine call. The CQ may be large
2241 * and many CQEs may be updated by the NIC in one
2242 * transaction. Freeing the buffers is time consuming, and
2243 * multiple iterations may introduce significant
2244 * latency.
2245 */
2246 if (likely(--count == 0))
2247 break;
2248 } while (true);
2249 if (likely(ring_doorbell)) {
2250 /* Ring doorbell to notify hardware. */
2251 rte_compiler_barrier();
2252 *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
2253 mlx5_tx_comp_flush(txq, last_cqe, olx);
2254 }
2255 }
2256
2257 /**
2258 * Check if the completion request flag should be set in the last WQE.
2259 * Both pushed mbufs and WQEs are monitored and the completion request
2260 * flag is set if any of the thresholds is reached.
2261 *
2262 * @param txq
2263 * Pointer to TX queue structure.
2264 * @param loc
2265 * Pointer to burst routine local context.
2266 * @param olx
2267 * Configured Tx offloads mask. It is fully defined at
2268 * compile time and may be used for optimization.
2269 */
2270 static __rte_always_inline void
2271 mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq,
2272 struct mlx5_txq_local *restrict loc,
2273 unsigned int olx)
2274 {
2275 uint16_t head = txq->elts_head;
2276 unsigned int part;
2277
2278 part = MLX5_TXOFF_CONFIG(INLINE) ?
2279 0 : loc->pkts_sent - loc->pkts_copy;
2280 head += part;
2281 if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
2282 (MLX5_TXOFF_CONFIG(INLINE) &&
2283 (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
2284 volatile struct mlx5_wqe *last = loc->wqe_last;
2285
2286 MLX5_ASSERT(last);
2287 txq->elts_comp = head;
2288 if (MLX5_TXOFF_CONFIG(INLINE))
2289 txq->wqe_comp = txq->wqe_ci;
2290 /* Request unconditional completion on last WQE. */
2291 last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
2292 MLX5_COMP_MODE_OFFSET);
2293 /* Save elts_head in dedicated free on completion queue. */
2294 #ifdef RTE_LIBRTE_MLX5_DEBUG
2295 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
2296 (last->cseg.opcode >> 8) << 16;
2297 #else
2298 txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
2299 #endif
2300 /* A CQE slot must always be available. */
2301 MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
2302 }
2303 }
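
/*
 * The fcqs[] ring ties a requested completion to the elts position it
 * covers. A sketch of the producer/consumer pairing, with an assumed CQ
 * size of 64 entries (cqe_m = 63):
 *
 *	request (here):        txq->fcqs[txq->cq_pi++ & 63] = head;
 *	completion (flush):    tail = txq->fcqs[(txq->cq_ci - 1) & 63];
 *	                       mlx5_tx_free_elts(txq, tail, olx);
 *
 * cq_pi and cq_ci are free-running counters, so a masked slot is reused
 * only after the corresponding CQE has been consumed.
 */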
2304
2305 /**
2306 * DPDK callback to check the status of a tx descriptor.
2307 *
2308 * @param tx_queue
2309 * The tx queue.
2310 * @param[in] offset
2311 * The index of the descriptor in the ring.
2312 *
2313 * @return
2314 * The status of the tx descriptor.
2315 */
2316 int
2317 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
2318 {
2319 struct mlx5_txq_data *restrict txq = tx_queue;
2320 uint16_t used;
2321
2322 mlx5_tx_handle_completion(txq, 0);
2323 used = txq->elts_head - txq->elts_tail;
2324 if (offset < used)
2325 return RTE_ETH_TX_DESC_FULL;
2326 return RTE_ETH_TX_DESC_DONE;
2327 }
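
/*
 * Applications reach this handler through the generic ethdev API. A
 * minimal usage sketch (the port, queue and offset values are assumed):
 *
 *	int st = rte_eth_tx_descriptor_status(port_id, 0, 32);
 *
 *	if (st == RTE_ETH_TX_DESC_DONE)
 *		; .. slot 32 has completed and its mbuf was freed ..
 *	else if (st == RTE_ETH_TX_DESC_FULL)
 *		; .. slot 32 is still occupied by a pending packet ..
 */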
2328
2329 /**
2330 * Build the Control Segment with specified opcode:
2331 * - MLX5_OPCODE_SEND
2332 * - MLX5_OPCODE_ENHANCED_MPSW
2333 * - MLX5_OPCODE_TSO
2334 *
2335 * @param txq
2336 * Pointer to TX queue structure.
2337 * @param loc
2338 * Pointer to burst routine local context.
2339 * @param wqe
2340 * Pointer to WQE to fill with built Control Segment.
2341 * @param ds
2342 * Supposed length of WQE in segments.
2343 * @param opcode
2344 * SQ WQE opcode to put into Control Segment.
2345 * @param olx
2346 * Configured Tx offloads mask. It is fully defined at
2347 * compile time and may be used for optimization.
2348 */
2349 static __rte_always_inline void
2350 mlx5_tx_cseg_init(struct mlx5_txq_data *restrict txq,
2351 struct mlx5_txq_local *restrict loc __rte_unused,
2352 struct mlx5_wqe *restrict wqe,
2353 unsigned int ds,
2354 unsigned int opcode,
2355 unsigned int olx __rte_unused)
2356 {
2357 struct mlx5_wqe_cseg *restrict cs = &wqe->cseg;
2358
2359 /* For legacy MPW replace the EMPW by TSO with modifier. */
2360 if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
2361 opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
2362 cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
2363 cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
2364 cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
2365 MLX5_COMP_MODE_OFFSET);
2366 cs->misc = RTE_BE32(0);
2367 }
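
/*
 * A worked example of the opcode dword packing above, assuming
 * txq->wqe_ci = 0x1234 and MLX5_OPCODE_SEND = 0x0a as defined in the
 * mlx5 PRM headers:
 *
 *	cs->opcode = rte_cpu_to_be_32((0x1234 << 8) | 0x0a);
 *	           = rte_cpu_to_be_32(0x0012340a)
 *
 * i.e. the low byte carries the opcode and the next 16 bits carry the
 * WQE index used for completion reporting.
 */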
2368
2369 /**
2370 * Build the Ethernet Segment without inlined data.
2371 * Supports Software Parser, Checksums and VLAN
2372 * insertion Tx offload features.
2373 *
2374 * @param txq
2375 * Pointer to TX queue structure.
2376 * @param loc
2377 * Pointer to burst routine local context.
2378 * @param wqe
2379 * Pointer to WQE to fill with built Ethernet Segment.
2380 * @param olx
2381 * Configured Tx offloads mask. It is fully defined at
2382 * compile time and may be used for optimization.
2383 */
2384 static __rte_always_inline void
2385 mlx5_tx_eseg_none(struct mlx5_txq_data *restrict txq __rte_unused,
2386 struct mlx5_txq_local *restrict loc,
2387 struct mlx5_wqe *restrict wqe,
2388 unsigned int olx)
2389 {
2390 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2391 uint32_t csum;
2392
2393 /*
2394 * Calculate and set check sum flags first, dword field
2395 * in segment may be shared with Software Parser flags.
2396 */
2397 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2398 es->flags = rte_cpu_to_le_32(csum);
2399 /*
2400 * Calculate and set Software Parser offsets and flags.
2401 * These flags are set for custom UDP and IP tunnel packets.
2402 */
2403 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2404 /* Fill metadata field if needed. */
2405 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2406 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2407 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2408 /* Engage VLAN tag insertion feature if requested. */
2409 if (MLX5_TXOFF_CONFIG(VLAN) &&
2410 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
2411 /*
2412 * We should get here only if the device supports
2413 * this feature correctly.
2414 */
2415 MLX5_ASSERT(txq->vlan_en);
2416 es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
2417 loc->mbuf->vlan_tci);
2418 } else {
2419 es->inline_hdr = RTE_BE32(0);
2420 }
2421 }
2422
2423 /**
2424 * Build the Ethernet Segment with minimal inlined data
2425 * of MLX5_ESEG_MIN_INLINE_SIZE bytes length. This is
2426 * used to fill the gap in single WQEBB WQEs.
2427 * Supports Software Parser, Checksums and VLAN
2428 * insertion Tx offload features.
2429 *
2430 * @param txq
2431 * Pointer to TX queue structure.
2432 * @param loc
2433 * Pointer to burst routine local context.
2434 * @param wqe
2435 * Pointer to WQE to fill with built Ethernet Segment.
2436 * @param vlan
2437 * Length of VLAN tag insertion if any.
2438 * @param olx
2439 * Configured Tx offloads mask. It is fully defined at
2440 * compile time and may be used for optimization.
2441 */
2442 static __rte_always_inline void
2443 mlx5_tx_eseg_dmin(struct mlx5_txq_data *restrict txq __rte_unused,
2444 struct mlx5_txq_local *restrict loc,
2445 struct mlx5_wqe *restrict wqe,
2446 unsigned int vlan,
2447 unsigned int olx)
2448 {
2449 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2450 uint32_t csum;
2451 uint8_t *psrc, *pdst;
2452
2453 /*
2454 * Calculate and set check sum flags first, dword field
2455 * in segment may be shared with Software Parser flags.
2456 */
2457 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2458 es->flags = rte_cpu_to_le_32(csum);
2459 /*
2460 * Calculate and set Software Parser offsets and flags.
2461 * These flags are set for custom UDP and IP tunnel packets.
2462 */
2463 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2464 /* Fill metadata field if needed. */
2465 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2466 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2467 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2468 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2469 (sizeof(uint16_t) +
2470 sizeof(rte_v128u32_t)),
2471 "invalid Ethernet Segment data size");
2472 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2473 (sizeof(uint16_t) +
2474 sizeof(struct rte_vlan_hdr) +
2475 2 * RTE_ETHER_ADDR_LEN),
2476 "invalid Ethernet Segment data size");
2477 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2478 es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
2479 es->inline_data = *(unaligned_uint16_t *)psrc;
2480 psrc += sizeof(uint16_t);
2481 pdst = (uint8_t *)(es + 1);
2482 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2483 /* Implement VLAN tag insertion as part of inline data. */
2484 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2485 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2486 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2487 /* Insert VLAN ethertype + VLAN tag. */
2488 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2489 ((RTE_ETHER_TYPE_VLAN << 16) |
2490 loc->mbuf->vlan_tci);
2491 pdst += sizeof(struct rte_vlan_hdr);
2492 /* Copy the remaining two bytes of packet data. */
2493 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2494 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2495 } else {
2496 /* Fill the gap in the title WQEBB with inline data. */
2497 rte_mov16(pdst, psrc);
2498 }
2499 }
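
/*
 * A sketch of the resulting 18-byte minimal inline header
 * (MLX5_ESEG_MIN_INLINE_SIZE) for the VLAN case above:
 *
 *	es->inline_data : bytes 0..1  of the frame (start of dst MAC)
 *	pdst[0..9]      : bytes 2..11 of the frame (rest of dst/src MAC)
 *	pdst[10..13]    : inserted 0x8100 ethertype + vlan_tci
 *	pdst[14..15]    : bytes 12..13 of the frame (original ethertype)
 *
 * Without VLAN the 16 bytes following inline_data are a plain
 * rte_mov16() copy of the frame.
 */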
2500
2501 /**
2502 * Build the Ethernet Segment with entire packet
2503 * data inlining. Checks the boundary of WQEBB and
2504 * ring buffer wrapping, supports Software Parser,
2505 * Checksums and VLAN insertion Tx offload features.
2506 *
2507 * @param txq
2508 * Pointer to TX queue structure.
2509 * @param loc
2510 * Pointer to burst routine local context.
2511 * @param wqe
2512 * Pointer to WQE to fill with built Ethernet Segment.
2513 * @param vlan
2514 * Length of VLAN tag insertion if any.
2515 * @param inlen
2516 * Length of data to inline (VLAN included, if any).
2517 * @param tso
2518 * TSO flag, set mss field from the packet.
2519 * @param olx
2520 * Configured Tx offloads mask. It is fully defined at
2521 * compile time and may be used for optimization.
2522 *
2523 * @return
2524 * Pointer to the next Data Segment (aligned and wrapped around).
2525 */
2526 static __rte_always_inline struct mlx5_wqe_dseg *
2527 mlx5_tx_eseg_data(struct mlx5_txq_data *restrict txq,
2528 struct mlx5_txq_local *restrict loc,
2529 struct mlx5_wqe *restrict wqe,
2530 unsigned int vlan,
2531 unsigned int inlen,
2532 unsigned int tso,
2533 unsigned int olx)
2534 {
2535 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2536 uint32_t csum;
2537 uint8_t *psrc, *pdst;
2538 unsigned int part;
2539
2540 /*
2541 * Calculate and set check sum flags first, dword field
2542 * in segment may be shared with Software Parser flags.
2543 */
2544 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2545 if (tso) {
2546 csum <<= 24;
2547 csum |= loc->mbuf->tso_segsz;
2548 es->flags = rte_cpu_to_be_32(csum);
2549 } else {
2550 es->flags = rte_cpu_to_le_32(csum);
2551 }
2552 /*
2553 * Calculate and set Software Parser offsets and flags.
2554 * These flags are set for custom UDP and IP tunnel packets.
2555 */
2556 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2557 /* Fill metadata field if needed. */
2558 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2559 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2560 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2561 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2562 (sizeof(uint16_t) +
2563 sizeof(rte_v128u32_t)),
2564 "invalid Ethernet Segment data size");
2565 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2566 (sizeof(uint16_t) +
2567 sizeof(struct rte_vlan_hdr) +
2568 2 * RTE_ETHER_ADDR_LEN),
2569 "invalid Ethernet Segment data size");
2570 psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
2571 es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
2572 es->inline_data = *(unaligned_uint16_t *)psrc;
2573 psrc += sizeof(uint16_t);
2574 pdst = (uint8_t *)(es + 1);
2575 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2576 /* Implement VLAN tag insertion as part of inline data. */
2577 memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
2578 pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2579 psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
2580 /* Insert VLAN ethertype + VLAN tag. */
2581 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2582 ((RTE_ETHER_TYPE_VLAN << 16) |
2583 loc->mbuf->vlan_tci);
2584 pdst += sizeof(struct rte_vlan_hdr);
2585 /* Copy the remaining two bytes of packet data. */
2586 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
2587 *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
2588 psrc += sizeof(uint16_t);
2589 } else {
2590 /* Fill the gap in the title WQEBB with inline data. */
2591 rte_mov16(pdst, psrc);
2592 psrc += sizeof(rte_v128u32_t);
2593 }
2594 pdst = (uint8_t *)(es + 2);
2595 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2596 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
2597 inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
2598 if (!inlen) {
2599 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
2600 return (struct mlx5_wqe_dseg *)pdst;
2601 }
2602 /*
2603 * The WQEBB space availability is checked by caller.
2604 * Here we should be aware of WQE ring buffer wraparound only.
2605 */
2606 part = (uint8_t *)txq->wqes_end - pdst;
2607 part = RTE_MIN(part, inlen);
2608 do {
2609 rte_memcpy(pdst, psrc, part);
2610 inlen -= part;
2611 if (likely(!inlen)) {
2612 /*
2613 * If return value is not used by the caller
2614 * the code below will be optimized out.
2615 */
2616 pdst += part;
2617 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2618 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
2619 pdst = (uint8_t *)txq->wqes;
2620 return (struct mlx5_wqe_dseg *)pdst;
2621 }
2622 pdst = (uint8_t *)txq->wqes;
2623 psrc += part;
2624 part = inlen;
2625 } while (true);
2626 }
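
/*
 * A worked example of the wraparound copy loop above, with assumed
 * values: 32 bytes left before txq->wqes_end and 80 bytes still to
 * inline:
 *
 *	1st pass: copy 32 bytes at pdst, inlen = 48, pdst = txq->wqes
 *	2nd pass: copy the remaining 48 bytes from the ring start, align
 *	          pdst up to MLX5_WSEG_SIZE and return it as the next
 *	          Data Segment pointer.
 */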
2627
2628 /**
2629 * Copy data from a chain of mbufs to the specified linear buffer.
2630 * If the data of some mbuf is copied completely, that mbuf is
2631 * freed. The local structure is used to keep the byte stream
2632 * state (current mbuf and the offset within it).
2633 *
2634 * @param pdst
2635 * Pointer to the destination linear buffer.
2636 * @param loc
2637 * Pointer to burst routine local context.
2638 * @param len
2639 * Length of data to be copied.
2640 * @param must
2641 * Length of data to be copied ignoring no inline hint.
2642 * @param olx
2643 * Configured Tx offloads mask. It is fully defined at
2644 * compile time and may be used for optimization.
2645 *
2646 * @return
2647 * Number of actually copied data bytes. This is always greater than or
2648 * equal to the must parameter and might be less than len if the
2649 * no-inline hint flag is encountered.
2650 */
2651 static __rte_always_inline unsigned int
2652 mlx5_tx_mseg_memcpy(uint8_t *pdst,
2653 struct mlx5_txq_local *restrict loc,
2654 unsigned int len,
2655 unsigned int must,
2656 unsigned int olx __rte_unused)
2657 {
2658 struct rte_mbuf *mbuf;
2659 unsigned int part, dlen, copy = 0;
2660 uint8_t *psrc;
2661
2662 MLX5_ASSERT(len);
2663 MLX5_ASSERT(must <= len);
2664 do {
2665 /* Allow zero length packets, must check first. */
2666 dlen = rte_pktmbuf_data_len(loc->mbuf);
2667 if (dlen <= loc->mbuf_off) {
2668 /* Exhausted packet, just free. */
2669 mbuf = loc->mbuf;
2670 loc->mbuf = mbuf->next;
2671 rte_pktmbuf_free_seg(mbuf);
2672 loc->mbuf_off = 0;
2673 MLX5_ASSERT(loc->mbuf_nseg > 1);
2674 MLX5_ASSERT(loc->mbuf);
2675 --loc->mbuf_nseg;
2676 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
2677 unsigned int diff;
2678
2679 if (copy >= must) {
2680 /*
2681 * We already copied the minimal
2682 * requested amount of data.
2683 */
2684 return copy;
2685 }
2686 diff = must - copy;
2687 if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
2688 /*
2689 * Copy only the minimal required
2690 * part of the data buffer.
2691 */
2692 len = diff;
2693 }
2694 }
2695 continue;
2696 }
2697 dlen -= loc->mbuf_off;
2698 psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
2699 loc->mbuf_off);
2700 part = RTE_MIN(len, dlen);
2701 rte_memcpy(pdst, psrc, part);
2702 copy += part;
2703 loc->mbuf_off += part;
2704 len -= part;
2705 if (!len) {
2706 if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
2707 loc->mbuf_off = 0;
2708 /* Exhausted packet, just free. */
2709 mbuf = loc->mbuf;
2710 loc->mbuf = mbuf->next;
2711 rte_pktmbuf_free_seg(mbuf);
2712 loc->mbuf_off = 0;
2713 MLX5_ASSERT(loc->mbuf_nseg >= 1);
2714 --loc->mbuf_nseg;
2715 }
2716 return copy;
2717 }
2718 pdst += part;
2719 } while (true);
2720 }
2721
2722 /**
2723 * Build the Ethernet Segment with inlined data from
2724 * multi-segment packet. Checks the boundary of WQEBB
2725 * and ring buffer wrapping, supports Software Parser,
2726 * Checksums and VLAN insertion Tx offload features.
2727 *
2728 * @param txq
2729 * Pointer to TX queue structure.
2730 * @param loc
2731 * Pointer to burst routine local context.
2732 * @param wqe
2733 * Pointer to WQE to fill with built Ethernet Segment.
2734 * @param vlan
2735 * Length of VLAN tag insertion if any.
2736 * @param inlen
2737 * Length of data to inline (VLAN included, if any).
2738 * @param tso
2739 * TSO flag, set mss field from the packet.
2740 * @param olx
2741 * Configured Tx offloads mask. It is fully defined at
2742 * compile time and may be used for optimization.
2743 *
2744 * @return
2745 * Pointer to the next Data Segment (aligned and
2746 * possibly NOT wrapped around - the caller should do
2747 * the wrapping check on its own).
2748 */
2749 static __rte_always_inline struct mlx5_wqe_dseg *
2750 mlx5_tx_eseg_mdat(struct mlx5_txq_data *restrict txq,
2751 struct mlx5_txq_local *restrict loc,
2752 struct mlx5_wqe *restrict wqe,
2753 unsigned int vlan,
2754 unsigned int inlen,
2755 unsigned int tso,
2756 unsigned int olx)
2757 {
2758 struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
2759 uint32_t csum;
2760 uint8_t *pdst;
2761 unsigned int part, tlen = 0;
2762
2763 /*
2764 * Calculate and set check sum flags first, uint32_t field
2765 * in segment may be shared with Software Parser flags.
2766 */
2767 csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
2768 if (tso) {
2769 csum <<= 24;
2770 csum |= loc->mbuf->tso_segsz;
2771 es->flags = rte_cpu_to_be_32(csum);
2772 } else {
2773 es->flags = rte_cpu_to_le_32(csum);
2774 }
2775 /*
2776 * Calculate and set Software Parser offsets and flags.
2777 * These flags are set for custom UDP and IP tunnel packets.
2778 */
2779 es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
2780 /* Fill metadata field if needed. */
2781 es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
2782 loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
2783 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
2784 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2785 (sizeof(uint16_t) +
2786 sizeof(rte_v128u32_t)),
2787 "invalid Ethernet Segment data size");
2788 static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
2789 (sizeof(uint16_t) +
2790 sizeof(struct rte_vlan_hdr) +
2791 2 * RTE_ETHER_ADDR_LEN),
2792 "invalid Ethernet Segment data size");
2793 MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
2794 pdst = (uint8_t *)&es->inline_data;
2795 if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
2796 /* Implement VLAN tag insertion as part of inline data. */
2797 mlx5_tx_mseg_memcpy(pdst, loc,
2798 2 * RTE_ETHER_ADDR_LEN,
2799 2 * RTE_ETHER_ADDR_LEN, olx);
2800 pdst += 2 * RTE_ETHER_ADDR_LEN;
2801 *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
2802 ((RTE_ETHER_TYPE_VLAN << 16) |
2803 loc->mbuf->vlan_tci);
2804 pdst += sizeof(struct rte_vlan_hdr);
2805 tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
2806 }
2807 MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
2808 /*
2809 * The WQEBB space availability is checked by caller.
2810 * Here we should be aware of WQE ring buffer wraparound only.
2811 */
2812 part = (uint8_t *)txq->wqes_end - pdst;
2813 part = RTE_MIN(part, inlen - tlen);
2814 MLX5_ASSERT(part);
2815 do {
2816 unsigned int copy;
2817
2818 /*
2819 * Copying may be interrupted inside the routine
2820 * if the no-inline hint flag is encountered.
2821 */
2822 copy = tlen >= txq->inlen_mode ? 0 : (txq->inlen_mode - tlen);
2823 copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
2824 tlen += copy;
2825 if (likely(inlen <= tlen) || copy < part) {
2826 es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
2827 pdst += copy;
2828 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2829 return (struct mlx5_wqe_dseg *)pdst;
2830 }
2831 pdst = (uint8_t *)txq->wqes;
2832 part = inlen - tlen;
2833 } while (true);
2834 }
2835
2836 /**
2837 * Build the Data Segment of pointer type.
2838 *
2839 * @param txq
2840 * Pointer to TX queue structure.
2841 * @param loc
2842 * Pointer to burst routine local context.
2843 * @param dseg
2844 * Pointer to WQE to fill with built Data Segment.
2845 * @param buf
2846 * Data buffer to point.
2847 * @param len
2848 * Data buffer length.
2849 * @param olx
2850 * Configured Tx offloads mask. It is fully defined at
2851 * compile time and may be used for optimization.
2852 */
2853 static __rte_always_inline void
2854 mlx5_tx_dseg_ptr(struct mlx5_txq_data *restrict txq,
2855 struct mlx5_txq_local *restrict loc,
2856 struct mlx5_wqe_dseg *restrict dseg,
2857 uint8_t *buf,
2858 unsigned int len,
2859 unsigned int olx __rte_unused)
2860
2861 {
2862 MLX5_ASSERT(len);
2863 dseg->bcount = rte_cpu_to_be_32(len);
2864 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2865 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2866 }
2867
2868 /**
2869 * Build the Data Segment of pointer type, or of inline
2870 * type if the data length fits into the minimal
2871 * Data Segment inline size.
2872 *
2873 * @param txq
2874 * Pointer to TX queue structure.
2875 * @param loc
2876 * Pointer to burst routine local context.
2877 * @param dseg
2878 * Pointer to WQE to fill with built Data Segment.
2879 * @param buf
2880 * Data buffer to point.
2881 * @param len
2882 * Data buffer length.
2883 * @param olx
2884 * Configured Tx offloads mask. It is fully defined at
2885 * compile time and may be used for optimization.
2886 */
2887 static __rte_always_inline void
2888 mlx5_tx_dseg_iptr(struct mlx5_txq_data *restrict txq,
2889 struct mlx5_txq_local *restrict loc,
2890 struct mlx5_wqe_dseg *restrict dseg,
2891 uint8_t *buf,
2892 unsigned int len,
2893 unsigned int olx __rte_unused)
2894
2895 {
2896 uintptr_t dst, src;
2897
2898 MLX5_ASSERT(len);
2899 if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
2900 dseg->bcount = rte_cpu_to_be_32(len);
2901 dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
2902 dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
2903
2904 return;
2905 }
2906 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2907 /* Unrolled implementation of generic rte_memcpy. */
2908 dst = (uintptr_t)&dseg->inline_data[0];
2909 src = (uintptr_t)buf;
2910 if (len & 0x08) {
2911 #ifdef RTE_ARCH_STRICT_ALIGN
2912 MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
2913 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2914 dst += sizeof(uint32_t);
2915 src += sizeof(uint32_t);
2916 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2917 dst += sizeof(uint32_t);
2918 src += sizeof(uint32_t);
2919 #else
2920 *(uint64_t *)dst = *(unaligned_uint64_t *)src;
2921 dst += sizeof(uint64_t);
2922 src += sizeof(uint64_t);
2923 #endif
2924 }
2925 if (len & 0x04) {
2926 *(uint32_t *)dst = *(unaligned_uint32_t *)src;
2927 dst += sizeof(uint32_t);
2928 src += sizeof(uint32_t);
2929 }
2930 if (len & 0x02) {
2931 *(uint16_t *)dst = *(unaligned_uint16_t *)src;
2932 dst += sizeof(uint16_t);
2933 src += sizeof(uint16_t);
2934 }
2935 if (len & 0x01)
2936 *(uint8_t *)dst = *(uint8_t *)src;
2937 }
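
/*
 * The unrolled copy above picks chunk sizes from the bits of len.
 * For an assumed len = 11 (binary 1011, within the 12-byte
 * MLX5_DSEG_MIN_INLINE_SIZE limit of this path):
 *
 *	len & 0x08 -> copy 8 bytes
 *	len & 0x04 -> skipped
 *	len & 0x02 -> copy 2 bytes
 *	len & 0x01 -> copy 1 byte
 *
 * for a total of 11 bytes with at most four predictable branches.
 */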
2938
2939 /**
2940 * Build the Data Segment of inlined data from single
2941 * segment packet, no VLAN insertion.
2942 *
2943 * @param txq
2944 * Pointer to TX queue structure.
2945 * @param loc
2946 * Pointer to burst routine local context.
2947 * @param dseg
2948 * Pointer to WQE to fill with built Data Segment.
2949 * @param buf
2950 * Data buffer to point.
2951 * @param len
2952 * Data buffer length.
2953 * @param olx
2954 * Configured Tx offloads mask. It is fully defined at
2955 * compile time and may be used for optimization.
2956 *
2957 * @return
2958 * Pointer to the next Data Segment after inlined data.
2959 * Ring buffer wraparound check is needed. We do not
2960 * do it here because it may not be needed for the
2961 * last packet in the eMPW session.
2962 */
2963 static __rte_always_inline struct mlx5_wqe_dseg *
2964 mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq,
2965 struct mlx5_txq_local *restrict loc __rte_unused,
2966 struct mlx5_wqe_dseg *restrict dseg,
2967 uint8_t *buf,
2968 unsigned int len,
2969 unsigned int olx __rte_unused)
2970 {
2971 unsigned int part;
2972 uint8_t *pdst;
2973
2974 if (!MLX5_TXOFF_CONFIG(MPW)) {
2975 /* Store the descriptor byte counter for eMPW sessions. */
2976 dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
2977 pdst = &dseg->inline_data[0];
2978 } else {
2979 /* The entire legacy MPW session counter is stored on close. */
2980 pdst = (uint8_t *)dseg;
2981 }
2982 /*
2983 * The WQEBB space availability is checked by caller.
2984 * Here we should be aware of WQE ring buffer wraparound only.
2985 */
2986 part = (uint8_t *)txq->wqes_end - pdst;
2987 part = RTE_MIN(part, len);
2988 do {
2989 rte_memcpy(pdst, buf, part);
2990 len -= part;
2991 if (likely(!len)) {
2992 pdst += part;
2993 if (!MLX5_TXOFF_CONFIG(MPW))
2994 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
2995 /* Note: no final wraparound check here. */
2996 return (struct mlx5_wqe_dseg *)pdst;
2997 }
2998 pdst = (uint8_t *)txq->wqes;
2999 buf += part;
3000 part = len;
3001 } while (true);
3002 }
3003
3004 /**
3005 * Build the Data Segment of inlined data from single
3006 * segment packet with VLAN insertion.
3007 *
3008 * @param txq
3009 * Pointer to TX queue structure.
3010 * @param loc
3011 * Pointer to burst routine local context.
3012 * @param dseg
3013 * Pointer to the dseg to fill with the built Data Segment.
3014 * @param buf
3015 * Data buffer to point.
3016 * @param len
3017 * Data buffer length.
3018 * @param olx
3019 * Configured Tx offloads mask. It is fully defined at
3020 * compile time and may be used for optimization.
3021 *
3022 * @return
3023 * Pointer to the next Data Segment after inlined data.
3024 * Ring buffer wraparound check is needed.
3025 */
3026 static __rte_always_inline struct mlx5_wqe_dseg *
3027 mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq,
3028 struct mlx5_txq_local *restrict loc __rte_unused,
3029 struct mlx5_wqe_dseg *restrict dseg,
3030 uint8_t *buf,
3031 unsigned int len,
3032 unsigned int olx __rte_unused)
3033
3034 {
3035 unsigned int part;
3036 uint8_t *pdst;
3037
3038 MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
3039 static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
3040 (2 * RTE_ETHER_ADDR_LEN),
3041 "invalid Data Segment data size");
3042 if (!MLX5_TXOFF_CONFIG(MPW)) {
3043 /* Store the descriptor byte counter for eMPW sessions. */
3044 dseg->bcount = rte_cpu_to_be_32
3045 ((len + sizeof(struct rte_vlan_hdr)) |
3046 MLX5_ETH_WQE_DATA_INLINE);
3047 pdst = &dseg->inline_data[0];
3048 } else {
3049 /* The entire legacy MPW session counter is stored on close. */
3050 pdst = (uint8_t *)dseg;
3051 }
3052 memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
3053 buf += MLX5_DSEG_MIN_INLINE_SIZE;
3054 pdst += MLX5_DSEG_MIN_INLINE_SIZE;
3055 len -= MLX5_DSEG_MIN_INLINE_SIZE;
3056 /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
3057 MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
3058 if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
3059 pdst = (uint8_t *)txq->wqes;
3060 *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
3061 loc->mbuf->vlan_tci);
3062 pdst += sizeof(struct rte_vlan_hdr);
3063 /*
3064 * The WQEBB space availability is checked by caller.
3065 * Here we should be aware of WQE ring buffer wraparound only.
3066 */
3067 part = (uint8_t *)txq->wqes_end - pdst;
3068 part = RTE_MIN(part, len);
3069 do {
3070 rte_memcpy(pdst, buf, part);
3071 len -= part;
3072 if (likely(!len)) {
3073 pdst += part;
3074 if (!MLX5_TXOFF_CONFIG(MPW))
3075 pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
3076 /* Note: no final wraparound check here. */
3077 return (struct mlx5_wqe_dseg *)pdst;
3078 }
3079 pdst = (uint8_t *)txq->wqes;
3080 buf += part;
3081 part = len;
3082 } while (true);
3083 }
3084
3085 /**
3086 * Build the Ethernet Segment with optionally inlined data with
3087 * VLAN insertion and following Data Segments (if any) from
3088 * multi-segment packet. Used by ordinary send and TSO.
3089 *
3090 * @param txq
3091 * Pointer to TX queue structure.
3092 * @param loc
3093 * Pointer to burst routine local context.
3094 * @param wqe
3095 * Pointer to WQE to fill with built Ethernet/Data Segments.
3096 * @param vlan
3097 * Length of VLAN header to insert, 0 means no VLAN insertion.
3098 * @param inlen
3099 * Data length to inline. For TSO this parameter specifies
3100 * the exact value, for ordinary send it can be aligned by the
3101 * caller to provide better WQE space saving and data buffer
3102 * start address alignment. This length includes the VLAN header
3103 * being inserted.
3104 * @param tso
3105 * Zero means ordinary send, inlined data can be extended,
3106 * otherwise this is TSO, inlined data length is fixed.
3107 * @param olx
3108 * Configured Tx offloads mask. It is fully defined at
3109 * compile time and may be used for optimization.
3110 *
3111 * @return
3112 * Actual size of built WQE in segments.
3113 */
3114 static __rte_always_inline unsigned int
3115 mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq,
3116 struct mlx5_txq_local *restrict loc,
3117 struct mlx5_wqe *restrict wqe,
3118 unsigned int vlan,
3119 unsigned int inlen,
3120 unsigned int tso,
3121 unsigned int olx __rte_unused)
3122 {
3123 struct mlx5_wqe_dseg *restrict dseg;
3124 unsigned int ds;
3125
3126 MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
3127 loc->mbuf_nseg = NB_SEGS(loc->mbuf);
3128 loc->mbuf_off = 0;
3129
3130 dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
3131 if (!loc->mbuf_nseg)
3132 goto dseg_done;
3133 /*
3134 * There are still some mbufs remaining, not inlined.
3135 * The first mbuf may be partially inlined and we
3136 * must process the possible non-zero data offset.
3137 */
3138 if (loc->mbuf_off) {
3139 unsigned int dlen;
3140 uint8_t *dptr;
3141
3142 /*
3143 * Exhausted packets must have been dropped before.
3144 * A non-zero offset means there is some data
3145 * remaining in the packet.
3146 */
3147 MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
3148 MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
3149 dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
3150 loc->mbuf_off);
3151 dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
3152 /*
3153 * Build the pointer/minimal data Data Segment.
3154 * Do ring buffer wrapping check in advance.
3155 */
3156 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3157 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3158 mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
3159 /* Store the mbuf to be freed on completion. */
3160 MLX5_ASSERT(loc->elts_free);
3161 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3162 --loc->elts_free;
3163 ++dseg;
3164 if (--loc->mbuf_nseg == 0)
3165 goto dseg_done;
3166 loc->mbuf = loc->mbuf->next;
3167 loc->mbuf_off = 0;
3168 }
3169 do {
3170 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3171 struct rte_mbuf *mbuf;
3172
3173 /* Zero length segment found, just skip. */
3174 mbuf = loc->mbuf;
3175 loc->mbuf = loc->mbuf->next;
3176 rte_pktmbuf_free_seg(mbuf);
3177 if (--loc->mbuf_nseg == 0)
3178 break;
3179 } else {
3180 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3181 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3182 mlx5_tx_dseg_iptr
3183 (txq, loc, dseg,
3184 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3185 rte_pktmbuf_data_len(loc->mbuf), olx);
3186 MLX5_ASSERT(loc->elts_free);
3187 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3188 --loc->elts_free;
3189 ++dseg;
3190 if (--loc->mbuf_nseg == 0)
3191 break;
3192 loc->mbuf = loc->mbuf->next;
3193 }
3194 } while (true);
3195
3196 dseg_done:
3197 /* Calculate actual segments used from the dseg pointer. */
3198 if ((uintptr_t)wqe < (uintptr_t)dseg)
3199 ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
3200 else
3201 ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
3202 txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
3203 return ds;
3204 }
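
/*
 * A worked example of the segment accounting above, assuming the WQE
 * starts at ring offset 0x100, the last Data Segment ended at 0x1a0 and
 * no wraparound occurred:
 *
 *	ds = (0x1a0 - 0x100) / MLX5_WSEG_SIZE = 0xa0 / 16 = 10
 *
 * i.e. the WQE spans 10 16-byte segments, which the callers convert to
 * (10 + 3) / 4 = 3 WQEBBs.
 */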
3205
3206 /**
3207 * Tx one packet function for multi-segment TSO. Supports all
3208 * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
3209 * sends one packet per WQE.
3210 *
3211 * This routine is responsible for storing processed mbuf
3212 * into the elts ring buffer and updating elts_head.
3213 *
3214 * @param txq
3215 * Pointer to TX queue structure.
3216 * @param loc
3217 * Pointer to burst routine local context.
3218 * @param olx
3219 * Configured Tx offloads mask. It is fully defined at
3220 * compile time and may be used for optimization.
3221 *
3222 * @return
3223 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3224 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3225 * Local context variables partially updated.
3226 */
3227 static __rte_always_inline enum mlx5_txcmp_code
3228 mlx5_tx_packet_multi_tso(struct mlx5_txq_data *restrict txq,
3229 struct mlx5_txq_local *restrict loc,
3230 unsigned int olx)
3231 {
3232 struct mlx5_wqe *restrict wqe;
3233 unsigned int ds, dlen, inlen, ntcp, vlan = 0;
3234
3235 /*
3236 * Calculate data length to be inlined to estimate
3237 * the required space in WQE ring buffer.
3238 */
3239 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3240 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3241 vlan = sizeof(struct rte_vlan_hdr);
3242 inlen = loc->mbuf->l2_len + vlan +
3243 loc->mbuf->l3_len + loc->mbuf->l4_len;
3244 if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
3245 return MLX5_TXCMP_CODE_ERROR;
3246 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3247 inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
3248 /* Packet must contain all TSO headers. */
3249 if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
3250 inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3251 inlen > (dlen + vlan)))
3252 return MLX5_TXCMP_CODE_ERROR;
3253 MLX5_ASSERT(inlen >= txq->inlen_mode);
3254 /*
3255 * Check whether there are enough free WQEBBs:
3256 * - Control Segment
3257 * - Ethernet Segment
3258 * - First Segment of inlined Ethernet data
3259 * - ... data continued ...
3260 * - Data Segments of pointer/min inline type
3261 */
3262 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3263 MLX5_ESEG_MIN_INLINE_SIZE +
3264 MLX5_WSEG_SIZE +
3265 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3266 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3267 return MLX5_TXCMP_CODE_EXIT;
3268 /* Check for maximal WQE size. */
3269 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3270 return MLX5_TXCMP_CODE_ERROR;
3271 #ifdef MLX5_PMD_SOFT_COUNTERS
3272 /* Update sent data bytes/packets counters. */
3273 ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
3274 loc->mbuf->tso_segsz;
3275 /*
3276 * One packet is accounted for at the end of
3277 * mlx5_tx_burst via the loc->pkts_sent field,
3278 * hence the decrement below.
3279 */
3280 --ntcp;
3281 txq->stats.opackets += ntcp;
3282 txq->stats.obytes += dlen + vlan + ntcp * inlen;
3283 #endif
3284 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3285 loc->wqe_last = wqe;
3286 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
3287 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
3288 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3289 txq->wqe_ci += (ds + 3) / 4;
3290 loc->wqe_free -= (ds + 3) / 4;
3291 return MLX5_TXCMP_CODE_MULTI;
3292 }
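
/*
 * A worked example of the WQEBB estimate above, assuming a 3-segment
 * mbuf chain and inlen = 66 bytes of TSO headers (with
 * MLX5_ESEG_MIN_INLINE_SIZE = 18 and MLX5_WSEG_SIZE = 16):
 *
 *	ds = 3 + 2 + (66 - 18 + 16 + 15) / 16 = 3 + 2 + 4 = 9
 *	WQEBBs required = (9 + 3) / 4 = 3
 *
 * which is checked against loc->wqe_free before the WQE is built.
 */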
3293
3294 /**
3295 * Tx one packet function for multi-segment SEND. Supports all
3296 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3297 * sends one packet per WQE, without any data inlining in
3298 * Ethernet Segment.
3299 *
3300 * This routine is responsible for storing processed mbuf
3301 * into the elts ring buffer and updating elts_head.
3302 *
3303 * @param txq
3304 * Pointer to TX queue structure.
3305 * @param loc
3306 * Pointer to burst routine local context.
3307 * @param olx
3308 * Configured Tx offloads mask. It is fully defined at
3309 * compile time and may be used for optimization.
3310 *
3311 * @return
3312 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3313 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3314 * Local context variables partially updated.
3315 */
3316 static __rte_always_inline enum mlx5_txcmp_code
3317 mlx5_tx_packet_multi_send(struct mlx5_txq_data *restrict txq,
3318 struct mlx5_txq_local *restrict loc,
3319 unsigned int olx)
3320 {
3321 struct mlx5_wqe_dseg *restrict dseg;
3322 struct mlx5_wqe *restrict wqe;
3323 unsigned int ds, nseg;
3324
3325 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3326 /*
3327 * No inlining at all, it means that saving CPU cycles
3328 * was prioritized at configuration time, so we should not
3329 * copy any packet data into the WQE.
3330 */
3331 nseg = NB_SEGS(loc->mbuf);
3332 ds = 2 + nseg;
3333 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3334 return MLX5_TXCMP_CODE_EXIT;
3335 /* Check for maximal WQE size. */
3336 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3337 return MLX5_TXCMP_CODE_ERROR;
3338 /*
3339 * Some Tx offloads may cause an error if
3340 * packet is not long enough, check against
3341 * assumed minimal length.
3342 */
3343 if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
3344 return MLX5_TXCMP_CODE_ERROR;
3345 #ifdef MLX5_PMD_SOFT_COUNTERS
3346 /* Update sent data bytes counter. */
3347 txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
3348 if (MLX5_TXOFF_CONFIG(VLAN) &&
3349 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3350 txq->stats.obytes += sizeof(struct rte_vlan_hdr);
3351 #endif
3352 /*
3353 * SEND WQE, one WQEBB:
3354 * - Control Segment, SEND opcode
3355 * - Ethernet Segment, optional VLAN, no inline
3356 * - Data Segments, pointer only type
3357 */
3358 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3359 loc->wqe_last = wqe;
3360 mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
3361 mlx5_tx_eseg_none(txq, loc, wqe, olx);
3362 dseg = &wqe->dseg[0];
3363 do {
3364 if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
3365 struct rte_mbuf *mbuf;
3366
3367 /*
3368 * Zero length segment found, have to
3369 * correct total size of WQE in segments.
3370 * It is supposed to be rare occasion, so
3371 * in normal case (no zero length segments)
3372 * we avoid extra writing to the Control
3373 * Segment.
3374 */
3375 --ds;
3376 wqe->cseg.sq_ds -= RTE_BE32(1);
3377 mbuf = loc->mbuf;
3378 loc->mbuf = mbuf->next;
3379 rte_pktmbuf_free_seg(mbuf);
3380 if (--nseg == 0)
3381 break;
3382 } else {
3383 mlx5_tx_dseg_ptr
3384 (txq, loc, dseg,
3385 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
3386 rte_pktmbuf_data_len(loc->mbuf), olx);
3387 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3388 --loc->elts_free;
3389 if (--nseg == 0)
3390 break;
3391 ++dseg;
3392 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
3393 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
3394 loc->mbuf = loc->mbuf->next;
3395 }
3396 } while (true);
3397 txq->wqe_ci += (ds + 3) / 4;
3398 loc->wqe_free -= (ds + 3) / 4;
3399 return MLX5_TXCMP_CODE_MULTI;
3400 }
3401
3402 /**
3403 * Tx one packet function for multi-segment SEND. Supports all
3404 * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
3405 * sends one packet per WQE, with data inlining in
3406 * Ethernet Segment and minimal Data Segments.
3407 *
3408 * This routine is responsible for storing processed mbuf
3409 * into the elts ring buffer and updating elts_head.
3410 *
3411 * @param txq
3412 * Pointer to TX queue structure.
3413 * @param loc
3414 * Pointer to burst routine local context.
3415 * @param olx
3416 * Configured Tx offloads mask. It is fully defined at
3417 * compile time and may be used for optimization.
3418 *
3419 * @return
3420 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3421 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3422 * Local context variables partially updated.
3423 */
3424 static __rte_always_inline enum mlx5_txcmp_code
3425 mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq,
3426 struct mlx5_txq_local *restrict loc,
3427 unsigned int olx)
3428 {
3429 struct mlx5_wqe *restrict wqe;
3430 unsigned int ds, inlen, dlen, vlan = 0;
3431
3432 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3433 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3434 /*
3435 * First calculate data length to be inlined
3436 * to estimate the required space for WQE.
3437 */
3438 dlen = rte_pktmbuf_pkt_len(loc->mbuf);
3439 if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
3440 vlan = sizeof(struct rte_vlan_hdr);
3441 inlen = dlen + vlan;
3442 /* Check against minimal length. */
3443 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
3444 return MLX5_TXCMP_CODE_ERROR;
3445 MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
3446 if (inlen > txq->inlen_send ||
3447 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
3448 struct rte_mbuf *mbuf;
3449 unsigned int nxlen;
3450 uintptr_t start;
3451
3452 /*
3453 * Packet length exceeds the allowed inline
3454 * data length, check whether the minimal
3455 * inlining is required.
3456 */
3457 if (txq->inlen_mode) {
3458 MLX5_ASSERT(txq->inlen_mode >=
3459 MLX5_ESEG_MIN_INLINE_SIZE);
3460 MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
3461 inlen = txq->inlen_mode;
3462 } else {
3463 if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
3464 !vlan || txq->vlan_en) {
3465 /*
3466 * VLAN insertion will be done by the HW.
3467 * It is not the most efficient way - the VLAN flag is
3468 * checked twice - but we must compute the
3469 * inlining length correctly and take into
3470 * account the VLAN header being inserted.
3471 */
3472 return mlx5_tx_packet_multi_send
3473 (txq, loc, olx);
3474 }
3475 inlen = MLX5_ESEG_MIN_INLINE_SIZE;
3476 }
3477 /*
3478 * Now we know the minimal amount of data is requested
3479 * to inline. Check whether we should inline the buffers
3480 * from the chain beginning to eliminate some mbufs.
3481 */
3482 mbuf = loc->mbuf;
3483 nxlen = rte_pktmbuf_data_len(mbuf);
3484 if (unlikely(nxlen <= txq->inlen_send)) {
3485 /* We can inline first mbuf at least. */
3486 if (nxlen < inlen) {
3487 unsigned int smlen;
3488
3489 /* Scan mbufs till inlen filled. */
3490 do {
3491 smlen = nxlen;
3492 mbuf = NEXT(mbuf);
3493 MLX5_ASSERT(mbuf);
3494 nxlen = rte_pktmbuf_data_len(mbuf);
3495 nxlen += smlen;
3496 } while (unlikely(nxlen < inlen));
3497 if (unlikely(nxlen > txq->inlen_send)) {
3498 /* We cannot inline entire mbuf. */
3499 smlen = inlen - smlen;
3500 start = rte_pktmbuf_mtod_offset
3501 (mbuf, uintptr_t, smlen);
3502 goto do_align;
3503 }
3504 }
3505 do {
3506 inlen = nxlen;
3507 mbuf = NEXT(mbuf);
3508 /* This should not be the end of the packet. */
3509 MLX5_ASSERT(mbuf);
3510 nxlen = inlen + rte_pktmbuf_data_len(mbuf);
3511 } while (unlikely(nxlen < txq->inlen_send));
3512 }
3513 start = rte_pktmbuf_mtod(mbuf, uintptr_t);
3514 /*
3515 * Check whether we can do inline to align start
3516 * address of data buffer to cacheline.
3517 */
3518 do_align:
3519 start = (~start + 1) & (RTE_CACHE_LINE_SIZE - 1);
3520 if (unlikely(start)) {
3521 start += inlen;
3522 if (start <= txq->inlen_send)
3523 inlen = start;
3524 }
3525 }
3526 /*
3527 * Check whether there are enough free WQEBBs:
3528 * - Control Segment
3529 * - Ethernet Segment
3530 * - First Segment of inlined Ethernet data
3531 * - ... data continued ...
3532 * - Data Segments of pointer/min inline type
3533 *
3534 * Estimate the number of Data Segments conservatively,
3535 * assuming no mbufs are freed during inlining.
3536 */
3537 MLX5_ASSERT(inlen <= txq->inlen_send);
3538 ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
3539 MLX5_ESEG_MIN_INLINE_SIZE +
3540 MLX5_WSEG_SIZE +
3541 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3542 if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
3543 return MLX5_TXCMP_CODE_EXIT;
3544 /* Check for maximal WQE size. */
3545 if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
3546 return MLX5_TXCMP_CODE_ERROR;
3547 #ifdef MLX5_PMD_SOFT_COUNTERS
3548 /* Update sent data bytes/packets counters. */
3549 txq->stats.obytes += dlen + vlan;
3550 #endif
3551 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3552 loc->wqe_last = wqe;
3553 mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_SEND, olx);
3554 ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 0, olx);
3555 wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3556 txq->wqe_ci += (ds + 3) / 4;
3557 loc->wqe_free -= (ds + 3) / 4;
3558 return MLX5_TXCMP_CODE_MULTI;
3559 }
3560
3561 /**
3562 * Tx burst function for multi-segment packets. Supports all
3563 * types of Tx offloads, uses MLX5_OPCODE_SEND/TSO to build WQEs,
3564 * sends one packet per WQE. Function stops sending if it
3565 * encounters the single-segment packet.
3566 *
3567 * This routine is responsible for storing processed mbuf
3568 * into the elts ring buffer and updating elts_head.
3569 *
3570 * @param txq
3571 * Pointer to TX queue structure.
3572 * @param[in] pkts
3573 * Packets to transmit.
3574 * @param pkts_n
3575 * Number of packets in array.
3576 * @param loc
3577 * Pointer to burst routine local context.
3578 * @param olx
3579 * Configured Tx offloads mask. It is fully defined at
3580 * compile time and may be used for optimization.
3581 *
3582 * @return
3583 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3584 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3585 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3586 * MLX5_TXCMP_CODE_TSO - TSO single-segment packet encountered.
3587 * Local context variables updated.
3588 */
3589 static __rte_always_inline enum mlx5_txcmp_code
3590 mlx5_tx_burst_mseg(struct mlx5_txq_data *restrict txq,
3591 struct rte_mbuf **restrict pkts,
3592 unsigned int pkts_n,
3593 struct mlx5_txq_local *restrict loc,
3594 unsigned int olx)
3595 {
3596 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3597 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3598 pkts += loc->pkts_sent + 1;
3599 pkts_n -= loc->pkts_sent;
3600 for (;;) {
3601 enum mlx5_txcmp_code ret;
3602
3603 MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
3604 /*
3605 * Estimate the number of free elts quickly but
3606 * conservatively. Some segment may be fully inlined
3607 * and freed, ignore this here - precise estimation
3608 * is costly.
3609 */
3610 if (loc->elts_free < NB_SEGS(loc->mbuf))
3611 return MLX5_TXCMP_CODE_EXIT;
3612 if (MLX5_TXOFF_CONFIG(TSO) &&
3613 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)) {
3614 /* Proceed with multi-segment TSO. */
3615 ret = mlx5_tx_packet_multi_tso(txq, loc, olx);
3616 } else if (MLX5_TXOFF_CONFIG(INLINE)) {
3617 /* Proceed with multi-segment SEND with inlining. */
3618 ret = mlx5_tx_packet_multi_inline(txq, loc, olx);
3619 } else {
3620 /* Proceed with multi-segment SEND w/o inlining. */
3621 ret = mlx5_tx_packet_multi_send(txq, loc, olx);
3622 }
3623 if (ret == MLX5_TXCMP_CODE_EXIT)
3624 return MLX5_TXCMP_CODE_EXIT;
3625 if (ret == MLX5_TXCMP_CODE_ERROR)
3626 return MLX5_TXCMP_CODE_ERROR;
3627 /* WQE is built, go to the next packet. */
3628 ++loc->pkts_sent;
3629 --pkts_n;
3630 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3631 return MLX5_TXCMP_CODE_EXIT;
3632 loc->mbuf = *pkts++;
3633 if (pkts_n > 1)
3634 rte_prefetch0(*pkts);
3635 if (likely(NB_SEGS(loc->mbuf) > 1))
3636 continue;
3637 /* Here ends the series of multi-segment packets. */
3638 if (MLX5_TXOFF_CONFIG(TSO) &&
3639 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3640 return MLX5_TXCMP_CODE_TSO;
3641 return MLX5_TXCMP_CODE_SINGLE;
3642 }
3643 MLX5_ASSERT(false);
3644 }
3645
3646 /**
3647 * Tx burst function for single-segment packets with TSO.
3648 * Supports all types of Tx offloads, except multi-packets.
3649 * Uses MLX5_OPCODE_TSO to build WQEs, sends one packet per WQE.
3650 * The function stops sending if it encounters a multi-segment
3651 * packet or a packet without TSO requested.
3652 *
3653 * The routine is responsible for storing the processed mbuf
3654 * into the elts ring buffer and updating elts_head if inline
3655 * offload is requested, because of the possible early freeing
3656 * of the inlined mbufs (the pkts array cannot be stored in elts
3657 * as a batch).
3658 *
3659 * @param txq
3660 * Pointer to TX queue structure.
3661 * @param[in] pkts
3662 * Packets to transmit.
3663 * @param pkts_n
3664 * Number of packets in array.
3665 * @param loc
3666 * Pointer to burst routine local context.
3667 * @param olx
3668 * Configured Tx offloads mask. It is fully defined at
3669 * compile time and may be used for optimization.
3670 *
3671 * @return
3672 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
3673 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
3674 * MLX5_TXCMP_CODE_SINGLE - single-segment packet encountered.
3675 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3676 * Local context variables updated.
3677 */
3678 static __rte_always_inline enum mlx5_txcmp_code
3679 mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq,
3680 struct rte_mbuf **restrict pkts,
3681 unsigned int pkts_n,
3682 struct mlx5_txq_local *restrict loc,
3683 unsigned int olx)
3684 {
3685 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
3686 MLX5_ASSERT(pkts_n > loc->pkts_sent);
3687 pkts += loc->pkts_sent + 1;
3688 pkts_n -= loc->pkts_sent;
3689 for (;;) {
3690 struct mlx5_wqe_dseg *restrict dseg;
3691 struct mlx5_wqe *restrict wqe;
3692 unsigned int ds, dlen, hlen, ntcp, vlan = 0;
3693 uint8_t *dptr;
3694
3695 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
3696 dlen = rte_pktmbuf_data_len(loc->mbuf);
3697 if (MLX5_TXOFF_CONFIG(VLAN) &&
3698 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
3699 vlan = sizeof(struct rte_vlan_hdr);
3700 }
3701 /*
3702 * First calculate the WQE size to check
3703 * whether we have enough space in ring buffer.
3704 */
3705 hlen = loc->mbuf->l2_len + vlan +
3706 loc->mbuf->l3_len + loc->mbuf->l4_len;
3707 if (unlikely((!hlen || !loc->mbuf->tso_segsz)))
3708 return MLX5_TXCMP_CODE_ERROR;
3709 if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
3710 hlen += loc->mbuf->outer_l2_len +
3711 loc->mbuf->outer_l3_len;
3712 /* Segment must contain all TSO headers. */
3713 if (unlikely(hlen > MLX5_MAX_TSO_HEADER ||
3714 hlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
3715 hlen > (dlen + vlan)))
3716 return MLX5_TXCMP_CODE_ERROR;
3717 /*
3718 * Check whether there are enough free WQEBBs:
3719 * - Control Segment
3720 * - Ethernet Segment
3721 * - First Segment of inlined Ethernet data
3722 * - ... data continued ...
3723 * - Finishing Data Segment of pointer type
3724 */
3725 ds = 4 + (hlen - MLX5_ESEG_MIN_INLINE_SIZE +
3726 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
3727 if (loc->wqe_free < ((ds + 3) / 4))
3728 return MLX5_TXCMP_CODE_EXIT;
3729 #ifdef MLX5_PMD_SOFT_COUNTERS
3730 /* Update sent data bytes/packets counters. */
3731 ntcp = (dlen + vlan - hlen +
3732 loc->mbuf->tso_segsz - 1) /
3733 loc->mbuf->tso_segsz;
3734 /*
3735 * One more is accounted for the mbuf itself at the end of
3736 * mlx5_tx_burst via the loc->pkts_sent field.
3737 */
3738 --ntcp;
3739 txq->stats.opackets += ntcp;
3740 txq->stats.obytes += dlen + vlan + ntcp * hlen;
3741 #endif
3742 /*
3743 * Build the TSO WQE:
3744 * - Control Segment
3745 * - Ethernet Segment with hlen bytes inlined
3746 * - Data Segment of pointer type
3747 */
3748 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
3749 loc->wqe_last = wqe;
3750 mlx5_tx_cseg_init(txq, loc, wqe, ds,
3751 MLX5_OPCODE_TSO, olx);
3752 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan, hlen, 1, olx);
3753 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) + hlen - vlan;
3754 dlen -= hlen - vlan;
3755 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
3756 /*
3757 * WQE is built, update the loop parameters
3758 * and go to the next packet.
3759 */
3760 txq->wqe_ci += (ds + 3) / 4;
3761 loc->wqe_free -= (ds + 3) / 4;
3762 if (MLX5_TXOFF_CONFIG(INLINE))
3763 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
3764 --loc->elts_free;
3765 ++loc->pkts_sent;
3766 --pkts_n;
3767 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
3768 return MLX5_TXCMP_CODE_EXIT;
3769 loc->mbuf = *pkts++;
3770 if (pkts_n > 1)
3771 rte_prefetch0(*pkts);
3772 if (MLX5_TXOFF_CONFIG(MULTI) &&
3773 unlikely(NB_SEGS(loc->mbuf) > 1))
3774 return MLX5_TXCMP_CODE_MULTI;
3775 if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
3776 return MLX5_TXCMP_CODE_SINGLE;
3777 /* Continue with the next TSO packet. */
3778 }
3779 MLX5_ASSERT(false);
3780 }
3781
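/*
 * Editor's illustrative note (not part of the original driver): a worked
 * example of the WQE size estimate above for a plain (non-tunnel) TSO
 * packet, assuming MLX5_WSEG_SIZE == 16 and MLX5_ESEG_MIN_INLINE_SIZE == 18.
 * With l2_len == 14, l3_len == 20, l4_len == 20 and no VLAN:
 *
 *   hlen = 14 + 20 + 20 = 54 bytes of inlined headers
 *   ds   = 4 + (54 - 18 + 16 - 1) / 16 = 4 + 51 / 16 = 4 + 3 = 7
 *   WQEBBs consumed = (7 + 3) / 4 = 2
 *
 * and with dlen == 1554, tso_segsz == 1460 the soft counters account
 * ntcp = (1554 - 54 + 1460 - 1) / 1460 = 2 TCP segments (one of them
 * is added later from loc->pkts_sent).
 */
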
3782 /**
3783 * Analyze the packet and select the best method to send.
3784 *
3785 * @param txq
3786 * Pointer to TX queue structure.
3787 * @param loc
3788 * Pointer to burst routine local context.
3789 * @param olx
3790 * Configured Tx offloads mask. It is fully defined at
3791 * compile time and may be used for optimization.
3792 * @param newp
3793 * The predefined flag whether to do the complete check for
3794 * multi-segment packets and TSO.
3795 *
3796 * @return
3797 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
3798 * MLX5_TXCMP_CODE_TSO - TSO required, use TSO/LSO.
3799 * MLX5_TXCMP_CODE_SINGLE - single-segment packet, use SEND.
3800 * MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
3801 */
3802 static __rte_always_inline enum mlx5_txcmp_code
3803 mlx5_tx_able_to_empw(struct mlx5_txq_data *restrict txq,
3804 struct mlx5_txq_local *restrict loc,
3805 unsigned int olx,
3806 bool newp)
3807 {
3808 /* Check for multi-segment packet. */
3809 if (newp &&
3810 MLX5_TXOFF_CONFIG(MULTI) &&
3811 unlikely(NB_SEGS(loc->mbuf) > 1))
3812 return MLX5_TXCMP_CODE_MULTI;
3813 /* Check for TSO packet. */
3814 if (newp &&
3815 MLX5_TXOFF_CONFIG(TSO) &&
3816 unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
3817 return MLX5_TXCMP_CODE_TSO;
3818 /* Check if eMPW is enabled at all. */
3819 if (!MLX5_TXOFF_CONFIG(EMPW))
3820 return MLX5_TXCMP_CODE_SINGLE;
3821 /* Check if eMPW can be engaged. */
3822 if (MLX5_TXOFF_CONFIG(VLAN) &&
3823 unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
3824 (!MLX5_TXOFF_CONFIG(INLINE) ||
3825 unlikely((rte_pktmbuf_data_len(loc->mbuf) +
3826 sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
3827 /*
3828 * eMPW does not support VLAN insertion offload, so
3829 * we would have to inline the entire packet, but the
3830 * packet is too long for inlining.
3831 */
3832 return MLX5_TXCMP_CODE_SINGLE;
3833 }
3834 return MLX5_TXCMP_CODE_EMPW;
3835 }
3836
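/*
 * Editor's illustrative note (not part of the original driver): with a
 * hypothetical txq->inlen_empw of 256 bytes, a 200-byte packet requesting
 * VLAN insertion can still go through eMPW because 200 + 4 (VLAN header)
 * fits into the inline limit, while a 300-byte VLAN packet falls back to
 * MLX5_TXCMP_CODE_SINGLE and is sent with the ordinary SEND opcode.
 */
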
3837 /**
3838 * Check the next packet attributes to match with the eMPW batch ones.
3839 * In addition, for legacy MPW the packet length is checked as well.
3840 *
3841 * @param txq
3842 * Pointer to TX queue structure.
3843 * @param es
3844 * Pointer to Ethernet Segment of eMPW batch.
3845 * @param loc
3846 * Pointer to burst routine local context.
3847 * @param dlen
3848 * Length of previous packet in MPW descriptor.
3849 * @param olx
3850 * Configured Tx offloads mask. It is fully defined at
3851 * compile time and may be used for optimization.
3852 *
3853 * @return
3854 * true - packet match with eMPW batch attributes.
3855 * false - no match, eMPW should be restarted.
3856 */
3857 static __rte_always_inline bool
3858 mlx5_tx_match_empw(struct mlx5_txq_data *restrict txq __rte_unused,
3859 struct mlx5_wqe_eseg *restrict es,
3860 struct mlx5_txq_local *restrict loc,
3861 uint32_t dlen,
3862 unsigned int olx)
3863 {
3864 uint8_t swp_flags = 0;
3865
3866 /* Compare the checksum flags, if any. */
3867 if (MLX5_TXOFF_CONFIG(CSUM) &&
3868 txq_ol_cksum_to_cs(loc->mbuf) != es->cs_flags)
3869 return false;
3870 /* Compare the Software Parser offsets and flags. */
3871 if (MLX5_TXOFF_CONFIG(SWP) &&
3872 (es->swp_offs != txq_mbuf_to_swp(loc, &swp_flags, olx) ||
3873 es->swp_flags != swp_flags))
3874 return false;
3875 /* Fill metadata field if needed. */
3876 if (MLX5_TXOFF_CONFIG(METADATA) &&
3877 es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
3878 *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
3879 return false;
3880 /* Legacy MPW can send packets with the same length only. */
3881 if (MLX5_TXOFF_CONFIG(MPW) &&
3882 dlen != rte_pktmbuf_data_len(loc->mbuf))
3883 return false;
3884 /* There must be no VLAN packets in eMPW loop. */
3885 if (MLX5_TXOFF_CONFIG(VLAN))
3886 MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
3887 return true;
3888 }
3889
3890 /**
3891 * Update send loop variables and WQE for eMPW loop
3892 * without data inlining. The number of Data Segments is
3893 * equal to the number of sent packets.
3894 *
3895 * @param txq
3896 * Pointer to TX queue structure.
3897 * @param loc
3898 * Pointer to burst routine local context.
3899 * @param ds
3900 * Number of packets/Data Segments.
3901 * @param slen
3902 * Accumulated statistics, bytes sent.
3903 * @param olx
3904 * Configured Tx offloads mask. It is fully defined at
3905 * compile time and may be used for optimization.
3906 */
3911 static __rte_always_inline void
3912 mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq,
3913 struct mlx5_txq_local *restrict loc,
3914 unsigned int ds,
3915 unsigned int slen,
3916 unsigned int olx __rte_unused)
3917 {
3918 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
3919 #ifdef MLX5_PMD_SOFT_COUNTERS
3920 /* Update sent data bytes counter. */
3921 txq->stats.obytes += slen;
3922 #else
3923 (void)slen;
3924 #endif
3925 loc->elts_free -= ds;
3926 loc->pkts_sent += ds;
3927 ds += 2;
3928 loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
3929 txq->wqe_ci += (ds + 3) / 4;
3930 loc->wqe_free -= (ds + 3) / 4;
3931 }
3932
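/*
 * Editor's illustrative note (not part of the original driver): for an
 * eMPW session without inlining every sent packet contributes exactly
 * one pointer Data Segment. If ds == 15 packets were pushed, the WQE
 * carries 15 + 2 = 17 segments (Control + Ethernet + 15 pointers) and
 * the queue advances by (17 + 3) / 4 = 5 WQEBBs.
 */
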
3933 /**
3934 * Update send loop variables and WQE for eMPW loop
3935 * with data inlining. Takes the total size of the descriptors
3936 * and data pushed to the WQE.
3937 *
3938 * @param txq
3939 * Pointer to TX queue structure.
3940 * @param loc
3941 * Pointer to burst routine local context.
3942 * @param len
3943 * Total size of descriptors/data in bytes.
3944 * @param slen
3945 * Accumulated statistics, data bytes sent.
3946 * @param wqem
3947 * The base WQE for the eMPW/MPW descriptor.
3948 * @param olx
3949 * Configured Tx offloads mask. It is fully defined at
3950 * compile time and may be used for optimization.
3951 */
3956 static __rte_always_inline void
3957 mlx5_tx_idone_empw(struct mlx5_txq_data *restrict txq,
3958 struct mlx5_txq_local *restrict loc,
3959 unsigned int len,
3960 unsigned int slen,
3961 struct mlx5_wqe *restrict wqem,
3962 unsigned int olx __rte_unused)
3963 {
3964 struct mlx5_wqe_dseg *dseg = &wqem->dseg[0];
3965
3966 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
3967 #ifdef MLX5_PMD_SOFT_COUNTERS
3968 /* Update sent data bytes counter. */
3969 txq->stats.obytes += slen;
3970 #else
3971 (void)slen;
3972 #endif
3973 if (MLX5_TXOFF_CONFIG(MPW) && dseg->bcount == RTE_BE32(0)) {
3974 /*
3975 * If the legacy MPW session contains the inline packets
3976 * we should set the only inline data segment length
3977 * and align the total length to the segment size.
3978 */
3979 MLX5_ASSERT(len > sizeof(dseg->bcount));
3980 dseg->bcount = rte_cpu_to_be_32((len - sizeof(dseg->bcount)) |
3981 MLX5_ETH_WQE_DATA_INLINE);
3982 len = (len + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE + 2;
3983 } else {
3984 /*
3985 * The session is not legacy MPW or contains the
3986 * data buffer pointer segments.
3987 */
3988 MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
3989 len = len / MLX5_WSEG_SIZE + 2;
3990 }
3991 wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
3992 txq->wqe_ci += (len + 3) / 4;
3993 loc->wqe_free -= (len + 3) / 4;
3994 loc->wqe_last = wqem;
3995 }
3996
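/*
 * Editor's illustrative note (not part of the original driver): a worked
 * example of the closing arithmetic above, assuming MLX5_WSEG_SIZE == 16.
 * For a legacy MPW inline session that accumulated len == 130 bytes
 * (4-byte bcount header plus 126 bytes of packet data):
 *
 *   ds = (130 + 16 - 1) / 16 + 2 = 9 + 2 = 11 segments
 *
 * while a non-MPW (eMPW) session with len == 160 bytes of 16-byte
 * aligned descriptors takes 160 / 16 + 2 = 12 segments; in both cases
 * wqe_ci advances by (ds + 3) / 4 WQEBBs.
 */
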
3997 /**
3998 * The set of Tx burst functions for single-segment packets
3999 * without TSO and with Multi-Packet Writing feature support.
4000 * Supports all types of Tx offloads, except multi-segment
4001 * packets and TSO.
4002 *
4003 * Uses MLX5_OPCODE_EMPW to build WQEs if possible and sends
4004 * as many packets per WQE as it can. If eMPW is not configured
4005 * or the packet cannot be sent with eMPW (VLAN insertion) the
4006 * ordinary SEND opcode is used and only one packet is placed
4007 * in the WQE.
4008 *
4009 * The functions stop sending if they encounter a multi-segment
4010 * packet or a packet with TSO requested.
4011 *
4012 * The routines are responsible for storing the processed mbuf
4013 * into the elts ring buffer and updating elts_head if inlining
4014 * offload is requested. Otherwise the copying of mbufs to elts
4015 * can be postponed and completed at the end of the burst routine.
4016 *
4017 * @param txq
4018 * Pointer to TX queue structure.
4019 * @param[in] pkts
4020 * Packets to transmit.
4021 * @param pkts_n
4022 * Number of packets in array.
4023 * @param loc
4024 * Pointer to burst routine local context.
4025 * @param olx
4026 * Configured Tx offloads mask. It is fully defined at
4027 * compile time and may be used for optimization.
4028 *
4029 * @return
4030 * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
4031 * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
4032 * MLX5_TXCMP_CODE_MULTI - multi-segment packet encountered.
4033 * MLX5_TXCMP_CODE_TSO - TSO packet encountered.
4034 * MLX5_TXCMP_CODE_SINGLE - used inside functions set.
4035 * MLX5_TXCMP_CODE_EMPW - used inside functions set.
4036 *
4037 * Local context variables updated.
4038 *
4039 *
4040 * The routine sends packets with MLX5_OPCODE_EMPW
4041 * without inlining, this is dedicated optimized branch.
4042 * No VLAN insertion is supported.
4043 */
4044 static __rte_always_inline enum mlx5_txcmp_code
4045 mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq,
4046 struct rte_mbuf **restrict pkts,
4047 unsigned int pkts_n,
4048 struct mlx5_txq_local *restrict loc,
4049 unsigned int olx)
4050 {
4051 /*
4052 * This subroutine is part of mlx5_tx_burst_single()
4053 * and sends single-segment packets with the eMPW opcode
4054 * without data inlining.
4055 */
4056 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
4057 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
4058 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4059 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4060 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4061 pkts += loc->pkts_sent + 1;
4062 pkts_n -= loc->pkts_sent;
4063 for (;;) {
4064 struct mlx5_wqe_dseg *restrict dseg;
4065 struct mlx5_wqe_eseg *restrict eseg;
4066 enum mlx5_txcmp_code ret;
4067 unsigned int part, loop;
4068 unsigned int slen = 0;
4069
4070 next_empw:
4071 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4072 part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4073 MLX5_MPW_MAX_PACKETS :
4074 MLX5_EMPW_MAX_PACKETS);
4075 if (unlikely(loc->elts_free < part)) {
4076 /* We do not have enough elts to store all mbufs. */
4077 if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
4078 return MLX5_TXCMP_CODE_EXIT;
4079 /* But we are still able to send at least a minimal eMPW. */
4080 part = loc->elts_free;
4081 }
4082 /* Check whether we have enough WQEs */
4083 if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
4084 if (unlikely(loc->wqe_free <
4085 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4086 return MLX5_TXCMP_CODE_EXIT;
4087 part = (loc->wqe_free * 4) - 2;
4088 }
4089 if (likely(part > 1))
4090 rte_prefetch0(*pkts);
4091 loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4092 /*
4093 * Build eMPW title WQEBB:
4094 * - Control Segment, eMPW opcode
4095 * - Ethernet Segment, no inline
4096 */
4097 mlx5_tx_cseg_init(txq, loc, loc->wqe_last, part + 2,
4098 MLX5_OPCODE_ENHANCED_MPSW, olx);
4099 mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
4100 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4101 eseg = &loc->wqe_last->eseg;
4102 dseg = &loc->wqe_last->dseg[0];
4103 loop = part;
4104 /* Store the packet length for legacy MPW. */
4105 if (MLX5_TXOFF_CONFIG(MPW))
4106 eseg->mss = rte_cpu_to_be_16
4107 (rte_pktmbuf_data_len(loc->mbuf));
4108 for (;;) {
4109 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4110 #ifdef MLX5_PMD_SOFT_COUNTERS
4111 /* Update sent data bytes counter. */
4112 slen += dlen;
4113 #endif
4114 mlx5_tx_dseg_ptr
4115 (txq, loc, dseg,
4116 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4117 dlen, olx);
4118 if (unlikely(--loop == 0))
4119 break;
4120 loc->mbuf = *pkts++;
4121 if (likely(loop > 1))
4122 rte_prefetch0(*pkts);
4123 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4124 /*
4125 * Unroll the completion code to avoid
4126 * returning a variable value - it results in
4127 * unoptimized subsequent checking in the caller.
4128 */
4129 if (ret == MLX5_TXCMP_CODE_MULTI) {
4130 part -= loop;
4131 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4132 if (unlikely(!loc->elts_free ||
4133 !loc->wqe_free))
4134 return MLX5_TXCMP_CODE_EXIT;
4135 return MLX5_TXCMP_CODE_MULTI;
4136 }
4137 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4138 if (ret == MLX5_TXCMP_CODE_TSO) {
4139 part -= loop;
4140 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4141 if (unlikely(!loc->elts_free ||
4142 !loc->wqe_free))
4143 return MLX5_TXCMP_CODE_EXIT;
4144 return MLX5_TXCMP_CODE_TSO;
4145 }
4146 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4147 part -= loop;
4148 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4149 if (unlikely(!loc->elts_free ||
4150 !loc->wqe_free))
4151 return MLX5_TXCMP_CODE_EXIT;
4152 return MLX5_TXCMP_CODE_SINGLE;
4153 }
4154 if (ret != MLX5_TXCMP_CODE_EMPW) {
4155 MLX5_ASSERT(false);
4156 part -= loop;
4157 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4158 return MLX5_TXCMP_CODE_ERROR;
4159 }
4160 /*
4161 * Check whether packet parameters coincide
4162 * within assumed eMPW batch:
4163 * - check sum settings
4164 * - metadata value
4165 * - software parser settings
4166 * - packets length (legacy MPW only)
4167 */
4168 if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
4169 MLX5_ASSERT(loop);
4170 part -= loop;
4171 mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
4172 if (unlikely(!loc->elts_free ||
4173 !loc->wqe_free))
4174 return MLX5_TXCMP_CODE_EXIT;
4175 pkts_n -= part;
4176 goto next_empw;
4177 }
4178 /* Packet attributes match, continue the same eMPW. */
4179 ++dseg;
4180 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4181 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4182 }
4183 /* eMPW is built successfully, update loop parameters. */
4184 MLX5_ASSERT(!loop);
4185 MLX5_ASSERT(pkts_n >= part);
4186 #ifdef MLX5_PMD_SOFT_COUNTERS
4187 /* Update sent data bytes counter. */
4188 txq->stats.obytes += slen;
4189 #endif
4190 loc->elts_free -= part;
4191 loc->pkts_sent += part;
4192 txq->wqe_ci += (2 + part + 3) / 4;
4193 loc->wqe_free -= (2 + part + 3) / 4;
4194 pkts_n -= part;
4195 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4196 return MLX5_TXCMP_CODE_EXIT;
4197 loc->mbuf = *pkts++;
4198 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4199 if (unlikely(ret != MLX5_TXCMP_CODE_EMPW))
4200 return ret;
4201 /* Continue sending eMPW batches. */
4202 }
4203 MLX5_ASSERT(false);
4204 }
4205
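/*
 * Editor's illustrative note (not part of the original driver): the WQE
 * availability check above reserves two segments (Control + Ethernet)
 * for the eMPW title. With loc->wqe_free == 5 free WQEBBs the routine
 * can place at most part = 5 * 4 - 2 = 18 pointer Data Segments, i.e.
 * 18 packets, before it has to close the session.
 */
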
4206 /**
4207 * The routine sends packets with MLX5_OPCODE_EMPW
4208 * with inlining, optionally supports VLAN insertion.
4209 */
4210 static __rte_always_inline enum mlx5_txcmp_code
4211 mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq,
4212 struct rte_mbuf **restrict pkts,
4213 unsigned int pkts_n,
4214 struct mlx5_txq_local *restrict loc,
4215 unsigned int olx)
4216 {
4217 /*
4218 * This subroutine is part of mlx5_tx_burst_single()
4219 * and sends single-segment packets with the eMPW opcode
4220 * with data inlining.
4221 */
4222 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4223 MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
4224 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4225 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4226 static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
4227 pkts += loc->pkts_sent + 1;
4228 pkts_n -= loc->pkts_sent;
4229 for (;;) {
4230 struct mlx5_wqe_dseg *restrict dseg;
4231 struct mlx5_wqe *restrict wqem;
4232 enum mlx5_txcmp_code ret;
4233 unsigned int room, part, nlim;
4234 unsigned int slen = 0;
4235
4236 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4237 /*
4238 * Limit the number of packets in one WQE
4239 * to improve CQE generation latency.
4240 */
4241 nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
4242 MLX5_MPW_INLINE_MAX_PACKETS :
4243 MLX5_EMPW_MAX_PACKETS);
4244 /* Check whether we have the minimal amount of WQEs. */
4245 if (unlikely(loc->wqe_free <
4246 ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
4247 return MLX5_TXCMP_CODE_EXIT;
4248 if (likely(pkts_n > 1))
4249 rte_prefetch0(*pkts);
4250 wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4251 /*
4252 * Build eMPW title WQEBB:
4253 * - Control Segment, eMPW opcode, zero DS
4254 * - Ethernet Segment, no inline
4255 */
4256 mlx5_tx_cseg_init(txq, loc, wqem, 0,
4257 MLX5_OPCODE_ENHANCED_MPSW, olx);
4258 mlx5_tx_eseg_none(txq, loc, wqem,
4259 olx & ~MLX5_TXOFF_CONFIG_VLAN);
4260 dseg = &wqem->dseg[0];
4261 /* Store the packet length for legacy MPW. */
4262 if (MLX5_TXOFF_CONFIG(MPW))
4263 wqem->eseg.mss = rte_cpu_to_be_16
4264 (rte_pktmbuf_data_len(loc->mbuf));
4265 room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
4266 loc->wqe_free) * MLX5_WQE_SIZE -
4267 MLX5_WQE_CSEG_SIZE -
4268 MLX5_WQE_ESEG_SIZE;
4269 /* Limit the room for legacy MPW sessions for performance. */
4270 if (MLX5_TXOFF_CONFIG(MPW))
4271 room = RTE_MIN(room,
4272 RTE_MAX(txq->inlen_empw +
4273 sizeof(dseg->bcount) +
4274 (MLX5_TXOFF_CONFIG(VLAN) ?
4275 sizeof(struct rte_vlan_hdr) : 0),
4276 MLX5_MPW_INLINE_MAX_PACKETS *
4277 MLX5_WQE_DSEG_SIZE));
4278 /* Build WQE till we have space, packets and resources. */
4279 part = room;
4280 for (;;) {
4281 uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
4282 uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
4283 unsigned int tlen;
4284
4285 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
4286 MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
4287 MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
4288 /*
4289 * Some Tx offloads may cause an error if
4290 * packet is not long enough, check against
4291 * assumed minimal length.
4292 */
4293 if (unlikely(dlen <= MLX5_ESEG_MIN_INLINE_SIZE)) {
4294 part -= room;
4295 if (unlikely(!part))
4296 return MLX5_TXCMP_CODE_ERROR;
4297 /*
4298 * We have some successfully built
4299 * packet Data Segments to send.
4300 */
4301 mlx5_tx_idone_empw(txq, loc, part,
4302 slen, wqem, olx);
4303 return MLX5_TXCMP_CODE_ERROR;
4304 }
4305 /* Inline or not inline - that's the Question. */
4306 if (dlen > txq->inlen_empw ||
4307 loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE)
4308 goto pointer_empw;
4309 if (MLX5_TXOFF_CONFIG(MPW)) {
4310 if (dlen > txq->inlen_send)
4311 goto pointer_empw;
4312 tlen = dlen;
4313 if (part == room) {
4314 /* Open new inline MPW session. */
4315 tlen += sizeof(dseg->bcount);
4316 dseg->bcount = RTE_BE32(0);
4317 dseg = RTE_PTR_ADD
4318 (dseg, sizeof(dseg->bcount));
4319 } else {
4320 /*
4321 * No pointer and inline descriptor
4322 * intermix for legacy MPW sessions.
4323 */
4324 if (wqem->dseg[0].bcount)
4325 break;
4326 }
4327 } else {
4328 tlen = sizeof(dseg->bcount) + dlen;
4329 }
4330 /* Inline entire packet, optional VLAN insertion. */
4331 if (MLX5_TXOFF_CONFIG(VLAN) &&
4332 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4333 /*
4334 * The packet length was checked in
4335 * mlx5_tx_able_to_empw(), the packet is
4336 * guaranteed to fit into the inline length.
4337 */
4338 MLX5_ASSERT((dlen +
4339 sizeof(struct rte_vlan_hdr)) <=
4340 txq->inlen_empw);
4341 tlen += sizeof(struct rte_vlan_hdr);
4342 if (room < tlen)
4343 break;
4344 dseg = mlx5_tx_dseg_vlan(txq, loc, dseg,
4345 dptr, dlen, olx);
4346 #ifdef MLX5_PMD_SOFT_COUNTERS
4347 /* Update sent data bytes counter. */
4348 slen += sizeof(struct rte_vlan_hdr);
4349 #endif
4350 } else {
4351 if (room < tlen)
4352 break;
4353 dseg = mlx5_tx_dseg_empw(txq, loc, dseg,
4354 dptr, dlen, olx);
4355 }
4356 if (!MLX5_TXOFF_CONFIG(MPW))
4357 tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
4358 MLX5_ASSERT(room >= tlen);
4359 room -= tlen;
4360 /*
4361 * Packet data are completely inlined,
4362 * free the packet immediately.
4363 */
4364 rte_pktmbuf_free_seg(loc->mbuf);
4365 goto next_mbuf;
4366 pointer_empw:
4367 /*
4368 * No pointer and inline descriptor
4369 * intermix for legacy MPW sessions.
4370 */
4371 if (MLX5_TXOFF_CONFIG(MPW) &&
4372 part != room &&
4373 wqem->dseg[0].bcount == RTE_BE32(0))
4374 break;
4375 /*
4376 * Non-inlinable VLAN packets are
4377 * processed outside of this routine.
4378 */
4379 MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
4380 if (MLX5_TXOFF_CONFIG(VLAN))
4381 MLX5_ASSERT(!(loc->mbuf->ol_flags &
4382 PKT_TX_VLAN_PKT));
4383 mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
4384 /* We have to store mbuf in elts.*/
4385 txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
4386 room -= MLX5_WQE_DSEG_SIZE;
4387 /* Ring buffer wraparound is checked at the loop end.*/
4388 ++dseg;
4389 next_mbuf:
4390 #ifdef MLX5_PMD_SOFT_COUNTERS
4391 /* Update sent data bytes counter. */
4392 slen += dlen;
4393 #endif
4394 loc->pkts_sent++;
4395 loc->elts_free--;
4396 pkts_n--;
4397 if (unlikely(!pkts_n || !loc->elts_free)) {
4398 /*
4399 * We have no resources/packets to
4400 * continue building descriptors.
4401 */
4402 part -= room;
4403 mlx5_tx_idone_empw(txq, loc, part,
4404 slen, wqem, olx);
4405 return MLX5_TXCMP_CODE_EXIT;
4406 }
4407 loc->mbuf = *pkts++;
4408 if (likely(pkts_n > 1))
4409 rte_prefetch0(*pkts);
4410 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4411 /*
4412 * Unroll the completion code to avoid
4413 * returning a variable value - it results in
4414 * unoptimized subsequent checking in the caller.
4415 */
4416 if (ret == MLX5_TXCMP_CODE_MULTI) {
4417 part -= room;
4418 mlx5_tx_idone_empw(txq, loc, part,
4419 slen, wqem, olx);
4420 if (unlikely(!loc->elts_free ||
4421 !loc->wqe_free))
4422 return MLX5_TXCMP_CODE_EXIT;
4423 return MLX5_TXCMP_CODE_MULTI;
4424 }
4425 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4426 if (ret == MLX5_TXCMP_CODE_TSO) {
4427 part -= room;
4428 mlx5_tx_idone_empw(txq, loc, part,
4429 slen, wqem, olx);
4430 if (unlikely(!loc->elts_free ||
4431 !loc->wqe_free))
4432 return MLX5_TXCMP_CODE_EXIT;
4433 return MLX5_TXCMP_CODE_TSO;
4434 }
4435 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4436 part -= room;
4437 mlx5_tx_idone_empw(txq, loc, part,
4438 slen, wqem, olx);
4439 if (unlikely(!loc->elts_free ||
4440 !loc->wqe_free))
4441 return MLX5_TXCMP_CODE_EXIT;
4442 return MLX5_TXCMP_CODE_SINGLE;
4443 }
4444 if (ret != MLX5_TXCMP_CODE_EMPW) {
4445 MLX5_ASSERT(false);
4446 part -= room;
4447 mlx5_tx_idone_empw(txq, loc, part,
4448 slen, wqem, olx);
4449 return MLX5_TXCMP_CODE_ERROR;
4450 }
4451 /* Check if we have minimal room left. */
4452 nlim--;
4453 if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
4454 break;
4455 /*
4456 * Check whether packet parameters coincide
4457 * within assumed eMPW batch:
4458 * - check sum settings
4459 * - metadata value
4460 * - software parser settings
4461 * - packets length (legacy MPW only)
4462 */
4463 if (!mlx5_tx_match_empw(txq, &wqem->eseg,
4464 loc, dlen, olx))
4465 break;
4466 /* Packet attributes match, continue the same eMPW. */
4467 if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
4468 dseg = (struct mlx5_wqe_dseg *)txq->wqes;
4469 }
4470 /*
4471 * We get here to close an existing eMPW
4472 * session and start the new one.
4473 */
4474 MLX5_ASSERT(pkts_n);
4475 part -= room;
4476 if (unlikely(!part))
4477 return MLX5_TXCMP_CODE_EXIT;
4478 mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx);
4479 if (unlikely(!loc->elts_free ||
4480 !loc->wqe_free))
4481 return MLX5_TXCMP_CODE_EXIT;
4482 /* Continue the loop with new eMPW session. */
4483 }
4484 MLX5_ASSERT(false);
4485 }
4486
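/*
 * Editor's illustrative note (not part of the original driver): a worked
 * example of the "room" budget used above, assuming one WQEBB is 64 bytes
 * and the Control/Ethernet segments are 16 bytes each (MLX5_WQE_CSEG_SIZE
 * and MLX5_WQE_ESEG_SIZE). With loc->wqe_free == 4 and the
 * MLX5_WQE_SIZE_MAX clamp not limiting:
 *
 *   room = 4 * 64 - 16 - 16 = 224 bytes
 *
 * which is enough for fourteen 16-byte pointer Data Segments, or for a
 * smaller number of inlined packets, each taking its data length plus
 * the 4-byte bcount header rounded up to 16 bytes in the eMPW case.
 */
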
4487 /**
4488 * The routine sends packets with ordinary MLX5_OPCODE_SEND.
4489 * Data inlining and VLAN insertion are supported.
4490 */
4491 static __rte_always_inline enum mlx5_txcmp_code
4492 mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq,
4493 struct rte_mbuf **restrict pkts,
4494 unsigned int pkts_n,
4495 struct mlx5_txq_local *restrict loc,
4496 unsigned int olx)
4497 {
4498 /*
4499 * This subroutine is part of mlx5_tx_burst_single()
4500 * and sends single-segment packets with the SEND opcode.
4501 */
4502 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4503 MLX5_ASSERT(pkts_n > loc->pkts_sent);
4504 pkts += loc->pkts_sent + 1;
4505 pkts_n -= loc->pkts_sent;
4506 for (;;) {
4507 struct mlx5_wqe *restrict wqe;
4508 enum mlx5_txcmp_code ret;
4509
4510 MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
4511 if (MLX5_TXOFF_CONFIG(INLINE)) {
4512 unsigned int inlen, vlan = 0;
4513
4514 inlen = rte_pktmbuf_data_len(loc->mbuf);
4515 if (MLX5_TXOFF_CONFIG(VLAN) &&
4516 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
4517 vlan = sizeof(struct rte_vlan_hdr);
4518 inlen += vlan;
4519 static_assert((sizeof(struct rte_vlan_hdr) +
4520 sizeof(struct rte_ether_hdr)) ==
4521 MLX5_ESEG_MIN_INLINE_SIZE,
4522 "invalid min inline data size");
4523 }
4524 /*
4525 * If inlining is enabled at configuration time
4526 * the limit must not be less than the minimal size.
4527 * Otherwise we would have to do an extra data size
4528 * check to avoid crashes due to length overflow.
4529 */
4530 MLX5_ASSERT(txq->inlen_send >=
4531 MLX5_ESEG_MIN_INLINE_SIZE);
4532 if (inlen <= txq->inlen_send) {
4533 unsigned int seg_n, wqe_n;
4534
4535 rte_prefetch0(rte_pktmbuf_mtod
4536 (loc->mbuf, uint8_t *));
4537 /* Check against minimal length. */
4538 if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
4539 return MLX5_TXCMP_CODE_ERROR;
4540 if (loc->mbuf->ol_flags &
4541 PKT_TX_DYNF_NOINLINE) {
4542 /*
4543 * The hint flag not to inline packet
4544 * data is set. Check whether we can
4545 * follow the hint.
4546 */
4547 if ((!MLX5_TXOFF_CONFIG(EMPW) &&
4548 txq->inlen_mode) ||
4549 (MLX5_TXOFF_CONFIG(MPW) &&
4550 txq->inlen_mode)) {
4551 /*
4552 * The hardware requires the
4553 * minimal inline data header.
4554 */
4555 goto single_min_inline;
4556 }
4557 if (MLX5_TXOFF_CONFIG(VLAN) &&
4558 vlan && !txq->vlan_en) {
4559 /*
4560 * We must insert VLAN tag
4561 * by software means.
4562 */
4563 goto single_part_inline;
4564 }
4565 goto single_no_inline;
4566 }
4567 /*
4568 * Completely inlined packet data WQE:
4569 * - Control Segment, SEND opcode
4570 * - Ethernet Segment, no VLAN insertion
4571 * - Data inlined, VLAN optionally inserted
4572 * - Alignment to MLX5_WSEG_SIZE
4573 * Have to estimate amount of WQEBBs
4574 */
4575 seg_n = (inlen + 3 * MLX5_WSEG_SIZE -
4576 MLX5_ESEG_MIN_INLINE_SIZE +
4577 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4578 /* Check if there are enough WQEBBs. */
4579 wqe_n = (seg_n + 3) / 4;
4580 if (wqe_n > loc->wqe_free)
4581 return MLX5_TXCMP_CODE_EXIT;
4582 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4583 loc->wqe_last = wqe;
4584 mlx5_tx_cseg_init(txq, loc, wqe, seg_n,
4585 MLX5_OPCODE_SEND, olx);
4586 mlx5_tx_eseg_data(txq, loc, wqe,
4587 vlan, inlen, 0, olx);
4588 txq->wqe_ci += wqe_n;
4589 loc->wqe_free -= wqe_n;
4590 /*
4591 * Packet data are completely inlined,
4592 * free the packet immediately.
4593 */
4594 rte_pktmbuf_free_seg(loc->mbuf);
4595 } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
4596 MLX5_TXOFF_CONFIG(MPW)) &&
4597 txq->inlen_mode) {
4598 /*
4599 * If minimal inlining is requested the eMPW
4600 * feature should be disabled because data is
4601 * inlined into the Ethernet Segment, which
4602 * cannot carry inlined data for eMPW since the
4603 * segment is shared by all packets.
4604 */
4605 struct mlx5_wqe_dseg *restrict dseg;
4606 unsigned int ds;
4607 uint8_t *dptr;
4608
4609 /*
4610 * The inline-mode settings require
4611 * inlining the specified amount of
4612 * data bytes into the Ethernet Segment.
4613 * We should check the free space in the
4614 * WQE ring buffer to inline partially.
4615 */
4616 single_min_inline:
4617 MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
4618 MLX5_ASSERT(inlen > txq->inlen_mode);
4619 MLX5_ASSERT(txq->inlen_mode >=
4620 MLX5_ESEG_MIN_INLINE_SIZE);
4621 /*
4622 * Check whether there are enough free WQEBBs:
4623 * - Control Segment
4624 * - Ethernet Segment
4625 * - First Segment of inlined Ethernet data
4626 * - ... data continued ...
4627 * - Finishing Data Segment of pointer type
4628 */
4629 ds = (MLX5_WQE_CSEG_SIZE +
4630 MLX5_WQE_ESEG_SIZE +
4631 MLX5_WQE_DSEG_SIZE +
4632 txq->inlen_mode -
4633 MLX5_ESEG_MIN_INLINE_SIZE +
4634 MLX5_WQE_DSEG_SIZE +
4635 MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
4636 if (loc->wqe_free < ((ds + 3) / 4))
4637 return MLX5_TXCMP_CODE_EXIT;
4638 /*
4639 * Build the ordinary SEND WQE:
4640 * - Control Segment
4641 * - Ethernet Segment, inline inlen_mode bytes
4642 * - Data Segment of pointer type
4643 */
4644 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4645 loc->wqe_last = wqe;
4646 mlx5_tx_cseg_init(txq, loc, wqe, ds,
4647 MLX5_OPCODE_SEND, olx);
4648 dseg = mlx5_tx_eseg_data(txq, loc, wqe, vlan,
4649 txq->inlen_mode,
4650 0, olx);
4651 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4652 txq->inlen_mode - vlan;
4653 inlen -= txq->inlen_mode;
4654 mlx5_tx_dseg_ptr(txq, loc, dseg,
4655 dptr, inlen, olx);
4656 /*
4657 * WQE is built, update the loop parameters
4658 * and go to the next packet.
4659 */
4660 txq->wqe_ci += (ds + 3) / 4;
4661 loc->wqe_free -= (ds + 3) / 4;
4662 /* We have to store mbuf in elts.*/
4663 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4664 txq->elts[txq->elts_head++ & txq->elts_m] =
4665 loc->mbuf;
4666 --loc->elts_free;
4667 } else {
4668 uint8_t *dptr;
4669 unsigned int dlen;
4670
4671 /*
4672 * Partially inlined packet data WQE: we have
4673 * some space in the title WQEBB and can fill it
4674 * with some packet data. It takes one WQEBB,
4675 * which is available, so no extra space check:
4676 * - Control Segment, SEND opcode
4677 * - Ethernet Segment, no VLAN insertion
4678 * - MLX5_ESEG_MIN_INLINE_SIZE bytes of Data
4679 * - Data Segment, pointer type
4680 *
4681 * We also get here if VLAN insertion is not
4682 * supported by HW and inlining is enabled.
4683 */
4684 single_part_inline:
4685 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4686 loc->wqe_last = wqe;
4687 mlx5_tx_cseg_init(txq, loc, wqe, 4,
4688 MLX5_OPCODE_SEND, olx);
4689 mlx5_tx_eseg_dmin(txq, loc, wqe, vlan, olx);
4690 dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *) +
4691 MLX5_ESEG_MIN_INLINE_SIZE - vlan;
4692 /*
4693 * The length check is performed above, by
4694 * comparing with txq->inlen_send. We should
4695 * not get overflow here.
4696 */
4697 MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
4698 dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
4699 mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
4700 dptr, dlen, olx);
4701 ++txq->wqe_ci;
4702 --loc->wqe_free;
4703 /* We have to store mbuf in elts.*/
4704 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
4705 txq->elts[txq->elts_head++ & txq->elts_m] =
4706 loc->mbuf;
4707 --loc->elts_free;
4708 }
4709 #ifdef MLX5_PMD_SOFT_COUNTERS
4710 /* Update sent data bytes counter. */
4711 txq->stats.obytes += vlan +
4712 rte_pktmbuf_data_len(loc->mbuf);
4713 #endif
4714 } else {
4715 /*
4716 * No inlining at all - it means that CPU cycle saving
4717 * is prioritized at configuration time, so we should not
4718 * copy any packet data to the WQE.
4719 *
4720 * SEND WQE, one WQEBB:
4721 * - Control Segment, SEND opcode
4722 * - Ethernet Segment, optional VLAN, no inline
4723 * - Data Segment, pointer type
4724 */
4725 single_no_inline:
4726 wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
4727 loc->wqe_last = wqe;
4728 mlx5_tx_cseg_init(txq, loc, wqe, 3,
4729 MLX5_OPCODE_SEND, olx);
4730 mlx5_tx_eseg_none(txq, loc, wqe, olx);
4731 mlx5_tx_dseg_ptr
4732 (txq, loc, &wqe->dseg[0],
4733 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
4734 rte_pktmbuf_data_len(loc->mbuf), olx);
4735 ++txq->wqe_ci;
4736 --loc->wqe_free;
4737 /*
4738 * We should not store the mbuf pointer in elts
4739 * if no inlining is configured - this is done
4740 * by the calling routine in a batch copy.
4741 */
4742 MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
4743 --loc->elts_free;
4744 #ifdef MLX5_PMD_SOFT_COUNTERS
4745 /* Update sent data bytes counter. */
4746 txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
4747 if (MLX5_TXOFF_CONFIG(VLAN) &&
4748 loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
4749 txq->stats.obytes +=
4750 sizeof(struct rte_vlan_hdr);
4751 #endif
4752 }
4753 ++loc->pkts_sent;
4754 --pkts_n;
4755 if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
4756 return MLX5_TXCMP_CODE_EXIT;
4757 loc->mbuf = *pkts++;
4758 if (pkts_n > 1)
4759 rte_prefetch0(*pkts);
4760 ret = mlx5_tx_able_to_empw(txq, loc, olx, true);
4761 if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
4762 return ret;
4763 }
4764 MLX5_ASSERT(false);
4765 }
4766
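/*
 * Editor's illustrative note (not part of the original driver): a worked
 * example of the fully-inlined WQE sizing in the routine above, assuming
 * MLX5_WSEG_SIZE == 16 and MLX5_ESEG_MIN_INLINE_SIZE == 18. For a
 * 60-byte packet (no VLAN) with inlen == 60 <= txq->inlen_send:
 *
 *   seg_n = (60 + 3 * 16 - 18 + 16 - 1) / 16 = 105 / 16 = 6 segments
 *   wqe_n = (6 + 3) / 4 = 2 WQEBBs
 *
 * so the packet data is copied into two WQEBBs and the mbuf is freed
 * immediately instead of being stored in elts.
 */
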
4767 static __rte_always_inline enum mlx5_txcmp_code
4768 mlx5_tx_burst_single(struct mlx5_txq_data *restrict txq,
4769 struct rte_mbuf **restrict pkts,
4770 unsigned int pkts_n,
4771 struct mlx5_txq_local *restrict loc,
4772 unsigned int olx)
4773 {
4774 enum mlx5_txcmp_code ret;
4775
4776 ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
4777 if (ret == MLX5_TXCMP_CODE_SINGLE)
4778 goto ordinary_send;
4779 MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
4780 for (;;) {
4781 /* Optimize for inline/no inline eMPW send. */
4782 ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
4783 mlx5_tx_burst_empw_inline
4784 (txq, pkts, pkts_n, loc, olx) :
4785 mlx5_tx_burst_empw_simple
4786 (txq, pkts, pkts_n, loc, olx);
4787 if (ret != MLX5_TXCMP_CODE_SINGLE)
4788 return ret;
4789 /* The resources to send one packet should remain. */
4790 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4791 ordinary_send:
4792 ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
4793 MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
4794 if (ret != MLX5_TXCMP_CODE_EMPW)
4795 return ret;
4796 /* The resources to send one packet should remain. */
4797 MLX5_ASSERT(loc->elts_free && loc->wqe_free);
4798 }
4799 }
4800
4801 /**
4802 * DPDK Tx callback template. This is a configured template
4803 * used to generate routines optimized for a specified offload setup.
4804 * One of these generated functions is chosen at SQ configuration
4805 * time.
4806 *
4807 * @param txq
4808 * Generic pointer to TX queue structure.
4809 * @param[in] pkts
4810 * Packets to transmit.
4811 * @param pkts_n
4812 * Number of packets in array.
4813 * @param olx
4814 * Configured offloads mask, presents the bits of MLX5_TXOFF_CONFIG_xxx
4815 * values. Should be static to take compile time static configuration
4816 * advantages.
4817 *
4818 * @return
4819 * Number of packets successfully transmitted (<= pkts_n).
4820 */
4821 static __rte_always_inline uint16_t
4822 mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq,
4823 struct rte_mbuf **restrict pkts,
4824 uint16_t pkts_n,
4825 unsigned int olx)
4826 {
4827 struct mlx5_txq_local loc;
4828 enum mlx5_txcmp_code ret;
4829 unsigned int part;
4830
4831 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4832 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4833 if (unlikely(!pkts_n))
4834 return 0;
4835 loc.pkts_sent = 0;
4836 loc.pkts_copy = 0;
4837 loc.wqe_last = NULL;
4838
4839 send_loop:
4840 loc.pkts_loop = loc.pkts_sent;
4841 /*
4842 * Check whether there are some CQEs and, if any:
4843 * - process encountered errors
4844 * - process the completed WQEs
4845 * - free related mbufs
4846 * - doorbell the NIC about processed CQEs
4847 */
4848 rte_prefetch0(*(pkts + loc.pkts_sent));
4849 mlx5_tx_handle_completion(txq, olx);
4850 /*
4851 * Calculate the number of available resources - elts and WQEs.
4852 * There are two possible different scenarios:
4853 * - no data inlining into WQEs, one WQEBB may contain up to
4854 * four packets, in this case elts become scarce resource
4855 * - data inlining into WQEs, one packet may require multiple
4856 * WQEBBs, the WQEs become the limiting factor.
4857 */
4858 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
4859 loc.elts_free = txq->elts_s -
4860 (uint16_t)(txq->elts_head - txq->elts_tail);
4861 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
4862 loc.wqe_free = txq->wqe_s -
4863 (uint16_t)(txq->wqe_ci - txq->wqe_pi);
4864 if (unlikely(!loc.elts_free || !loc.wqe_free))
4865 goto burst_exit;
4866 for (;;) {
4867 /*
4868 * Fetch the packet from array. Usually this is
4869 * the first packet in series of multi/single
4870 * segment packets.
4871 */
4872 loc.mbuf = *(pkts + loc.pkts_sent);
4873 /* Dedicated branch for multi-segment packets. */
4874 if (MLX5_TXOFF_CONFIG(MULTI) &&
4875 unlikely(NB_SEGS(loc.mbuf) > 1)) {
4876 /*
4877 * Multi-segment packet encountered.
4878 * Hardware is able to process it only
4879 * with SEND/TSO opcodes, one packet
4880 * per WQE, do it in dedicated routine.
4881 */
4882 enter_send_multi:
4883 MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
4884 part = loc.pkts_sent - loc.pkts_copy;
4885 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
4886 /*
4887 * There are some single-segment mbufs not
4888 * stored in elts. The mbufs must be in the
4889 * same order as WQEs, so we must copy the
4890 * mbufs to elts here, before the coming
4891 * multi-segment packet mbuf is appended.
4892 */
4893 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy,
4894 part, olx);
4895 loc.pkts_copy = loc.pkts_sent;
4896 }
4897 MLX5_ASSERT(pkts_n > loc.pkts_sent);
4898 ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
4899 if (!MLX5_TXOFF_CONFIG(INLINE))
4900 loc.pkts_copy = loc.pkts_sent;
4901 /*
4902 * These return code checks are supposed
4903 * to be optimized out due to routine inlining.
4904 */
4905 if (ret == MLX5_TXCMP_CODE_EXIT) {
4906 /*
4907 * The routine returns this code when
4908 * all packets are sent or there are not
4909 * enough resources to complete the request.
4910 */
4911 break;
4912 }
4913 if (ret == MLX5_TXCMP_CODE_ERROR) {
4914 /*
4915 * The routine returns this code when
4916 * some error in the incoming packet
4917 * format occurred.
4918 */
4919 txq->stats.oerrors++;
4920 break;
4921 }
4922 if (ret == MLX5_TXCMP_CODE_SINGLE) {
4923 /*
4924 * The single-segment packet was encountered
4925 * in the array, try to send it with the
4926 * best optimized way, possible engaging eMPW.
4927 */
4928 goto enter_send_single;
4929 }
4930 if (MLX5_TXOFF_CONFIG(TSO) &&
4931 ret == MLX5_TXCMP_CODE_TSO) {
4932 /*
4933 * The single-segment TSO packet was
4934 * encountered in the array.
4935 */
4936 goto enter_send_tso;
4937 }
4938 /* We must not get here. Something is going wrong. */
4939 MLX5_ASSERT(false);
4940 txq->stats.oerrors++;
4941 break;
4942 }
4943 /* Dedicated branch for single-segment TSO packets. */
4944 if (MLX5_TXOFF_CONFIG(TSO) &&
4945 unlikely(loc.mbuf->ol_flags & PKT_TX_TCP_SEG)) {
4946 /*
4947 * TSO might require special way for inlining
4948 * (dedicated parameters) and is sent with
4949 * MLX5_OPCODE_TSO opcode only, provide this
4950 * in dedicated branch.
4951 */
4952 enter_send_tso:
4953 MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
4954 MLX5_ASSERT(pkts_n > loc.pkts_sent);
4955 ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
4956 /*
4957 * These return code checks are supposed
4958 * to be optimized out due to routine inlining.
4959 */
4960 if (ret == MLX5_TXCMP_CODE_EXIT)
4961 break;
4962 if (ret == MLX5_TXCMP_CODE_ERROR) {
4963 txq->stats.oerrors++;
4964 break;
4965 }
4966 if (ret == MLX5_TXCMP_CODE_SINGLE)
4967 goto enter_send_single;
4968 if (MLX5_TXOFF_CONFIG(MULTI) &&
4969 ret == MLX5_TXCMP_CODE_MULTI) {
4970 /*
4971 * The multi-segment packet was
4972 * encountered in the array.
4973 */
4974 goto enter_send_multi;
4975 }
4976 /* We must not get here. Something is going wrong. */
4977 MLX5_ASSERT(false);
4978 txq->stats.oerrors++;
4979 break;
4980 }
4981 /*
4982 * The dedicated branch for the single-segment packets
4983 * without TSO. Often these ones can be sent using
4984 * MLX5_OPCODE_EMPW with multiple packets in one WQE.
4985 * The routine builds the WQEs till it encounters
4986 * a TSO or multi-segment packet (in case these
4987 * offloads are requested at SQ configuration time).
4988 */
4989 enter_send_single:
4990 MLX5_ASSERT(pkts_n > loc.pkts_sent);
4991 ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
4992 /*
4993 * These return code checks are supposed
4994 * to be optimized out due to routine inlining.
4995 */
4996 if (ret == MLX5_TXCMP_CODE_EXIT)
4997 break;
4998 if (ret == MLX5_TXCMP_CODE_ERROR) {
4999 txq->stats.oerrors++;
5000 break;
5001 }
5002 if (MLX5_TXOFF_CONFIG(MULTI) &&
5003 ret == MLX5_TXCMP_CODE_MULTI) {
5004 /*
5005 * The multi-segment packet was
5006 * encountered in the array.
5007 */
5008 goto enter_send_multi;
5009 }
5010 if (MLX5_TXOFF_CONFIG(TSO) &&
5011 ret == MLX5_TXCMP_CODE_TSO) {
5012 /*
5013 * The single-segment TSO packet was
5014 * encountered in the array.
5015 */
5016 goto enter_send_tso;
5017 }
5018 /* We must not get here. Something is going wrong. */
5019 MLX5_ASSERT(false);
5020 txq->stats.oerrors++;
5021 break;
5022 }
5023 /*
5024 * Main Tx loop is completed, do the rest:
5025 * - set completion request if thresholds are reached
5026 * - doorbell the hardware
5027 * - copy the rest of mbufs to elts (if any)
5028 */
5029 MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
5030 loc.pkts_sent >= loc.pkts_copy);
5031 /* Take a shortcut if nothing is sent. */
5032 if (unlikely(loc.pkts_sent == loc.pkts_loop))
5033 goto burst_exit;
5034 /* Request CQE generation if limits are reached. */
5035 mlx5_tx_request_completion(txq, &loc, olx);
5036 /*
5037 * Ring the QP doorbell immediately after WQE building completion
5038 * to improve latencies. The purely software-related data treatment
5039 * can be completed after the doorbell. Tx CQEs for this SQ are
5040 * processed in this thread only, by polling.
5041 *
5042 * The rdma core library can map doorbell register in two ways,
5043 * depending on the environment variable "MLX5_SHUT_UP_BF":
5044 *
5045 * - as regular cached memory, the variable is either missing or
5046 * set to zero. This type of mapping may cause significant
5047 * doorbell register writing latency and requires explicit
5048 * memory write barrier to mitigate this issue and prevent
5049 * write combining.
5050 *
5051 * - as non-cached memory, the variable is present and set to
5052 * not "0" value. This type of mapping may cause performance
5053 * impact under heavy loading conditions but the explicit write
5054 * memory barrier is not required and it may improve core
5055 * performance.
5056 *
5057 * - the legacy behaviour (prior to the 19.08 release) was to use some
5058 * heuristics to decide whether write memory barrier should
5059 * be performed. This behavior is supported by specifying
5060 * tx_db_nc=2; the write barrier is skipped if the application
5061 * provides the full recommended burst of packets, as it
5062 * assumes the next packets are coming and the write barrier
5063 * will be issued on the next burst (after descriptor writing,
5064 * at least).
5065 */
5066 mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
5067 (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
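/*
 * Editor's illustrative note (not part of the original driver): the
 * doorbell mapping described above is controlled from outside the PMD,
 * e.g. (hypothetical invocation) by exporting MLX5_SHUT_UP_BF=1 before
 * starting the application to get the non-cached mapping, or by passing
 * the tx_db_nc devarg (0, 1 or 2 as explained above) in the device
 * arguments, such as -w <PCI address>,tx_db_nc=2.
 */
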
5068 /* Not all of the mbufs may be stored into elts yet. */
5069 part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
5070 if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
5071 /*
5072 * There are some single-segment mbufs not stored in elts.
5073 * This can only happen if the last packet was single-segment.
5074 * The copying is gathered into one place because it is
5075 * a good opportunity to optimize it with SIMD.
5076 * Unfortunately, if inlining is enabled, gaps in the
5077 * pointer array may appear due to the early freeing of the
5078 * inlined mbufs.
5079 */
5080 mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
5081 loc.pkts_copy = loc.pkts_sent;
5082 }
5083 MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
5084 MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
5085 if (pkts_n > loc.pkts_sent) {
5086 /*
5087 * If the burst size is large there might be not enough CQEs
5088 * fetched from the completion queue and not enough resources
5089 * freed to send all the packets.
5090 */
5091 goto send_loop;
5092 }
5093 burst_exit:
5094 #ifdef MLX5_PMD_SOFT_COUNTERS
5095 /* Increment sent packets counter. */
5096 txq->stats.opackets += loc.pkts_sent;
5097 #endif
5098 return loc.pkts_sent;
5099 }
5100
5101 /* Generate routines with Enhanced Multi-Packet Write support. */
5102 MLX5_TXOFF_DECL(full_empw,
5103 MLX5_TXOFF_CONFIG_FULL | MLX5_TXOFF_CONFIG_EMPW)
5104
5105 MLX5_TXOFF_DECL(none_empw,
5106 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5107
5108 MLX5_TXOFF_DECL(md_empw,
5109 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5110
5111 MLX5_TXOFF_DECL(mt_empw,
5112 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5113 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5114
5115 MLX5_TXOFF_DECL(mtsc_empw,
5116 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5117 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5118 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5119
5120 MLX5_TXOFF_DECL(mti_empw,
5121 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5122 MLX5_TXOFF_CONFIG_INLINE |
5123 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5124
5125 MLX5_TXOFF_DECL(mtv_empw,
5126 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5127 MLX5_TXOFF_CONFIG_VLAN |
5128 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5129
5130 MLX5_TXOFF_DECL(mtiv_empw,
5131 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5132 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5133 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5134
5135 MLX5_TXOFF_DECL(sc_empw,
5136 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5137 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5138
5139 MLX5_TXOFF_DECL(sci_empw,
5140 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5141 MLX5_TXOFF_CONFIG_INLINE |
5142 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5143
5144 MLX5_TXOFF_DECL(scv_empw,
5145 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5146 MLX5_TXOFF_CONFIG_VLAN |
5147 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5148
5149 MLX5_TXOFF_DECL(sciv_empw,
5150 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5151 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5152 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5153
5154 MLX5_TXOFF_DECL(i_empw,
5155 MLX5_TXOFF_CONFIG_INLINE |
5156 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5157
5158 MLX5_TXOFF_DECL(v_empw,
5159 MLX5_TXOFF_CONFIG_VLAN |
5160 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5161
5162 MLX5_TXOFF_DECL(iv_empw,
5163 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5164 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5165
5166 /* Generate routines without Enhanced Multi-Packet Write support. */
5167 MLX5_TXOFF_DECL(full,
5168 MLX5_TXOFF_CONFIG_FULL)
5169
5170 MLX5_TXOFF_DECL(none,
5171 MLX5_TXOFF_CONFIG_NONE)
5172
5173 MLX5_TXOFF_DECL(md,
5174 MLX5_TXOFF_CONFIG_METADATA)
5175
5176 MLX5_TXOFF_DECL(mt,
5177 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5178 MLX5_TXOFF_CONFIG_METADATA)
5179
5180 MLX5_TXOFF_DECL(mtsc,
5181 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5182 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5183 MLX5_TXOFF_CONFIG_METADATA)
5184
5185 MLX5_TXOFF_DECL(mti,
5186 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5187 MLX5_TXOFF_CONFIG_INLINE |
5188 MLX5_TXOFF_CONFIG_METADATA)
5189
5190
5191 MLX5_TXOFF_DECL(mtv,
5192 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5193 MLX5_TXOFF_CONFIG_VLAN |
5194 MLX5_TXOFF_CONFIG_METADATA)
5195
5196
5197 MLX5_TXOFF_DECL(mtiv,
5198 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5199 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5200 MLX5_TXOFF_CONFIG_METADATA)
5201
5202 MLX5_TXOFF_DECL(sc,
5203 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5204 MLX5_TXOFF_CONFIG_METADATA)
5205
5206 MLX5_TXOFF_DECL(sci,
5207 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5208 MLX5_TXOFF_CONFIG_INLINE |
5209 MLX5_TXOFF_CONFIG_METADATA)
5210
5211
5212 MLX5_TXOFF_DECL(scv,
5213 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5214 MLX5_TXOFF_CONFIG_VLAN |
5215 MLX5_TXOFF_CONFIG_METADATA)
5216
5217
5218 MLX5_TXOFF_DECL(sciv,
5219 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5220 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5221 MLX5_TXOFF_CONFIG_METADATA)
5222
5223 MLX5_TXOFF_DECL(i,
5224 MLX5_TXOFF_CONFIG_INLINE |
5225 MLX5_TXOFF_CONFIG_METADATA)
5226
5227 MLX5_TXOFF_DECL(v,
5228 MLX5_TXOFF_CONFIG_VLAN |
5229 MLX5_TXOFF_CONFIG_METADATA)
5230
5231 MLX5_TXOFF_DECL(iv,
5232 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5233 MLX5_TXOFF_CONFIG_METADATA)
5234
5235 /*
5236 * Generate routines with Legacy Multi-Packet Write support.
5237 * This mode is supported by ConnectX-4 Lx only and imposes
5238 * offload limitations; the following are not supported:
5239 * - ACL/Flows (metadata are becoming meaningless)
5240 * - WQE Inline headers
5241 * - SRIOV (E-Switch offloads)
5242 * - VLAN insertion
5243 * - tunnel encapsulation/decapsulation
5244 * - TSO
5245 */
5246 MLX5_TXOFF_DECL(none_mpw,
5247 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5248 MLX5_TXOFF_CONFIG_MPW)
5249
5250 MLX5_TXOFF_DECL(mci_mpw,
5251 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5252 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5253 MLX5_TXOFF_CONFIG_MPW)
5254
5255 MLX5_TXOFF_DECL(mc_mpw,
5256 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5257 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5258
5259 MLX5_TXOFF_DECL(i_mpw,
5260 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5261 MLX5_TXOFF_CONFIG_MPW)
5262
5263 /*
5264 * Array of declared and compiled Tx burst function and corresponding
5265 * supported offloads set. The array is used to select the Tx burst
5266 * function for specified offloads set at Tx queue configuration time.
5267 */
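/*
 * Editor's illustrative note (not part of the original driver): a hedged
 * sketch of how the table below can be scanned at queue setup - pick the
 * entry whose offload set is a superset of the requested one and carries
 * the fewest extra offloads. The helper name is hypothetical, the actual
 * selection logic elsewhere in this file is more elaborate, and if the
 * sketch were enabled it would have to be placed after the txoff_func[]
 * definition below.
 */
#if 0 /* illustrative only, not compiled */
static eth_tx_burst_t
txoff_pick(unsigned int olx_req)
{
	unsigned int i, best = RTE_DIM(txoff_func);
	int best_extra = -1;

	for (i = 0; i < RTE_DIM(txoff_func); i++) {
		unsigned int olx = txoff_func[i].olx;

		/* The routine must support every requested offload. */
		if ((olx & olx_req) != olx_req)
			continue;
		/* Prefer the routine with the fewest unused offloads. */
		if (best_extra < 0 ||
		    __builtin_popcount(olx & ~olx_req) < best_extra) {
			best = i;
			best_extra = __builtin_popcount(olx & ~olx_req);
		}
	}
	return best < RTE_DIM(txoff_func) ? txoff_func[best].func : NULL;
}
#endif
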
5268 const struct {
5269 eth_tx_burst_t func;
5270 unsigned int olx;
5271 } txoff_func[] = {
5272 MLX5_TXOFF_INFO(full_empw,
5273 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5274 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5275 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5276 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5277
5278 MLX5_TXOFF_INFO(none_empw,
5279 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW)
5280
5281 MLX5_TXOFF_INFO(md_empw,
5282 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5283
5284 MLX5_TXOFF_INFO(mt_empw,
5285 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5286 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5287
5288 MLX5_TXOFF_INFO(mtsc_empw,
5289 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5290 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5291 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5292
5293 MLX5_TXOFF_INFO(mti_empw,
5294 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5295 MLX5_TXOFF_CONFIG_INLINE |
5296 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5297
5298 MLX5_TXOFF_INFO(mtv_empw,
5299 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5300 MLX5_TXOFF_CONFIG_VLAN |
5301 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5302
5303 MLX5_TXOFF_INFO(mtiv_empw,
5304 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5305 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5306 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5307
5308 MLX5_TXOFF_INFO(sc_empw,
5309 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5310 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5311
5312 MLX5_TXOFF_INFO(sci_empw,
5313 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5314 MLX5_TXOFF_CONFIG_INLINE |
5315 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5316
5317 MLX5_TXOFF_INFO(scv_empw,
5318 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5319 MLX5_TXOFF_CONFIG_VLAN |
5320 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5321
5322 MLX5_TXOFF_INFO(sciv_empw,
5323 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5324 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5325 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5326
5327 MLX5_TXOFF_INFO(i_empw,
5328 MLX5_TXOFF_CONFIG_INLINE |
5329 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5330
5331 MLX5_TXOFF_INFO(v_empw,
5332 MLX5_TXOFF_CONFIG_VLAN |
5333 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5334
5335 MLX5_TXOFF_INFO(iv_empw,
5336 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5337 MLX5_TXOFF_CONFIG_METADATA | MLX5_TXOFF_CONFIG_EMPW)
5338
5339 MLX5_TXOFF_INFO(full,
5340 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5341 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5342 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5343 MLX5_TXOFF_CONFIG_METADATA)
5344
5345 MLX5_TXOFF_INFO(none,
5346 MLX5_TXOFF_CONFIG_NONE)
5347
5348 MLX5_TXOFF_INFO(md,
5349 MLX5_TXOFF_CONFIG_METADATA)
5350
5351 MLX5_TXOFF_INFO(mt,
5352 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5353 MLX5_TXOFF_CONFIG_METADATA)
5354
5355 MLX5_TXOFF_INFO(mtsc,
5356 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5357 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5358 MLX5_TXOFF_CONFIG_METADATA)
5359
5360 MLX5_TXOFF_INFO(mti,
5361 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5362 MLX5_TXOFF_CONFIG_INLINE |
5363 MLX5_TXOFF_CONFIG_METADATA)
5364
5365 MLX5_TXOFF_INFO(mtv,
5366 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5367 MLX5_TXOFF_CONFIG_VLAN |
5368 MLX5_TXOFF_CONFIG_METADATA)
5369
5370 MLX5_TXOFF_INFO(mtiv,
5371 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
5372 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5373 MLX5_TXOFF_CONFIG_METADATA)
5374
5375 MLX5_TXOFF_INFO(sc,
5376 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5377 MLX5_TXOFF_CONFIG_METADATA)
5378
5379 MLX5_TXOFF_INFO(sci,
5380 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5381 MLX5_TXOFF_CONFIG_INLINE |
5382 MLX5_TXOFF_CONFIG_METADATA)
5383
5384 MLX5_TXOFF_INFO(scv,
5385 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5386 MLX5_TXOFF_CONFIG_VLAN |
5387 MLX5_TXOFF_CONFIG_METADATA)
5388
5389 MLX5_TXOFF_INFO(sciv,
5390 MLX5_TXOFF_CONFIG_SWP | MLX5_TXOFF_CONFIG_CSUM |
5391 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5392 MLX5_TXOFF_CONFIG_METADATA)
5393
5394 MLX5_TXOFF_INFO(i,
5395 MLX5_TXOFF_CONFIG_INLINE |
5396 MLX5_TXOFF_CONFIG_METADATA)
5397
5398 MLX5_TXOFF_INFO(v,
5399 MLX5_TXOFF_CONFIG_VLAN |
5400 MLX5_TXOFF_CONFIG_METADATA)
5401
5402 MLX5_TXOFF_INFO(iv,
5403 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
5404 MLX5_TXOFF_CONFIG_METADATA)
5405
5406 MLX5_TXOFF_INFO(none_mpw,
5407 MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
5408 MLX5_TXOFF_CONFIG_MPW)
5409
5410 MLX5_TXOFF_INFO(mci_mpw,
5411 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5412 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5413 MLX5_TXOFF_CONFIG_MPW)
5414
5415 MLX5_TXOFF_INFO(mc_mpw,
5416 MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
5417 MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
5418
5419 MLX5_TXOFF_INFO(i_mpw,
5420 MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
5421 MLX5_TXOFF_CONFIG_MPW)
5422 };
5423
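/*
 * Illustration only, not part of the original driver: a minimal sketch of
 * how an entry could be looked up in txoff_func[] by treating each entry's
 * option set as a superset of the requested offloads. The real selection in
 * mlx5_select_tx_function() below additionally prefers the entry with the
 * fewest extra offloads and never selects a routine that enables eMPW or
 * inlining when they were not requested. The example_txoff_lookup() name is
 * an assumption introduced for illustration.
 */
static __rte_unused eth_tx_burst_t
example_txoff_lookup(unsigned int olx_req)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(txoff_func); i++) {
		/* Entry must support at least every requested offload. */
		if ((txoff_func[i].olx & olx_req) == olx_req)
			return txoff_func[i].func;
	}
	return NULL;
}
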
5424 /**
5425 * Configure the Tx function to use. The routine checks the Tx
5426 * offloads configured for the device and selects the appropriate
5427 * Tx burst routine. Multiple Tx burst routines are compiled from
5428 * the same template, each optimized for a dedicated set of
5429 * Tx offloads.
5430 *
5431 * @param dev
5432 * Pointer to the Ethernet device structure.
5433 *
5434 * @return
5435 * Pointer to selected Tx burst function.
5436 */
5437 eth_tx_burst_t
5438 mlx5_select_tx_function(struct rte_eth_dev *dev)
5439 {
5440 struct mlx5_priv *priv = dev->data->dev_private;
5441 struct mlx5_dev_config *config = &priv->config;
5442 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
5443 unsigned int diff = 0, olx = 0, i, m;
5444
5445 static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
5446 MLX5_DSEG_MAX, "invalid WQE max size");
5447 static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
5448 "invalid WQE Control Segment size");
5449 static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
5450 "invalid WQE Ethernet Segment size");
5451 static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
5452 "invalid WQE Data Segment size");
5453 static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
5454 "invalid WQE size");
5455 MLX5_ASSERT(priv);
5456 if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
5457 /* We should support Multi-Segment Packets. */
5458 olx |= MLX5_TXOFF_CONFIG_MULTI;
5459 }
5460 if (tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
5461 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
5462 DEV_TX_OFFLOAD_GRE_TNL_TSO |
5463 DEV_TX_OFFLOAD_IP_TNL_TSO |
5464 DEV_TX_OFFLOAD_UDP_TNL_TSO)) {
5465 /* We should support TCP Send Offload. */
5466 olx |= MLX5_TXOFF_CONFIG_TSO;
5467 }
5468 if (tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
5469 DEV_TX_OFFLOAD_UDP_TNL_TSO |
5470 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5471 /* We should support Software Parser for Tunnels. */
5472 olx |= MLX5_TXOFF_CONFIG_SWP;
5473 }
5474 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
5475 DEV_TX_OFFLOAD_UDP_CKSUM |
5476 DEV_TX_OFFLOAD_TCP_CKSUM |
5477 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) {
5478 /* We should support IP/TCP/UDP Checksums. */
5479 olx |= MLX5_TXOFF_CONFIG_CSUM;
5480 }
5481 if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) {
5482 /* We should support VLAN insertion. */
5483 olx |= MLX5_TXOFF_CONFIG_VLAN;
5484 }
5485 if (priv->txqs_n && (*priv->txqs)[0]) {
5486 struct mlx5_txq_data *txd = (*priv->txqs)[0];
5487
5488 if (txd->inlen_send) {
5489 /*
5490 * Check the data inline requirements. Data inline
5491 * is enabled on a per-device basis, so checking the
5492 * first Tx queue only is sufficient.
5493 *
5494 * If the device does not support VLAN insertion in
5495 * the WQE and some queues are requested to perform
5496 * VLAN insertion offload, then inline must be enabled.
5497 */
5498 olx |= MLX5_TXOFF_CONFIG_INLINE;
5499 }
5500 }
5501 if (config->mps == MLX5_MPW_ENHANCED &&
5502 config->txq_inline_min <= 0) {
5503 /*
5504 * The NIC supports Enhanced Multi-Packet Write
5505 * and does not require minimal inline data.
5506 */
5507 olx |= MLX5_TXOFF_CONFIG_EMPW;
5508 }
5509 if (rte_flow_dynf_metadata_avail()) {
5510 /* We should support Flow metadata. */
5511 olx |= MLX5_TXOFF_CONFIG_METADATA;
5512 }
5513 if (config->mps == MLX5_MPW) {
5514 /*
5515 * The NIC supports Legacy Multi-Packet Write.
5516 * The MLX5_TXOFF_CONFIG_MPW controls the
5517 * descriptor building method in combination
5518 * with MLX5_TXOFF_CONFIG_EMPW.
5519 */
5520 if (!(olx & (MLX5_TXOFF_CONFIG_TSO |
5521 MLX5_TXOFF_CONFIG_SWP |
5522 MLX5_TXOFF_CONFIG_VLAN |
5523 MLX5_TXOFF_CONFIG_METADATA)))
5524 olx |= MLX5_TXOFF_CONFIG_EMPW |
5525 MLX5_TXOFF_CONFIG_MPW;
5526 }
5527 /*
5528 * Scan the routines table to find the minimal
5529 * satisfying routine with requested offloads.
5530 */
5531 m = RTE_DIM(txoff_func);
5532 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5533 unsigned int tmp;
5534
5535 tmp = txoff_func[i].olx;
5536 if (tmp == olx) {
5537 /* Meets requested offloads exactly. */
5538 m = i;
5539 break;
5540 }
5541 if ((tmp & olx) != olx) {
5542 /* Does not meet requested offloads at all. */
5543 continue;
5544 }
5545 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
5546 /* Do not enable eMPW if not configured. */
5547 continue;
5548 if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_INLINE)
5549 /* Do not enable inlining if not configured. */
5550 continue;
5551 /*
5552 * Some routine meets the requirements.
5553 * Check whether it has the minimal amount
5554 * of not-requested offloads.
5555 */
5556 tmp = __builtin_popcountl(tmp & ~olx);
5557 if (m >= RTE_DIM(txoff_func) || tmp < diff) {
5558 /* First or better match, save and continue. */
5559 m = i;
5560 diff = tmp;
5561 continue;
5562 }
5563 if (tmp == diff) {
5564 tmp = txoff_func[i].olx ^ txoff_func[m].olx;
5565 if (__builtin_ffsl(txoff_func[i].olx & ~tmp) <
5566 __builtin_ffsl(txoff_func[m].olx & ~tmp)) {
5567 /* Prefer the entry with the lighter not-requested offload. */
5568 m = i;
5569 }
5570 }
5571 }
5572 if (m >= RTE_DIM(txoff_func)) {
5573 DRV_LOG(DEBUG, "port %u has no selected Tx function"
5574 " for requested offloads %04X",
5575 dev->data->port_id, olx);
5576 return NULL;
5577 }
5578 DRV_LOG(DEBUG, "port %u has selected Tx function"
5579 " supporting offloads %04X/%04X",
5580 dev->data->port_id, olx, txoff_func[m].olx);
5581 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MULTI)
5582 DRV_LOG(DEBUG, "\tMULTI (multi segment)");
5583 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_TSO)
5584 DRV_LOG(DEBUG, "\tTSO (TCP send offload)");
5585 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_SWP)
5586 DRV_LOG(DEBUG, "\tSWP (software parser)");
5587 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_CSUM)
5588 DRV_LOG(DEBUG, "\tCSUM (checksum offload)");
5589 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_INLINE)
5590 DRV_LOG(DEBUG, "\tINLIN (inline data)");
5591 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_VLAN)
5592 DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
5593 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
5594 DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
5595 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) {
5596 if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW)
5597 DRV_LOG(DEBUG, "\tMPW (Legacy MPW)");
5598 else
5599 DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
5600 }
5601 return txoff_func[m].func;
5602 }
5603
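/*
 * Usage sketch, illustration only: the selected routine is typically applied
 * by the device start path; a caller could do something like the following.
 * The example_apply_tx_function() name is an assumption introduced for
 * illustration, not the driver's actual entry point.
 */
static __rte_unused int
example_apply_tx_function(struct rte_eth_dev *dev)
{
	eth_tx_burst_t burst = mlx5_select_tx_function(dev);

	if (burst == NULL)
		return -EINVAL; /* No routine matches the requested offloads. */
	dev->tx_pkt_burst = burst;
	return 0;
}
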
5604 /**
5605 * DPDK callback to get the TX queue information
5606 *
5607 * @param dev
5608 * Pointer to the device structure.
5609 *
5610 * @param tx_queue_id
5611 * Tx queue identifier.
5612 *
5613 * @param qinfo
5614 * Pointer to the TX queue information structure.
5615 *
5616 * @return
5617 * None.
5618 */
5619
5620 void
5621 mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
5622 struct rte_eth_txq_info *qinfo)
5623 {
5624 struct mlx5_priv *priv = dev->data->dev_private;
5625 struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
5626 struct mlx5_txq_ctrl *txq_ctrl =
5627 container_of(txq, struct mlx5_txq_ctrl, txq);
5628
5629 if (!txq)
5630 return;
5631 qinfo->nb_desc = txq->elts_s;
5632 qinfo->conf.tx_thresh.pthresh = 0;
5633 qinfo->conf.tx_thresh.hthresh = 0;
5634 qinfo->conf.tx_thresh.wthresh = 0;
5635 qinfo->conf.tx_rs_thresh = 0;
5636 qinfo->conf.tx_free_thresh = 0;
5637 qinfo->conf.tx_deferred_start = txq_ctrl ? 0 : 1;
5638 qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
5639 }
5640
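/*
 * Usage sketch, illustration only: applications reach the callback above
 * through the generic ethdev API rather than calling it directly, assuming
 * rte_ethdev.h is visible through the existing includes. The
 * example_dump_txq() name is an assumption introduced for illustration.
 */
static __rte_unused void
example_dump_txq(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_txq_info qinfo;

	if (rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo) == 0)
		DRV_LOG(DEBUG, "port %u txq %u: %u descriptors",
			port_id, queue_id, qinfo.nb_desc);
}
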
5641 /**
5642 * DPDK callback to get the TX packet burst mode information
5643 *
5644 * @param dev
5645 * Pointer to the device structure.
5646 *
5647 * @param tx_queue_id
5648 * Tx queue identifier.
5649 *
5650 * @param mode
5651 * Pointer to the burst mode information.
5652 *
5653 * @return
5654 * 0 on success, -EINVAL on failure.
5655 */
5656
5657 int
5658 mlx5_tx_burst_mode_get(struct rte_eth_dev *dev,
5659 uint16_t tx_queue_id __rte_unused,
5660 struct rte_eth_burst_mode *mode)
5661 {
5662 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
5663 unsigned int i, olx;
5664
5665 for (i = 0; i < RTE_DIM(txoff_func); i++) {
5666 if (pkt_burst == txoff_func[i].func) {
5667 olx = txoff_func[i].olx;
5668 snprintf(mode->info, sizeof(mode->info),
5669 "%s%s%s%s%s%s%s%s",
5670 (olx & MLX5_TXOFF_CONFIG_EMPW) ?
5671 ((olx & MLX5_TXOFF_CONFIG_MPW) ?
5672 "Legacy MPW" : "Enhanced MPW") : "No MPW",
5673 (olx & MLX5_TXOFF_CONFIG_MULTI) ?
5674 " + MULTI" : "",
5675 (olx & MLX5_TXOFF_CONFIG_TSO) ?
5676 " + TSO" : "",
5677 (olx & MLX5_TXOFF_CONFIG_SWP) ?
5678 " + SWP" : "",
5679 (olx & MLX5_TXOFF_CONFIG_CSUM) ?
5680 " + CSUM" : "",
5681 (olx & MLX5_TXOFF_CONFIG_INLINE) ?
5682 " + INLINE" : "",
5683 (olx & MLX5_TXOFF_CONFIG_VLAN) ?
5684 " + VLAN" : "",
5685 (olx & MLX5_TXOFF_CONFIG_METADATA) ?
5686 " + METADATA" : "");
5687 return 0;
5688 }
5689 }
5690 return -EINVAL;
5691 }
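
/*
 * Usage sketch, illustration only: the burst mode string produced above can
 * be retrieved through the generic ethdev API, assuming rte_ethdev.h is
 * visible through the existing includes. The example_log_tx_burst_mode()
 * name is an assumption introduced for illustration.
 */
static __rte_unused void
example_log_tx_burst_mode(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;

	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
		DRV_LOG(DEBUG, "port %u txq %u burst mode: %s",
			port_id, queue_id, mode.info);
}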