/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

/**
 * @file
 * Data plane functions for mlx4 driver.
 */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "mlx4.h"
#include "mlx4_prm.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
/**
 * Pointer-value pair structure used in tx_post_send for saving the first
 * DWORD (32 bits) of a TXBB.
 */
struct pv {
	union {
		volatile struct mlx4_wqe_data_seg *dseg;
		volatile uint32_t *dst;
	};
	uint32_t val;
};

/** A helper structure for TSO packet handling. */
struct tso_info {
	/** Pointer to the array of saved first DWORD (32 bits) of a TXBB. */
	struct pv *pv;
	/** Current entry in the pv array. */
	int pv_counter;
	/** Total size of the WQE including padding. */
	uint32_t wqe_size;
	/** Size of TSO header to prepend to each packet to send. */
	uint16_t tso_header_size;
	/** Total size of the TSO segment in the WQE. */
	uint16_t wqe_tso_seg_size;
	/** Raw WQE size in units of 16 Bytes and without padding. */
	uint8_t fence_size;
};
/** A table to translate Rx completion flags to packet type. */
uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = {
	/*
	 * The index to the array should have:
	 *  bit[7] - MLX4_CQE_L2_TUNNEL
	 *  bit[6] - MLX4_CQE_L2_TUNNEL_IPV4
	 *  bit[5] - MLX4_CQE_STATUS_UDP
	 *  bit[4] - MLX4_CQE_STATUS_TCP
	 *  bit[3] - MLX4_CQE_STATUS_IPV4OPT
	 *  bit[2] - MLX4_CQE_STATUS_IPV6
	 *  bit[1] - MLX4_CQE_STATUS_IPF
	 *  bit[0] - MLX4_CQE_STATUS_IPV4
	 * giving a total of up to 256 entries.
	 */
	[0x00] = RTE_PTYPE_L2_ETHER,
	[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_L4_NONFRAG,
	[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_L4_FRAG,
	[0x03] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_L4_FRAG,
	[0x04] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_L4_NONFRAG,
	[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_L4_FRAG,
	[0x08] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
		 RTE_PTYPE_L4_NONFRAG,
	[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
		 RTE_PTYPE_L4_NONFRAG,
	[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
		 RTE_PTYPE_L4_FRAG,
	[0x0b] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
		 RTE_PTYPE_L4_FRAG,
	/* TCP */
	[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_L4_TCP,
	[0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_L4_TCP,
	[0x16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_L4_FRAG,
	[0x18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
		 RTE_PTYPE_L4_TCP,
	[0x19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
		 RTE_PTYPE_L4_TCP,
	/* UDP */
	[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_L4_UDP,
	[0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_L4_UDP,
	[0x26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_L4_FRAG,
	[0x28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
		 RTE_PTYPE_L4_UDP,
	[0x29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
		 RTE_PTYPE_L4_UDP,
	/* Tunneled - L3 IPV6 */
	[0x80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
	[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_NONFRAG,
	[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG,
	[0x83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG,
	[0x84] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_NONFRAG,
	[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG,
	[0x88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_NONFRAG,
	[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_NONFRAG,
	[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_FRAG,
	[0x8b] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_FRAG,
	/* Tunneled - L3 IPV6, TCP */
	[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_TCP,
	[0x94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_TCP,
	[0x96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG,
	[0x98] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
	[0x99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
	/* Tunneled - L3 IPV6, UDP */
	[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_UDP,
	[0xa4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_UDP,
	[0xa6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG,
	[0xa8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_UDP,
	[0xa9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_UDP,
	/* Tunneled - L3 IPV4 */
	[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
	[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_NONFRAG,
	[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG,
	[0xc3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG,
	[0xc4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_NONFRAG,
	[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG,
	[0xc8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_NONFRAG,
	[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_NONFRAG,
	[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_FRAG,
	[0xcb] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_FRAG,
	/* Tunneled - L3 IPV4, TCP */
	[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_TCP,
	[0xd4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_TCP,
	[0xd6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG,
	[0xd8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_TCP,
	[0xd9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_TCP,
	/* Tunneled - L3 IPV4, UDP */
	[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_UDP,
	[0xe4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_UDP,
	[0xe6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG,
	[0xe8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_UDP,
	[0xe9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_UDP,
};
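/*
 * Editorial note (not in the original sources): rxq_cq_to_pkt_type() below
 * builds the table index from CQE status bits. For instance, an untunneled
 * IPv4/TCP completion sets bit[4] (TCP) and bit[0] (IPV4), giving index 0x11
 * and therefore RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
 * RTE_PTYPE_L4_TCP.
 */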
/**
 * Stamp a TXBB burst so it won't be reused by the HW.
 *
 * Routine is used when freeing WQEs used by the chip or when a WQE build has
 * failed, leaving partial information on the queue.
 *
 * @param sq
 *   Pointer to the SQ structure.
 * @param start
 *   Pointer to the first TXBB to stamp.
 * @param end
 *   Pointer to the followed end TXBB to stamp.
 *
 * @return
 *   Stamping burst size in byte units.
 */
static uint32_t
mlx4_txq_stamp_freed_wqe(struct mlx4_sq *sq, volatile uint32_t *start,
			 volatile uint32_t *end)
{
	uint32_t stamp = sq->stamp;
	int32_t size = (intptr_t)end - (intptr_t)start;

	assert(start != end);
	/* Hold SQ ring wrap around. */
	if (size < 0) {
		size = (int32_t)sq->size + size;
		do {
			*start = stamp;
			start += MLX4_SQ_STAMP_DWORDS;
		} while (start != (volatile uint32_t *)sq->eob);
		start = (volatile uint32_t *)sq->buf;
		/* Flip invalid stamping ownership. */
		stamp ^= RTE_BE32(1u << MLX4_SQ_OWNER_BIT);
		sq->stamp = stamp;
		if (start == end)
			return size;
	}
	do {
		*start = stamp;
		start += MLX4_SQ_STAMP_DWORDS;
	} while (start != end);
	return (uint32_t)size;
}
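/*
 * Editorial note: when the stamped range wraps past sq->eob, the function
 * stamps up to the end of the ring, flips the ownership bit in the stamp
 * value and restarts from sq->buf, so each half of the wrapped range is
 * stamped with the ownership polarity corresponding to its pass over the
 * ring.
 */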
/**
 * Manage Tx completions.
 *
 * When sending a burst, mlx4_tx_burst() posts several WRs.
 * To improve performance, a completion event is only required once every
 * MLX4_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information
 * for other WRs, but this information would not be used anyway.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param elts_m
 *   Tx elements number mask.
 * @param sq
 *   Pointer to the SQ structure.
 */
static void
mlx4_txq_complete(struct txq *txq, const unsigned int elts_m,
		  struct mlx4_sq *sq)
{
	unsigned int elts_tail = txq->elts_tail;
	struct mlx4_cq *cq = &txq->mcq;
	volatile struct mlx4_cqe *cqe;
	uint32_t completed;
	uint32_t cons_index = cq->cons_index;
	volatile uint32_t *first_txbb;

	/*
	 * Traverse over all CQ entries reported and handle each WQ entry
	 * reported by them.
	 */
	do {
		cqe = (volatile struct mlx4_cqe *)mlx4_get_cqe(cq, cons_index);
		if (unlikely(!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
			     !!(cons_index & cq->cqe_cnt)))
			break;
#ifndef NDEBUG
		/*
		 * Make sure we read the CQE after we read the ownership bit.
		 */
		rte_io_rmb();
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
			     MLX4_CQE_OPCODE_ERROR)) {
			volatile struct mlx4_err_cqe *cqe_err =
				(volatile struct mlx4_err_cqe *)cqe;
			ERROR("%p CQE error - vendor syndrome: 0x%x"
			      " syndrome: 0x%x\n",
			      (void *)txq, cqe_err->vendor_err,
			      cqe_err->syndrome);
			break;
		}
#endif /* NDEBUG */
		cons_index++;
	} while (1);
	completed = (cons_index - cq->cons_index) * txq->elts_comp_cd_init;
	if (unlikely(!completed))
		return;
	/* First stamping address is the end of the last one. */
	first_txbb = (&(*txq->elts)[elts_tail & elts_m])->eocb;
	elts_tail += completed;
	/* The new tail element holds the end address. */
	sq->remain_size += mlx4_txq_stamp_freed_wqe(sq, first_txbb,
		(&(*txq->elts)[elts_tail & elts_m])->eocb);
	/* Update CQ consumer index. */
	cq->cons_index = cons_index;
	*cq->set_ci_db = rte_cpu_to_be_32(cons_index & MLX4_CQ_DB_CI_MASK);
	txq->elts_tail = elts_tail;
}
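/*
 * Editorial note: completions are only requested once every
 * elts_comp_cd_init posted packets, so every CQE consumed above accounts for
 * elts_comp_cd_init Tx elements, hence
 * completed = (cons_index - cq->cons_index) * txq->elts_comp_cd_init.
 */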
/**
 * Write Tx data segment to the SQ.
 *
 * @param dseg
 *   Pointer to data segment in SQ.
 * @param lkey
 *   Memory region lkey.
 * @param addr
 *   Data address.
 * @param byte_count
 *   Big endian bytes count of the data to send.
 */
static inline void
mlx4_fill_tx_data_seg(volatile struct mlx4_wqe_data_seg *dseg,
		      uint32_t lkey, uintptr_t addr, rte_be32_t byte_count)
{
	dseg->addr = rte_cpu_to_be_64(addr);
	dseg->lkey = lkey;
#if RTE_CACHE_LINE_SIZE < 64
	/*
	 * Need a barrier here before writing the byte_count
	 * fields to make sure that all the data is visible
	 * before the byte_count field is set.
	 * Otherwise, if the segment begins a new cacheline,
	 * the HCA prefetcher could grab the 64-byte chunk and
	 * get a valid (!= 0xffffffff) byte count but stale
	 * data, and end up sending the wrong data.
	 */
	rte_io_wmb();
#endif /* RTE_CACHE_LINE_SIZE */
	dseg->byte_count = byte_count;
}
/**
 * Obtain and calculate TSO information needed for assembling a TSO WQE.
 *
 * @param buf
 *   Pointer to the first packet mbuf.
 * @param txq
 *   Pointer to Tx queue structure.
 * @param tinfo
 *   Pointer to a structure to fill the info with.
 *
 * @return
 *   0 on success, negative value upon error.
 */
static inline int
mlx4_tx_burst_tso_get_params(struct rte_mbuf *buf,
			     struct txq *txq,
			     struct tso_info *tinfo)
{
	struct mlx4_sq *sq = &txq->msq;
	const uint8_t tunneled = txq->priv->hw_csum_l2tun &&
				 (buf->ol_flags & PKT_TX_TUNNEL_MASK);

	tinfo->tso_header_size = buf->l2_len + buf->l3_len + buf->l4_len;
	if (tunneled)
		tinfo->tso_header_size +=
				buf->outer_l2_len + buf->outer_l3_len;
	if (unlikely(buf->tso_segsz == 0 ||
		     tinfo->tso_header_size == 0 ||
		     tinfo->tso_header_size > MLX4_MAX_TSO_HEADER ||
		     tinfo->tso_header_size > buf->data_len))
		return -EINVAL;
	/*
	 * Calculate the WQE TSO segment size
	 * Note:
	 * 1. An LSO segment must be padded such that the subsequent data
	 *    segment is 16-byte aligned.
	 * 2. The start address of the TSO segment is always 16 Bytes aligned.
	 */
	tinfo->wqe_tso_seg_size = RTE_ALIGN(sizeof(struct mlx4_wqe_lso_seg) +
					    tinfo->tso_header_size,
					    sizeof(struct mlx4_wqe_data_seg));
	tinfo->fence_size = ((sizeof(struct mlx4_wqe_ctrl_seg) +
			      tinfo->wqe_tso_seg_size) >> MLX4_SEG_SHIFT) +
			      buf->nb_segs;
	tinfo->wqe_size =
		RTE_ALIGN((uint32_t)(tinfo->fence_size << MLX4_SEG_SHIFT),
			  MLX4_TXBB_SIZE);
	/* Validate WQE size and WQE space in the send queue. */
	if (sq->remain_size < tinfo->wqe_size ||
	    tinfo->wqe_size > MLX4_MAX_WQE_SIZE)
		return -ENOMEM;
	/* Init pv. */
	tinfo->pv = (struct pv *)txq->bounce_buf;
	tinfo->pv_counter = 0;
	return 0;
}
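/*
 * Worked example (illustrative only, not in the original sources): for a
 * single-mbuf TSO packet with a 54-byte Ethernet/IPv4/TCP header and a
 * 4-byte LSO descriptor, wqe_tso_seg_size = RTE_ALIGN(4 + 54, 16) = 64,
 * fence_size = ((16 + 64) >> MLX4_SEG_SHIFT) + 1 = 6 and
 * wqe_size = RTE_ALIGN(6 << MLX4_SEG_SHIFT, MLX4_TXBB_SIZE) = 128 bytes,
 * i.e. two TXBBs. Exact numbers depend on sizeof(struct mlx4_wqe_lso_seg).
 */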
/**
 * Fill the TSO WQE data segments with info on buffers to transmit.
 *
 * @param buf
 *   Pointer to the first packet mbuf.
 * @param txq
 *   Pointer to Tx queue structure.
 * @param tinfo
 *   Pointer to TSO info to use.
 * @param dseg
 *   Pointer to the first data segment in the TSO WQE.
 * @param ctrl
 *   Pointer to the control segment in the TSO WQE.
 *
 * @return
 *   Pointer to the next WQE control segment on success, NULL otherwise.
 */
static inline volatile struct mlx4_wqe_ctrl_seg *
mlx4_tx_burst_fill_tso_dsegs(struct rte_mbuf *buf,
			     struct txq *txq,
			     struct tso_info *tinfo,
			     volatile struct mlx4_wqe_data_seg *dseg,
			     volatile struct mlx4_wqe_ctrl_seg *ctrl)
{
	uint32_t lkey;
	int nb_segs = buf->nb_segs;
	int nb_segs_txbb;
	struct mlx4_sq *sq = &txq->msq;
	struct rte_mbuf *sbuf = buf;
	struct pv *pv = tinfo->pv;
	int *pv_counter = &tinfo->pv_counter;
	volatile struct mlx4_wqe_ctrl_seg *ctrl_next =
			(volatile struct mlx4_wqe_ctrl_seg *)
				((volatile uint8_t *)ctrl + tinfo->wqe_size);
	uint16_t data_len = sbuf->data_len - tinfo->tso_header_size;
	uintptr_t data_addr = rte_pktmbuf_mtod_offset(sbuf, uintptr_t,
						      tinfo->tso_header_size);

	do {
		/* How many dseg entries do we have in the current TXBB? */
		nb_segs_txbb = (MLX4_TXBB_SIZE -
				((uintptr_t)dseg & (MLX4_TXBB_SIZE - 1))) >>
			       MLX4_SEG_SHIFT;
		switch (nb_segs_txbb) {
#ifndef NDEBUG
		default:
			/* Should never happen. */
			rte_panic("%p: Invalid number of SGEs(%d) for a TXBB",
				  (void *)txq, nb_segs_txbb);
			/* rte_panic never returns. */
			break;
#endif /* NDEBUG */
		case 4:
			/* Memory region key for this memory pool. */
			lkey = mlx4_tx_mb2mr(txq, sbuf);
			if (unlikely(lkey == (uint32_t)-1))
				goto err;
			dseg->addr = rte_cpu_to_be_64(data_addr);
			dseg->lkey = lkey;
			/*
			 * This data segment starts at the beginning of a new
			 * TXBB, so we need to postpone its byte_count writing
			 * for later.
			 */
			pv[*pv_counter].dseg = dseg;
			/*
			 * Zero length segment is treated as inline segment
			 * with zero data.
			 */
			pv[(*pv_counter)++].val =
				rte_cpu_to_be_32(data_len ?
						 data_len :
						 0x80000000);
			if (--nb_segs == 0)
				return ctrl_next;
			/* Prepare next buf info */
			sbuf = sbuf->next;
			dseg++;
			data_len = sbuf->data_len;
			data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
			/* fallthrough */
		case 3:
			lkey = mlx4_tx_mb2mr(txq, sbuf);
			if (unlikely(lkey == (uint32_t)-1))
				goto err;
			mlx4_fill_tx_data_seg(dseg, lkey, data_addr,
					      rte_cpu_to_be_32(data_len ?
							       data_len :
							       0x80000000));
			if (--nb_segs == 0)
				return ctrl_next;
			/* Prepare next buf info */
			sbuf = sbuf->next;
			dseg++;
			data_len = sbuf->data_len;
			data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
			/* fallthrough */
		case 2:
			lkey = mlx4_tx_mb2mr(txq, sbuf);
			if (unlikely(lkey == (uint32_t)-1))
				goto err;
			mlx4_fill_tx_data_seg(dseg, lkey, data_addr,
					      rte_cpu_to_be_32(data_len ?
							       data_len :
							       0x80000000));
			if (--nb_segs == 0)
				return ctrl_next;
			/* Prepare next buf info */
			sbuf = sbuf->next;
			dseg++;
			data_len = sbuf->data_len;
			data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
			/* fallthrough */
		case 1:
			lkey = mlx4_tx_mb2mr(txq, sbuf);
			if (unlikely(lkey == (uint32_t)-1))
				goto err;
			mlx4_fill_tx_data_seg(dseg, lkey, data_addr,
					      rte_cpu_to_be_32(data_len ?
							       data_len :
							       0x80000000));
			if (--nb_segs == 0)
				return ctrl_next;
			/* Prepare next buf info */
			sbuf = sbuf->next;
			dseg++;
			data_len = sbuf->data_len;
			data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
			break;
		}
		/* Wrap dseg if it points at the end of the queue. */
		if ((volatile uint8_t *)dseg >= sq->eob)
			dseg = (volatile struct mlx4_wqe_data_seg *)
				((volatile uint8_t *)dseg - sq->size);
	} while (true);
err:
	return NULL;
}
/**
 * Fill the packet's l2, l3 and l4 headers to the WQE.
 *
 * This will be used as the header for each TSO segment that is transmitted.
 *
 * @param buf
 *   Pointer to the first packet mbuf.
 * @param txq
 *   Pointer to Tx queue structure.
 * @param tinfo
 *   Pointer to TSO info to use.
 * @param ctrl
 *   Pointer to the control segment in the TSO WQE.
 *
 * @return
 *   Pointer to the first data segment of the TSO WQE on success,
 *   NULL otherwise.
 */
static inline volatile struct mlx4_wqe_data_seg *
mlx4_tx_burst_fill_tso_hdr(struct rte_mbuf *buf,
			   struct txq *txq,
			   struct tso_info *tinfo,
			   volatile struct mlx4_wqe_ctrl_seg *ctrl)
{
	volatile struct mlx4_wqe_lso_seg *tseg =
		(volatile struct mlx4_wqe_lso_seg *)(ctrl + 1);
	struct mlx4_sq *sq = &txq->msq;
	struct pv *pv = tinfo->pv;
	int *pv_counter = &tinfo->pv_counter;
	int remain_size = tinfo->tso_header_size;
	char *from = rte_pktmbuf_mtod(buf, char *);
	uint16_t txbb_avail_space;
	/* Union to overcome volatile constraints when copying TSO header. */
	union {
		volatile uint8_t *vto;
		uint8_t *to;
	} thdr = { .vto = (volatile uint8_t *)tseg->header, };

	/*
	 * TSO data always starts at offset 20 from the beginning of the TXBB
	 * (16 byte ctrl + 4byte TSO desc). Since each TXBB is 64Byte aligned
	 * we can write the first 44 TSO header bytes without worry for TxQ
	 * wrapping or overwriting the first TXBB 32bit word.
	 */
	txbb_avail_space = MLX4_TXBB_SIZE -
			   (sizeof(struct mlx4_wqe_ctrl_seg) +
			    sizeof(struct mlx4_wqe_lso_seg));
	while (remain_size >= (int)(txbb_avail_space + sizeof(uint32_t))) {
		/* Copy to end of txbb. */
		rte_memcpy(thdr.to, from, txbb_avail_space);
		from += txbb_avail_space;
		thdr.to += txbb_avail_space;
		/* New TXBB, Check for TxQ wrap. */
		if (thdr.to >= sq->eob)
			thdr.vto = sq->buf;
		/* New TXBB, stash the first 32bits for later use. */
		pv[*pv_counter].dst = (volatile uint32_t *)thdr.to;
		pv[(*pv_counter)++].val = *(uint32_t *)from,
		from += sizeof(uint32_t);
		thdr.to += sizeof(uint32_t);
		remain_size -= txbb_avail_space + sizeof(uint32_t);
		/* Avail space in new TXBB is TXBB size - 4 */
		txbb_avail_space = MLX4_TXBB_SIZE - sizeof(uint32_t);
	}
	if (remain_size > txbb_avail_space) {
		rte_memcpy(thdr.to, from, txbb_avail_space);
		from += txbb_avail_space;
		thdr.to += txbb_avail_space;
		remain_size -= txbb_avail_space;
		/* New TXBB, Check for TxQ wrap. */
		if (thdr.to >= sq->eob)
			thdr.vto = sq->buf;
		pv[*pv_counter].dst = (volatile uint32_t *)thdr.to;
		rte_memcpy(&pv[*pv_counter].val, from, remain_size);
		(*pv_counter)++;
	} else if (remain_size) {
		rte_memcpy(thdr.to, from, remain_size);
	}
	tseg->mss_hdr_size = rte_cpu_to_be_32((buf->tso_segsz << 16) |
					      tinfo->tso_header_size);
	/* Calculate data segment location */
	return (volatile struct mlx4_wqe_data_seg *)
				((uintptr_t)tseg + tinfo->wqe_tso_seg_size);
}
/**
 * Write data segments and header for TSO uni/multi segment packet.
 *
 * @param buf
 *   Pointer to the first packet mbuf.
 * @param txq
 *   Pointer to Tx queue structure.
 * @param ctrl
 *   Pointer to the WQE control segment.
 *
 * @return
 *   Pointer to the next WQE control segment on success, NULL otherwise.
 */
static volatile struct mlx4_wqe_ctrl_seg *
mlx4_tx_burst_tso(struct rte_mbuf *buf, struct txq *txq,
		  volatile struct mlx4_wqe_ctrl_seg *ctrl)
{
	volatile struct mlx4_wqe_data_seg *dseg;
	volatile struct mlx4_wqe_ctrl_seg *ctrl_next;
	struct mlx4_sq *sq = &txq->msq;
	struct tso_info tinfo;
	struct pv *pv;
	int pv_counter;
	int ret;

	ret = mlx4_tx_burst_tso_get_params(buf, txq, &tinfo);
	if (unlikely(ret))
		goto error;
	dseg = mlx4_tx_burst_fill_tso_hdr(buf, txq, &tinfo, ctrl);
	if (unlikely(dseg == NULL))
		goto error;
	if ((uintptr_t)dseg >= (uintptr_t)sq->eob)
		dseg = (volatile struct mlx4_wqe_data_seg *)
			((uintptr_t)dseg - sq->size);
	ctrl_next = mlx4_tx_burst_fill_tso_dsegs(buf, txq, &tinfo, dseg, ctrl);
	if (unlikely(ctrl_next == NULL))
		goto error;
	/* Write the first DWORD of each TXBB saved earlier. */
	if (likely(tinfo.pv_counter)) {
		pv = tinfo.pv;
		pv_counter = tinfo.pv_counter;
		/* Need a barrier here before writing the first TXBB word. */
		rte_io_wmb();
		do {
			--pv_counter;
			*pv[pv_counter].dst = pv[pv_counter].val;
		} while (pv_counter > 0);
	}
	ctrl->fence_size = tinfo.fence_size;
	sq->remain_size -= tinfo.wqe_size;
	return ctrl_next;
error:
	txq->stats.odropped++;
	return NULL;
}
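/*
 * Editorial note: on any failure above the function only bumps
 * txq->stats.odropped and returns NULL; ctrl->fence_size and sq->remain_size
 * are updated exclusively on the success path, after the postponed TXBB
 * words have been flushed.
 */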
/**
 * Write data segments of multi-segment packet.
 *
 * @param buf
 *   Pointer to the first packet mbuf.
 * @param txq
 *   Pointer to Tx queue structure.
 * @param ctrl
 *   Pointer to the WQE control segment.
 *
 * @return
 *   Pointer to the next WQE control segment on success, NULL otherwise.
 */
static volatile struct mlx4_wqe_ctrl_seg *
mlx4_tx_burst_segs(struct rte_mbuf *buf, struct txq *txq,
		   volatile struct mlx4_wqe_ctrl_seg *ctrl)
{
	struct pv *pv = (struct pv *)txq->bounce_buf;
	struct mlx4_sq *sq = &txq->msq;
	struct rte_mbuf *sbuf = buf;
	uint32_t lkey;
	int pv_counter = 0;
	int nb_segs = buf->nb_segs;
	uint32_t wqe_size;
	volatile struct mlx4_wqe_data_seg *dseg =
		(volatile struct mlx4_wqe_data_seg *)(ctrl + 1);

	ctrl->fence_size = 1 + nb_segs;
	wqe_size = RTE_ALIGN((uint32_t)(ctrl->fence_size << MLX4_SEG_SHIFT),
			     MLX4_TXBB_SIZE);
	/* Validate WQE size and WQE space in the send queue. */
	if (sq->remain_size < wqe_size ||
	    wqe_size > MLX4_MAX_WQE_SIZE)
		return NULL;
	/*
	 * Fill the data segments with buffer information.
	 * First WQE TXBB head segment is always control segment,
	 * so jump to tail TXBB data segments code for the first
	 * WQE data segments filling.
	 */
	goto txbb_tail_segs;
txbb_head_seg:
	/* Memory region key (big endian) for this memory pool. */
	lkey = mlx4_tx_mb2mr(txq, sbuf);
	if (unlikely(lkey == (uint32_t)-1)) {
		DEBUG("%p: unable to get MP <-> MR association",
		      (void *)txq);
		return NULL;
	}
	/* Handle WQE wraparound. */
	if (dseg >= (volatile struct mlx4_wqe_data_seg *)sq->eob)
		dseg = (volatile struct mlx4_wqe_data_seg *)sq->buf;
	dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(sbuf, uintptr_t));
	dseg->lkey = lkey;
	/*
	 * This data segment starts at the beginning of a new
	 * TXBB, so we need to postpone its byte_count writing
	 * for later.
	 */
	pv[pv_counter].dseg = dseg;
	/*
	 * Zero length segment is treated as inline segment
	 * with zero data.
	 */
	pv[pv_counter++].val = rte_cpu_to_be_32(sbuf->data_len ?
						sbuf->data_len : 0x80000000);
	sbuf = sbuf->next;
	dseg++;
	nb_segs--;
txbb_tail_segs:
	/* Jump to default if there are more than two segments remaining. */
	switch (nb_segs) {
	default:
		lkey = mlx4_tx_mb2mr(txq, sbuf);
		if (unlikely(lkey == (uint32_t)-1)) {
			DEBUG("%p: unable to get MP <-> MR association",
			      (void *)txq);
			return NULL;
		}
		mlx4_fill_tx_data_seg(dseg, lkey,
				      rte_pktmbuf_mtod(sbuf, uintptr_t),
				      rte_cpu_to_be_32(sbuf->data_len ?
						       sbuf->data_len :
						       0x80000000));
		sbuf = sbuf->next;
		dseg++;
		nb_segs--;
		/* fallthrough */
	case 2:
		lkey = mlx4_tx_mb2mr(txq, sbuf);
		if (unlikely(lkey == (uint32_t)-1)) {
			DEBUG("%p: unable to get MP <-> MR association",
			      (void *)txq);
			return NULL;
		}
		mlx4_fill_tx_data_seg(dseg, lkey,
				      rte_pktmbuf_mtod(sbuf, uintptr_t),
				      rte_cpu_to_be_32(sbuf->data_len ?
						       sbuf->data_len :
						       0x80000000));
		sbuf = sbuf->next;
		dseg++;
		nb_segs--;
		/* fallthrough */
	case 1:
		lkey = mlx4_tx_mb2mr(txq, sbuf);
		if (unlikely(lkey == (uint32_t)-1)) {
			DEBUG("%p: unable to get MP <-> MR association",
			      (void *)txq);
			return NULL;
		}
		mlx4_fill_tx_data_seg(dseg, lkey,
				      rte_pktmbuf_mtod(sbuf, uintptr_t),
				      rte_cpu_to_be_32(sbuf->data_len ?
						       sbuf->data_len :
						       0x80000000));
		nb_segs--;
		if (nb_segs) {
			sbuf = sbuf->next;
			dseg++;
			goto txbb_head_seg;
		}
		/* fallthrough */
	case 0:
		break;
	}
	/* Write the first DWORD of each TXBB saved earlier. */
	if (pv_counter) {
		/* Need a barrier here before writing the byte_count. */
		rte_io_wmb();
		for (--pv_counter; pv_counter >= 0; pv_counter--)
			pv[pv_counter].dseg->byte_count = pv[pv_counter].val;
	}
	sq->remain_size -= wqe_size;
	/* Align next WQE address to the next TXBB. */
	return (volatile struct mlx4_wqe_ctrl_seg *)
		((volatile uint8_t *)ctrl + wqe_size);
}
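/*
 * Editorial note: txq->bounce_buf doubles as the pv[] scratch array here,
 * exactly as in the TSO path; byte_count words that start a new TXBB are
 * deferred and only written after the rte_io_wmb() above.
 */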
/**
 * DPDK callback for Tx.
 *
 * @param dpdk_txq
 *   Generic pointer to Tx queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct txq *txq = (struct txq *)dpdk_txq;
	unsigned int elts_head = txq->elts_head;
	const unsigned int elts_n = txq->elts_n;
	const unsigned int elts_m = elts_n - 1;
	unsigned int bytes_sent = 0;
	unsigned int i;
	unsigned int max = elts_head - txq->elts_tail;
	struct mlx4_sq *sq = &txq->msq;
	volatile struct mlx4_wqe_ctrl_seg *ctrl;
	struct txq_elt *elt;

	assert(txq->elts_comp_cd != 0);
	if (likely(max >= txq->elts_comp_cd_init))
		mlx4_txq_complete(txq, elts_m, sq);
	max = elts_n - max;
	assert(max >= 1);
	assert(max <= elts_n);
	/* Always leave one free entry in the ring. */
	--max;
	if (max > pkts_n)
		max = pkts_n;
	elt = &(*txq->elts)[elts_head & elts_m];
	/* First Tx burst element saves the next WQE control segment. */
	ctrl = elt->wqe;
	for (i = 0; (i != max); ++i) {
		struct rte_mbuf *buf = pkts[i];
		struct txq_elt *elt_next = &(*txq->elts)[++elts_head & elts_m];
		uint32_t owner_opcode = sq->owner_opcode;
		volatile struct mlx4_wqe_data_seg *dseg =
				(volatile struct mlx4_wqe_data_seg *)(ctrl + 1);
		volatile struct mlx4_wqe_ctrl_seg *ctrl_next;
		union {
			uint32_t flags;
			uint16_t flags16[2];
		} srcrb;
		uint32_t lkey;
		bool tso = txq->priv->tso && (buf->ol_flags & PKT_TX_TCP_SEG);

		/* Clean up old buffer. */
		if (likely(elt->buf != NULL)) {
			struct rte_mbuf *tmp = elt->buf;

#ifndef NDEBUG
			/* Poisoning. */
			memset(&elt->buf, 0x66, sizeof(struct rte_mbuf *));
#endif
			/* Faster than rte_pktmbuf_free(). */
			do {
				struct rte_mbuf *next = tmp->next;

				rte_pktmbuf_free_seg(tmp);
				tmp = next;
			} while (tmp != NULL);
		}
		RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
		if (tso) {
			/* Change opcode to TSO */
			owner_opcode &= ~MLX4_OPCODE_CONFIG_CMD;
			owner_opcode |= MLX4_OPCODE_LSO | MLX4_WQE_CTRL_RR;
			ctrl_next = mlx4_tx_burst_tso(buf, txq, ctrl);
			if (!ctrl_next) {
				elt->buf = NULL;
				break;
			}
		} else if (buf->nb_segs == 1) {
			/* Validate WQE space in the send queue. */
			if (sq->remain_size < MLX4_TXBB_SIZE) {
				elt->buf = NULL;
				break;
			}
			lkey = mlx4_tx_mb2mr(txq, buf);
			if (unlikely(lkey == (uint32_t)-1)) {
				/* MR does not exist. */
				DEBUG("%p: unable to get MP <-> MR association",
				      (void *)txq);
				elt->buf = NULL;
				break;
			}
			mlx4_fill_tx_data_seg(dseg++, lkey,
					      rte_pktmbuf_mtod(buf, uintptr_t),
					      rte_cpu_to_be_32(buf->data_len));
			/* Set WQE size in 16-byte units. */
			ctrl->fence_size = 0x2;
			sq->remain_size -= MLX4_TXBB_SIZE;
			/* Align next WQE address to the next TXBB. */
			ctrl_next = ctrl + 0x4;
		} else {
			ctrl_next = mlx4_tx_burst_segs(buf, txq, ctrl);
			if (!ctrl_next) {
				elt->buf = NULL;
				break;
			}
		}
		/* Hold SQ ring wrap around. */
		if ((volatile uint8_t *)ctrl_next >= sq->eob) {
			ctrl_next = (volatile struct mlx4_wqe_ctrl_seg *)
				((volatile uint8_t *)ctrl_next - sq->size);
			/* Flip HW valid ownership. */
			sq->owner_opcode ^= 1u << MLX4_SQ_OWNER_BIT;
		}
		/*
		 * For raw Ethernet, the SOLICIT flag is used to indicate
		 * that no ICRC should be calculated.
		 */
		if (--txq->elts_comp_cd == 0) {
			/* Save the completion burst end address. */
			elt_next->eocb = (volatile uint32_t *)ctrl_next;
			txq->elts_comp_cd = txq->elts_comp_cd_init;
			srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT |
					       MLX4_WQE_CTRL_CQ_UPDATE);
		} else {
			srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT);
		}
		/* Enable HW checksum offload if requested */
		if (txq->csum &&
		    (buf->ol_flags &
		     (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))) {
			const uint64_t is_tunneled = (buf->ol_flags &
						      (PKT_TX_TUNNEL_GRE |
						       PKT_TX_TUNNEL_VXLAN));

			if (is_tunneled && txq->csum_l2tun) {
				owner_opcode |= MLX4_WQE_CTRL_IIP_HDR_CSUM |
						MLX4_WQE_CTRL_IL4_HDR_CSUM;
				if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
					srcrb.flags |=
					    RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM);
			} else {
				srcrb.flags |=
					RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM |
						 MLX4_WQE_CTRL_TCP_UDP_CSUM);
			}
		}
		if (txq->lb) {
			/*
			 * Copy destination MAC address to the WQE, this allows
			 * loopback in eSwitch, so that VFs and PF can
			 * communicate with each other.
			 */
			srcrb.flags16[0] = *(rte_pktmbuf_mtod(buf, uint16_t *));
			ctrl->imm = *(rte_pktmbuf_mtod_offset(buf, uint32_t *,
							      sizeof(uint16_t)));
		} else {
			ctrl->imm = 0;
		}
		ctrl->srcrb_flags = srcrb.flags;
		/*
		 * Make sure descriptor is fully written before
		 * setting ownership bit (because HW can start
		 * executing as soon as we do).
		 */
		rte_io_wmb();
		ctrl->owner_opcode = rte_cpu_to_be_32(owner_opcode);
		elt->buf = buf;
		bytes_sent += buf->pkt_len;
		ctrl = ctrl_next;
		elt = elt_next;
	}
	/* Take a shortcut if nothing must be sent. */
	if (unlikely(i == 0))
		return 0;
	/* Save WQE address of the next Tx burst element. */
	elt->wqe = ctrl;
	/* Increment send statistics counters. */
	txq->stats.opackets += i;
	txq->stats.obytes += bytes_sent;
	/* Make sure that descriptors are written before doorbell record. */
	rte_wmb();
	/* Ring QP doorbell. */
	rte_write32(txq->msq.doorbell_qpn, txq->msq.db);
	txq->elts_head += i;
	return i;
}
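/*
 * Editorial note: the rte_wmb() above orders all WQE and ownership stores
 * before the doorbell write; the rte_write32() on txq->msq.db is what tells
 * the HW that newly posted WQEs are available.
 */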
/**
 * Translate Rx completion flags to packet type.
 *
 * @param[in] cqe
 *   Pointer to CQE.
 * @param l2tun_offload
 *   Whether L2 tunnel offload is enabled.
 *
 * @return
 *   Packet type for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe,
		   uint32_t l2tun_offload)
{
	uint8_t idx = 0;
	uint32_t pinfo = rte_be_to_cpu_32(cqe->vlan_my_qpn);
	uint32_t status = rte_be_to_cpu_32(cqe->status);

	/*
	 * The index to the array should have:
	 *  bit[7] - MLX4_CQE_L2_TUNNEL
	 *  bit[6] - MLX4_CQE_L2_TUNNEL_IPV4
	 */
	if (l2tun_offload && (pinfo & MLX4_CQE_L2_TUNNEL))
		idx |= ((pinfo & MLX4_CQE_L2_TUNNEL) >> 20) |
		       ((pinfo & MLX4_CQE_L2_TUNNEL_IPV4) >> 19);
	/*
	 * The index to the array should have:
	 *  bit[5] - MLX4_CQE_STATUS_UDP
	 *  bit[4] - MLX4_CQE_STATUS_TCP
	 *  bit[3] - MLX4_CQE_STATUS_IPV4OPT
	 *  bit[2] - MLX4_CQE_STATUS_IPV6
	 *  bit[1] - MLX4_CQE_STATUS_IPF
	 *  bit[0] - MLX4_CQE_STATUS_IPV4
	 * giving a total of up to 256 entries.
	 */
	idx |= ((status & MLX4_CQE_STATUS_PTYPE_MASK) >> 22);
	if (status & MLX4_CQE_STATUS_IPV6)
		idx |= ((status & MLX4_CQE_STATUS_IPV6F) >> 11);
	return mlx4_ptype_table[idx];
}
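/*
 * Example (illustrative only): a plain IPv4/UDP completion has
 * MLX4_CQE_STATUS_UDP and MLX4_CQE_STATUS_IPV4 set, so idx becomes 0x21 and
 * the lookup yields RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
 * RTE_PTYPE_L4_UDP.
 */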
/**
 * Translate Rx completion flags to offload flags.
 *
 * @param flags
 *   Rx completion flags returned by mlx4_cqe_flags().
 * @param csum
 *   Whether Rx checksums are enabled.
 * @param csum_l2tun
 *   Whether Rx L2 tunnel checksums are enabled.
 *
 * @return
 *   Offload flags (ol_flags) in mbuf format.
 */
static inline uint32_t
rxq_cq_to_ol_flags(uint32_t flags, int csum, int csum_l2tun)
{
	uint32_t ol_flags = 0;

	if (csum)
		ol_flags |=
			mlx4_transpose(flags,
				       MLX4_CQE_STATUS_IP_HDR_CSUM_OK,
				       PKT_RX_IP_CKSUM_GOOD) |
			mlx4_transpose(flags,
				       MLX4_CQE_STATUS_TCP_UDP_CSUM_OK,
				       PKT_RX_L4_CKSUM_GOOD);
	if ((flags & MLX4_CQE_L2_TUNNEL) && csum_l2tun)
		ol_flags |=
			mlx4_transpose(flags,
				       MLX4_CQE_L2_TUNNEL_IPOK,
				       PKT_RX_IP_CKSUM_GOOD) |
			mlx4_transpose(flags,
				       MLX4_CQE_L2_TUNNEL_L4_CSUM,
				       PKT_RX_L4_CKSUM_GOOD);
	return ol_flags;
}
/**
 * Extract checksum information from CQE flags.
 *
 * @param cqe
 *   Pointer to CQE structure.
 * @param csum
 *   Whether Rx checksums are enabled.
 * @param csum_l2tun
 *   Whether Rx L2 tunnel checksums are enabled.
 *
 * @return
 *   CQE checksum information.
 */
static inline uint32_t
mlx4_cqe_flags(volatile struct mlx4_cqe *cqe, int csum, int csum_l2tun)
{
	uint32_t flags = 0;

	/*
	 * The relevant bits are in different locations on their
	 * CQE fields therefore we can join them in one 32bit
	 * variable.
	 */
	if (csum)
		flags = (rte_be_to_cpu_32(cqe->status) &
			 MLX4_CQE_STATUS_IPV4_CSUM_OK);
	if (csum_l2tun)
		flags |= (rte_be_to_cpu_32(cqe->vlan_my_qpn) &
			  (MLX4_CQE_L2_TUNNEL |
			   MLX4_CQE_L2_TUNNEL_IPOK |
			   MLX4_CQE_L2_TUNNEL_L4_CSUM |
			   MLX4_CQE_L2_TUNNEL_IPV4));
	return flags;
}
/**
 * Poll one CQE from CQ.
 *
 * @param rxq
 *   Pointer to the receive queue structure.
 * @param[out] out
 *   Just polled CQE.
 *
 * @return
 *   Number of bytes of the CQE, 0 in case there is no completion.
 */
static int
mlx4_cq_poll_one(struct rxq *rxq, volatile struct mlx4_cqe **out)
{
	int ret = 0;
	volatile struct mlx4_cqe *cqe = NULL;
	struct mlx4_cq *cq = &rxq->mcq;

	cqe = (volatile struct mlx4_cqe *)mlx4_get_cqe(cq, cq->cons_index);
	if (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
	    !!(cq->cons_index & cq->cqe_cnt))
		goto out;
	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rte_rmb();
	assert(!(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK));
	assert((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) !=
	       MLX4_CQE_OPCODE_ERROR);
	ret = rte_be_to_cpu_32(cqe->byte_cnt);
	++cq->cons_index;
out:
	*out = cqe;
	return ret;
}
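/*
 * Editorial note: the XOR of the CQE ownership bit with
 * (cons_index & cqe_cnt) detects whether the entry belongs to the current
 * pass over the CQ ring; the same test is used in mlx4_txq_complete() above.
 */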
/**
 * DPDK callback for Rx with scattered packets support.
 *
 * @param dpdk_rxq
 *   Generic pointer to Rx queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct rxq *rxq = dpdk_rxq;
	const uint32_t wr_cnt = (1 << rxq->elts_n) - 1;
	const uint16_t sges_n = rxq->sges_n;
	struct rte_mbuf *pkt = NULL;
	struct rte_mbuf *seg = NULL;
	unsigned int i = 0;
	uint32_t rq_ci = rxq->rq_ci << sges_n;
	int len = 0;

	while (pkts_n) {
		volatile struct mlx4_cqe *cqe;
		uint32_t idx = rq_ci & wr_cnt;
		struct rte_mbuf *rep = (*rxq->elts)[idx];
		volatile struct mlx4_wqe_data_seg *scat = &(*rxq->wqes)[idx];

		/* Update the 'next' pointer of the previous segment. */
		if (pkt)
			seg->next = rep;
		seg = rep;
		rte_prefetch0(seg);
		rte_prefetch0(scat);
		rep = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(rep == NULL)) {
			++rxq->stats.rx_nombuf;
			if (!pkt) {
				/*
				 * No buffers before we even started,
				 * bail out silently.
				 */
				break;
			}
			while (pkt != seg) {
				assert(pkt != (*rxq->elts)[idx]);
				rep = pkt->next;
				pkt->next = NULL;
				pkt->nb_segs = 1;
				rte_mbuf_raw_free(pkt);
				pkt = rep;
			}
			break;
		}
		if (!pkt) {
			/* Looking for the new packet. */
			len = mlx4_cq_poll_one(rxq, &cqe);
			if (!len) {
				rte_mbuf_raw_free(rep);
				break;
			}
			if (unlikely(len < 0)) {
				/* Rx error, packet is likely too large. */
				rte_mbuf_raw_free(rep);
				++rxq->stats.idropped;
				goto skip;
			}
			pkt = seg;
			assert(len >= (rxq->crc_present << 2));
			/* Update packet information. */
			pkt->packet_type =
				rxq_cq_to_pkt_type(cqe, rxq->l2tun_offload);
			pkt->ol_flags = PKT_RX_RSS_HASH;
			pkt->hash.rss = cqe->immed_rss_invalid;
			if (rxq->crc_present)
				len -= ETHER_CRC_LEN;
			pkt->pkt_len = len;
			if (rxq->csum | rxq->csum_l2tun) {
				uint32_t flags =
					mlx4_cqe_flags(cqe,
						       rxq->csum,
						       rxq->csum_l2tun);

				pkt->ol_flags |=
					rxq_cq_to_ol_flags(flags,
							   rxq->csum,
							   rxq->csum_l2tun);
			}
		}
		rep->nb_segs = 1;
		rep->port = rxq->port_id;
		rep->data_len = seg->data_len;
		rep->data_off = seg->data_off;
		(*rxq->elts)[idx] = rep;
		/*
		 * Fill NIC descriptor with the new buffer. The lkey and size
		 * of the buffers are already known, only the buffer address
		 * changes.
		 */
		scat->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
		/* If there's only one MR, no need to replace LKey in WQE. */
		if (unlikely(mlx4_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
			scat->lkey = mlx4_rx_mb2mr(rxq, rep);
		if (len > seg->data_len) {
			len -= seg->data_len;
			++pkt->nb_segs;
			++rq_ci;
			continue;
		}
		/* The last segment. */
		seg->data_len = len;
		/* Increment bytes counter. */
		rxq->stats.ibytes += pkt->pkt_len;
		/* Return packet. */
		*(pkts++) = pkt;
		pkt = NULL;
		--pkts_n;
		++i;
skip:
		/* Align consumer index to the next stride. */
		rq_ci >>= sges_n;
		++rq_ci;
		rq_ci <<= sges_n;
	}
	if (unlikely(i == 0 && (rq_ci >> sges_n) == rxq->rq_ci))
		return 0;
	/* Update the consumer index. */
	rxq->rq_ci = rq_ci >> sges_n;
	rte_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
	*rxq->mcq.set_ci_db =
		rte_cpu_to_be_32(rxq->mcq.cons_index & MLX4_CQ_DB_CI_MASK);
	/* Increment packets counter. */
	rxq->stats.ipackets += i;
	return i;
}
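/*
 * Editorial note: with scattered Rx each packet may span up to
 * 1 << rxq->sges_n ring entries, which is why rq_ci is shifted by sges_n when
 * converting between the per-SGE stride index used in the loop above and
 * rxq->rq_ci.
 */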
/**
 * Dummy DPDK callback for Tx.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_txq
 *   Generic pointer to Tx queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx4_tx_burst_removed(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	(void)dpdk_txq;
	(void)pkts;
	(void)pkts_n;
	rte_mb();
	return 0;
}
/**
 * Dummy DPDK callback for Rx.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_rxq
 *   Generic pointer to Rx queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	(void)dpdk_rxq;
	(void)pkts;
	(void)pkts_n;
	rte_mb();
	return 0;
}