1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation.
3 * Copyright 2014 6WIND S.A.
4 */
5
6#include <sys/queue.h>
7
8#include <stdio.h>
9#include <stdlib.h>
10#include <string.h>
11#include <errno.h>
12#include <stdint.h>
13#include <stdarg.h>
14#include <unistd.h>
15#include <inttypes.h>
16
17#include <rte_byteorder.h>
18#include <rte_common.h>
19#include <rte_cycles.h>
20#include <rte_log.h>
21#include <rte_debug.h>
22#include <rte_interrupts.h>
23#include <rte_pci.h>
24#include <rte_memory.h>
25#include <rte_memzone.h>
26#include <rte_launch.h>
27#include <rte_eal.h>
28#include <rte_per_lcore.h>
29#include <rte_lcore.h>
30#include <rte_atomic.h>
31#include <rte_branch_prediction.h>
32#include <rte_mempool.h>
33#include <rte_malloc.h>
34#include <rte_mbuf.h>
35#include <rte_ether.h>
36#include <rte_ethdev_driver.h>
37#include <rte_prefetch.h>
38#include <rte_udp.h>
39#include <rte_tcp.h>
40#include <rte_sctp.h>
41#include <rte_string_fns.h>
42#include <rte_errno.h>
43#include <rte_ip.h>
44#include <rte_net.h>
45
46#include "ixgbe_logs.h"
47#include "base/ixgbe_api.h"
48#include "base/ixgbe_vf.h"
49#include "ixgbe_ethdev.h"
50#include "base/ixgbe_dcb.h"
51#include "base/ixgbe_common.h"
52#include "ixgbe_rxtx.h"
53
54#ifdef RTE_LIBRTE_IEEE1588
55#define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
56#else
57#define IXGBE_TX_IEEE1588_TMST 0
58#endif
59/* Bit Mask to indicate what bits required for building TX context */
60#define IXGBE_TX_OFFLOAD_MASK ( \
61 PKT_TX_OUTER_IPV6 | \
62 PKT_TX_OUTER_IPV4 | \
63 PKT_TX_IPV6 | \
64 PKT_TX_IPV4 | \
65 PKT_TX_VLAN_PKT | \
66 PKT_TX_IP_CKSUM | \
67 PKT_TX_L4_MASK | \
68 PKT_TX_TCP_SEG | \
69 PKT_TX_MACSEC | \
70 PKT_TX_OUTER_IP_CKSUM | \
71 PKT_TX_SEC_OFFLOAD | \
72 IXGBE_TX_IEEE1588_TMST)
73
74#define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
75 (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
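/*
 * Note: IXGBE_TX_OFFLOAD_NOTSUP_MASK is effectively the set of PKT_TX_*
 * flag bits that this PMD does not handle; ixgbe_prep_pkts() below rejects
 * any mbuf that carries one of these bits.
 */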
76
77#if 1
78#define RTE_PMD_USE_PREFETCH
79#endif
80
81#ifdef RTE_PMD_USE_PREFETCH
82/*
83 * Prefetch a cache line into all cache levels.
84 */
85#define rte_ixgbe_prefetch(p) rte_prefetch0(p)
86#else
87#define rte_ixgbe_prefetch(p) do {} while (0)
88#endif
89
90#ifdef RTE_IXGBE_INC_VECTOR
91uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
92 uint16_t nb_pkts);
93#endif
94
95/*********************************************************************
96 *
97 * TX functions
98 *
99 **********************************************************************/
100
101/*
102 * Check for descriptors with their DD bit set and free mbufs.
103 * Return the total number of buffers freed.
104 */
105static __rte_always_inline int
106ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
107{
108 struct ixgbe_tx_entry *txep;
109 uint32_t status;
110 int i, nb_free = 0;
111 struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
112
113 /* check DD bit on threshold descriptor */
114 status = txq->tx_ring[txq->tx_next_dd].wb.status;
115 if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
116 return 0;
117
118 /*
119 * first buffer to free from S/W ring is at index
120 * tx_next_dd - (tx_rs_thresh-1)
121 */
122 txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
123
124 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
125 /* free buffers one at a time */
126 m = rte_pktmbuf_prefree_seg(txep->mbuf);
127 txep->mbuf = NULL;
128
129 if (unlikely(m == NULL))
130 continue;
131
132 if (nb_free >= RTE_IXGBE_TX_MAX_FREE_BUF_SZ ||
133 (nb_free > 0 && m->pool != free[0]->pool)) {
134 rte_mempool_put_bulk(free[0]->pool,
135 (void **)free, nb_free);
136 nb_free = 0;
137 }
138
139 free[nb_free++] = m;
140 }
141
142 if (nb_free > 0)
143 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
144
145 /* buffers were freed, update counters */
146 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
147 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
148 if (txq->tx_next_dd >= txq->nb_tx_desc)
149 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
150
151 return txq->tx_rs_thresh;
152}
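/*
 * Note: the loop above batches freed mbufs and returns them to their
 * mempool with rte_mempool_put_bulk(), flushing early only when the batch
 * is full or the pool changes between buffers.
 */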
153
154/* Populate 4 descriptors with data from 4 mbufs */
155static inline void
156tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
157{
158 uint64_t buf_dma_addr;
159 uint32_t pkt_len;
160 int i;
161
162 for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
163 buf_dma_addr = rte_mbuf_data_iova(*pkts);
164 pkt_len = (*pkts)->data_len;
165
166 /* write data to descriptor */
167 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
168
169 txdp->read.cmd_type_len =
170 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
171
172 txdp->read.olinfo_status =
173 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
174
175 rte_prefetch0(&(*pkts)->pool);
176 }
177}
178
179/* Populate 1 descriptor with data from 1 mbuf */
180static inline void
181tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
182{
183 uint64_t buf_dma_addr;
184 uint32_t pkt_len;
185
186 buf_dma_addr = rte_mbuf_data_iova(*pkts);
187 pkt_len = (*pkts)->data_len;
188
189 /* write data to descriptor */
190 txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
191 txdp->read.cmd_type_len =
192 rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
193 txdp->read.olinfo_status =
194 rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
195 rte_prefetch0(&(*pkts)->pool);
196}
197
198/*
199 * Fill H/W descriptor ring with mbuf data.
200 * Copy mbuf pointers to the S/W ring.
201 */
202static inline void
203ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
204 uint16_t nb_pkts)
205{
206 volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
207 struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
208 const int N_PER_LOOP = 4;
209 const int N_PER_LOOP_MASK = N_PER_LOOP-1;
210 int mainpart, leftover;
211 int i, j;
212
213 /*
214 * Process most of the packets in chunks of N pkts. Any
215 * leftover packets will get processed one at a time.
216 */
217 mainpart = (nb_pkts & ((uint32_t) ~N_PER_LOOP_MASK));
218 leftover = (nb_pkts & ((uint32_t) N_PER_LOOP_MASK));
219 for (i = 0; i < mainpart; i += N_PER_LOOP) {
220 /* Copy N mbuf pointers to the S/W ring */
221 for (j = 0; j < N_PER_LOOP; ++j) {
222 (txep + i + j)->mbuf = *(pkts + i + j);
223 }
224 tx4(txdp + i, pkts + i);
225 }
226
227 if (unlikely(leftover > 0)) {
228 for (i = 0; i < leftover; ++i) {
229 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
230 tx1(txdp + mainpart + i, pkts + mainpart + i);
231 }
232 }
233}
234
235static inline uint16_t
236tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
237 uint16_t nb_pkts)
238{
239 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
240 volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
241 uint16_t n = 0;
242
243 /*
244 * Begin scanning the H/W ring for done descriptors when the
245 * number of available descriptors drops below tx_free_thresh. For
246 * each done descriptor, free the associated buffer.
247 */
248 if (txq->nb_tx_free < txq->tx_free_thresh)
249 ixgbe_tx_free_bufs(txq);
250
251 /* Only use descriptors that are available */
252 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
253 if (unlikely(nb_pkts == 0))
254 return 0;
255
256 /* Use exactly nb_pkts descriptors */
257 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
258
259 /*
260 * At this point, we know there are enough descriptors in the
261 * ring to transmit all the packets. This assumes that each
262 * mbuf contains a single segment, and that no new offloads
263 * are expected, which would require a new context descriptor.
264 */
265
266 /*
267 * See if we're going to wrap-around. If so, handle the top
268 * of the descriptor ring first, then do the bottom. If not,
269 * the processing looks just like the "bottom" part anyway...
270 */
271 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
272 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
273 ixgbe_tx_fill_hw_ring(txq, tx_pkts, n);
274
275 /*
276 * We know that the last descriptor in the ring will need to
277 * have its RS bit set because tx_rs_thresh has to be
278 * a divisor of the ring size
279 */
280 tx_r[txq->tx_next_rs].read.cmd_type_len |=
281 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
282 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
283
284 txq->tx_tail = 0;
285 }
286
287 /* Fill H/W descriptor ring with mbuf data */
288 ixgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
289 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
290
291 /*
292 * Determine if RS bit should be set
293 * This is what we actually want:
294 * if ((txq->tx_tail - 1) >= txq->tx_next_rs)
295 * but instead of subtracting 1 and doing >=, we can just do
296 * greater than without subtracting.
297 */
298 if (txq->tx_tail > txq->tx_next_rs) {
299 tx_r[txq->tx_next_rs].read.cmd_type_len |=
300 rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
301 txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
302 txq->tx_rs_thresh);
303 if (txq->tx_next_rs >= txq->nb_tx_desc)
304 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
305 }
306
307 /*
308 * Check for wrap-around. This would only happen if we used
309 * up to the last descriptor in the ring, no more, no less.
310 */
311 if (txq->tx_tail >= txq->nb_tx_desc)
312 txq->tx_tail = 0;
313
314 /* update tail pointer */
315 rte_wmb();
316 IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
317
318 return nb_pkts;
319}
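/*
 * Note: in this simple path the RS bit is requested only at tx_rs_thresh
 * intervals (tracked via tx_next_rs), which is what allows
 * ixgbe_tx_free_bufs() to reclaim completed buffers in tx_rs_thresh-sized
 * chunks.
 */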
320
321uint16_t
322ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
323 uint16_t nb_pkts)
324{
325 uint16_t nb_tx;
326
327 /* Try to transmit at least chunks of TX_MAX_BURST pkts */
328 if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
329 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
330
331 /* transmit more than the max burst, in chunks of TX_MAX_BURST */
332 nb_tx = 0;
333 while (nb_pkts) {
334 uint16_t ret, n;
335
336 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
337 ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
338 nb_tx = (uint16_t)(nb_tx + ret);
339 nb_pkts = (uint16_t)(nb_pkts - ret);
340 if (ret < n)
341 break;
342 }
343
344 return nb_tx;
345}
346
347#ifdef RTE_IXGBE_INC_VECTOR
348static uint16_t
349ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
350 uint16_t nb_pkts)
351{
352 uint16_t nb_tx = 0;
353 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
354
355 while (nb_pkts) {
356 uint16_t ret, num;
357
358 num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
359 ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
360 num);
361 nb_tx += ret;
362 nb_pkts -= ret;
363 if (ret < num)
364 break;
365 }
366
367 return nb_tx;
368}
369#endif
370
371static inline void
372ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
373 volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
374 uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
375 __rte_unused uint64_t *mdata)
376{
377 uint32_t type_tucmd_mlhl;
378 uint32_t mss_l4len_idx = 0;
379 uint32_t ctx_idx;
380 uint32_t vlan_macip_lens;
381 union ixgbe_tx_offload tx_offload_mask;
382 uint32_t seqnum_seed = 0;
383
384 ctx_idx = txq->ctx_curr;
385 tx_offload_mask.data[0] = 0;
386 tx_offload_mask.data[1] = 0;
387 type_tucmd_mlhl = 0;
388
389 /* Specify which HW CTX to upload. */
390 mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
391
392 if (ol_flags & PKT_TX_VLAN_PKT) {
393 tx_offload_mask.vlan_tci |= ~0;
394 }
395
396 /* check if TCP segmentation required for this packet */
397 if (ol_flags & PKT_TX_TCP_SEG) {
398 /* implies IP cksum in IPv4 */
399 if (ol_flags & PKT_TX_IP_CKSUM)
400 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
401 IXGBE_ADVTXD_TUCMD_L4T_TCP |
402 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
403 else
404 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV6 |
405 IXGBE_ADVTXD_TUCMD_L4T_TCP |
406 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
407
408 tx_offload_mask.l2_len |= ~0;
409 tx_offload_mask.l3_len |= ~0;
410 tx_offload_mask.l4_len |= ~0;
411 tx_offload_mask.tso_segsz |= ~0;
412 mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
413 mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
414 } else { /* no TSO, check if hardware checksum is needed */
415 if (ol_flags & PKT_TX_IP_CKSUM) {
416 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
417 tx_offload_mask.l2_len |= ~0;
418 tx_offload_mask.l3_len |= ~0;
419 }
420
421 switch (ol_flags & PKT_TX_L4_MASK) {
422 case PKT_TX_UDP_CKSUM:
423 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
424 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
425 mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
426 tx_offload_mask.l2_len |= ~0;
427 tx_offload_mask.l3_len |= ~0;
428 break;
429 case PKT_TX_TCP_CKSUM:
430 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
431 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
432 mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
433 tx_offload_mask.l2_len |= ~0;
434 tx_offload_mask.l3_len |= ~0;
435 break;
436 case PKT_TX_SCTP_CKSUM:
437 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
438 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
439 mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
440 tx_offload_mask.l2_len |= ~0;
441 tx_offload_mask.l3_len |= ~0;
442 break;
443 default:
444 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
445 IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
446 break;
447 }
448 }
449
450 if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
451 tx_offload_mask.outer_l2_len |= ~0;
452 tx_offload_mask.outer_l3_len |= ~0;
453 tx_offload_mask.l2_len |= ~0;
454 seqnum_seed |= tx_offload.outer_l3_len
455 << IXGBE_ADVTXD_OUTER_IPLEN;
456 seqnum_seed |= tx_offload.l2_len
457 << IXGBE_ADVTXD_TUNNEL_LEN;
458 }
459#ifdef RTE_LIBRTE_SECURITY
460 if (ol_flags & PKT_TX_SEC_OFFLOAD) {
461 union ixgbe_crypto_tx_desc_md *md =
462 (union ixgbe_crypto_tx_desc_md *)mdata;
463 seqnum_seed |=
464 (IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & md->sa_idx);
465 type_tucmd_mlhl |= md->enc ?
466 (IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
467 IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
468 type_tucmd_mlhl |=
469 (md->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
470 tx_offload_mask.sa_idx |= ~0;
471 tx_offload_mask.sec_pad_len |= ~0;
472 }
473#endif
474
475 txq->ctx_cache[ctx_idx].flags = ol_flags;
476 txq->ctx_cache[ctx_idx].tx_offload.data[0] =
477 tx_offload_mask.data[0] & tx_offload.data[0];
478 txq->ctx_cache[ctx_idx].tx_offload.data[1] =
479 tx_offload_mask.data[1] & tx_offload.data[1];
480 txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
481
482 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
483 vlan_macip_lens = tx_offload.l3_len;
484 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
485 vlan_macip_lens |= (tx_offload.outer_l2_len <<
486 IXGBE_ADVTXD_MACLEN_SHIFT);
487 else
488 vlan_macip_lens |= (tx_offload.l2_len <<
489 IXGBE_ADVTXD_MACLEN_SHIFT);
490 vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
491 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
492 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
493 ctx_txd->seqnum_seed = seqnum_seed;
494}
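/*
 * Note: the offload fields are stored in ctx_cache[] already masked, so
 * what_advctx_update() can compare them directly against the next packet's
 * tx_offload values.
 */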
495
496/*
497 * Check which hardware context can be used. Use the existing match
498 * or create a new context descriptor.
499 */
500static inline uint32_t
501what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
502 union ixgbe_tx_offload tx_offload)
503{
504 /* If it matches the currently used context */
505 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
506 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
507 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
508 & tx_offload.data[0])) &&
509 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
510 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
511 & tx_offload.data[1]))))
512 return txq->ctx_curr;
513
514 /* Check whether it matches the other (next) context */
515 txq->ctx_curr ^= 1;
516 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
517 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
518 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
519 & tx_offload.data[0])) &&
520 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
521 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
522 & tx_offload.data[1]))))
523 return txq->ctx_curr;
524
525 /* No match: a new context descriptor will have to be built */
526 return IXGBE_CTX_NUM;
527}
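/*
 * Note: the driver keeps two cached context slots per queue; ctx_curr
 * toggles between them, and a return value of IXGBE_CTX_NUM tells the
 * caller that neither slot matches and a new context descriptor must be
 * written.
 */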
528
529static inline uint32_t
530tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
531{
532 uint32_t tmp = 0;
533
534 if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
535 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
536 if (ol_flags & PKT_TX_IP_CKSUM)
537 tmp |= IXGBE_ADVTXD_POPTS_IXSM;
538 if (ol_flags & PKT_TX_TCP_SEG)
539 tmp |= IXGBE_ADVTXD_POPTS_TXSM;
540 return tmp;
541}
542
543static inline uint32_t
544tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
545{
546 uint32_t cmdtype = 0;
547
548 if (ol_flags & PKT_TX_VLAN_PKT)
549 cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
550 if (ol_flags & PKT_TX_TCP_SEG)
551 cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
552 if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
553 cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
554 if (ol_flags & PKT_TX_MACSEC)
555 cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
556 return cmdtype;
557}
558
559/* Default RS bit threshold values */
560#ifndef DEFAULT_TX_RS_THRESH
561#define DEFAULT_TX_RS_THRESH 32
562#endif
563#ifndef DEFAULT_TX_FREE_THRESH
564#define DEFAULT_TX_FREE_THRESH 32
565#endif
566
567/* Reset transmit descriptors after they have been used */
568static inline int
569ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
570{
571 struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
572 volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
573 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
574 uint16_t nb_tx_desc = txq->nb_tx_desc;
575 uint16_t desc_to_clean_to;
576 uint16_t nb_tx_to_clean;
577 uint32_t status;
578
579 /* Determine the last descriptor needing to be cleaned */
580 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
581 if (desc_to_clean_to >= nb_tx_desc)
582 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
583
584 /* Check to make sure the last descriptor to clean is done */
585 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
586 status = txr[desc_to_clean_to].wb.status;
587 if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) {
588 PMD_TX_FREE_LOG(DEBUG,
589 "TX descriptor %4u is not done "
590 "(port=%d queue=%d)",
591 desc_to_clean_to,
592 txq->port_id, txq->queue_id);
593 /* Failed to clean any descriptors, better luck next time */
594 return -(1);
595 }
596
597 /* Figure out how many descriptors will be cleaned */
598 if (last_desc_cleaned > desc_to_clean_to)
599 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
600 desc_to_clean_to);
601 else
602 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
603 last_desc_cleaned);
604
605 PMD_TX_FREE_LOG(DEBUG,
606 "Cleaning %4u TX descriptors: %4u to %4u "
607 "(port=%d queue=%d)",
608 nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
609 txq->port_id, txq->queue_id);
610
611 /*
612 * The last descriptor to clean is done, so that means all the
613 * descriptors from the last descriptor that was cleaned
614 * up to the last descriptor with the RS bit set
615 * are done. Only reset the threshold descriptor.
616 */
617 txr[desc_to_clean_to].wb.status = 0;
618
619 /* Update the txq to reflect the last descriptor that was cleaned */
620 txq->last_desc_cleaned = desc_to_clean_to;
621 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
622
623 /* No Error */
624 return 0;
625}
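/*
 * Note: cleanup advances in tx_rs_thresh-sized steps and uses
 * sw_ring[].last_id to land on the last descriptor of a packet before
 * checking its DD bit, so partially transmitted packets are never freed.
 */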
626
627uint16_t
628ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
629 uint16_t nb_pkts)
630{
631 struct ixgbe_tx_queue *txq;
632 struct ixgbe_tx_entry *sw_ring;
633 struct ixgbe_tx_entry *txe, *txn;
634 volatile union ixgbe_adv_tx_desc *txr;
635 volatile union ixgbe_adv_tx_desc *txd, *txp;
636 struct rte_mbuf *tx_pkt;
637 struct rte_mbuf *m_seg;
638 uint64_t buf_dma_addr;
639 uint32_t olinfo_status;
640 uint32_t cmd_type_len;
641 uint32_t pkt_len;
642 uint16_t slen;
643 uint64_t ol_flags;
644 uint16_t tx_id;
645 uint16_t tx_last;
646 uint16_t nb_tx;
647 uint16_t nb_used;
648 uint64_t tx_ol_req;
649 uint32_t ctx = 0;
650 uint32_t new_ctx;
651 union ixgbe_tx_offload tx_offload;
652#ifdef RTE_LIBRTE_SECURITY
653 uint8_t use_ipsec;
654#endif
655
656 tx_offload.data[0] = 0;
657 tx_offload.data[1] = 0;
658 txq = tx_queue;
659 sw_ring = txq->sw_ring;
660 txr = txq->tx_ring;
661 tx_id = txq->tx_tail;
662 txe = &sw_ring[tx_id];
663 txp = NULL;
664
665 /* Determine if the descriptor ring needs to be cleaned. */
666 if (txq->nb_tx_free < txq->tx_free_thresh)
667 ixgbe_xmit_cleanup(txq);
668
669 rte_prefetch0(&txe->mbuf->pool);
670
671 /* TX loop */
672 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
673 new_ctx = 0;
674 tx_pkt = *tx_pkts++;
675 pkt_len = tx_pkt->pkt_len;
676
677 /*
678 * Determine how many (if any) context descriptors
679 * are needed for offload functionality.
680 */
681 ol_flags = tx_pkt->ol_flags;
682#ifdef RTE_LIBRTE_SECURITY
683 use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
684#endif
685
686 /* If hardware offload required */
687 tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
688 if (tx_ol_req) {
689 tx_offload.l2_len = tx_pkt->l2_len;
690 tx_offload.l3_len = tx_pkt->l3_len;
691 tx_offload.l4_len = tx_pkt->l4_len;
692 tx_offload.vlan_tci = tx_pkt->vlan_tci;
693 tx_offload.tso_segsz = tx_pkt->tso_segsz;
694 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
695 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
696#ifdef RTE_LIBRTE_SECURITY
697 if (use_ipsec) {
698 union ixgbe_crypto_tx_desc_md *ipsec_mdata =
699 (union ixgbe_crypto_tx_desc_md *)
700 &tx_pkt->udata64;
701 tx_offload.sa_idx = ipsec_mdata->sa_idx;
702 tx_offload.sec_pad_len = ipsec_mdata->pad_len;
703 }
704#endif
705
706 /* Build a new context descriptor if needed, or reuse the existing one. */
707 ctx = what_advctx_update(txq, tx_ol_req,
708 tx_offload);
709 /* Only allocate a context descriptor if required */
710 new_ctx = (ctx == IXGBE_CTX_NUM);
711 ctx = txq->ctx_curr;
712 }
713
714 /*
715 * Keep track of how many descriptors are used this loop
716 * This will always be the number of segments + the number of
717 * Context descriptors required to transmit the packet
718 */
719 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
720
721 if (txp != NULL &&
722 nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
723 /* set RS on the previous packet in the burst */
724 txp->read.cmd_type_len |=
725 rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
726
727 /*
728 * The number of descriptors that must be allocated for a
729 * packet is the number of segments of that packet, plus 1
730 * Context Descriptor for the hardware offload, if any.
731 * Determine the last TX descriptor to allocate in the TX ring
732 * for the packet, starting from the current position (tx_id)
733 * in the ring.
734 */
735 tx_last = (uint16_t) (tx_id + nb_used - 1);
736
737 /* Circular ring */
738 if (tx_last >= txq->nb_tx_desc)
739 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
740
741 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
742 " tx_first=%u tx_last=%u",
743 (unsigned) txq->port_id,
744 (unsigned) txq->queue_id,
745 (unsigned) pkt_len,
746 (unsigned) tx_id,
747 (unsigned) tx_last);
748
749 /*
750 * Make sure there are enough TX descriptors available to
751 * transmit the entire packet.
752 * nb_used better be less than or equal to txq->tx_rs_thresh
753 */
754 if (nb_used > txq->nb_tx_free) {
755 PMD_TX_FREE_LOG(DEBUG,
756 "Not enough free TX descriptors "
757 "nb_used=%4u nb_free=%4u "
758 "(port=%d queue=%d)",
759 nb_used, txq->nb_tx_free,
760 txq->port_id, txq->queue_id);
761
762 if (ixgbe_xmit_cleanup(txq) != 0) {
763 /* Could not clean any descriptors */
764 if (nb_tx == 0)
765 return 0;
766 goto end_of_tx;
767 }
768
769 /* nb_used better be <= txq->tx_rs_thresh */
770 if (unlikely(nb_used > txq->tx_rs_thresh)) {
771 PMD_TX_FREE_LOG(DEBUG,
772 "The number of descriptors needed to "
773 "transmit the packet exceeds the "
774 "RS bit threshold. This will impact "
775 "performance. "
776 "nb_used=%4u nb_free=%4u "
777 "tx_rs_thresh=%4u. "
778 "(port=%d queue=%d)",
779 nb_used, txq->nb_tx_free,
780 txq->tx_rs_thresh,
781 txq->port_id, txq->queue_id);
782 /*
783 * Loop here until there are enough TX
784 * descriptors or until the ring cannot be
785 * cleaned.
786 */
787 while (nb_used > txq->nb_tx_free) {
788 if (ixgbe_xmit_cleanup(txq) != 0) {
789 /*
790 * Could not clean any
791 * descriptors
792 */
793 if (nb_tx == 0)
794 return 0;
795 goto end_of_tx;
796 }
797 }
798 }
799 }
800
801 /*
802 * By now there are enough free TX descriptors to transmit
803 * the packet.
804 */
805
806 /*
807 * Set common flags of all TX Data Descriptors.
808 *
809 * The following bits must be set in all Data Descriptors:
810 * - IXGBE_ADVTXD_DTYP_DATA
811 * - IXGBE_ADVTXD_DCMD_DEXT
812 *
813 * The following bits must be set in the first Data Descriptor
814 * and are ignored in the other ones:
815 * - IXGBE_ADVTXD_DCMD_IFCS
816 * - IXGBE_ADVTXD_MAC_1588
817 * - IXGBE_ADVTXD_DCMD_VLE
818 *
819 * The following bits must only be set in the last Data
820 * Descriptor:
821 * - IXGBE_TXD_CMD_EOP
822 *
823 * The following bits can be set in any Data Descriptor, but
824 * are only set in the last Data Descriptor:
825 * - IXGBE_TXD_CMD_RS
826 */
827 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
828 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
829
830#ifdef RTE_LIBRTE_IEEE1588
831 if (ol_flags & PKT_TX_IEEE1588_TMST)
832 cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
833#endif
834
835 olinfo_status = 0;
836 if (tx_ol_req) {
837
838 if (ol_flags & PKT_TX_TCP_SEG) {
839 /* when TSO is on, the paylen in the descriptor is
840 * not the packet len but the TCP payload len */
841 pkt_len -= (tx_offload.l2_len +
842 tx_offload.l3_len + tx_offload.l4_len);
843 }
844
845 /*
846 * Setup the TX Advanced Context Descriptor if required
847 */
848 if (new_ctx) {
849 volatile struct ixgbe_adv_tx_context_desc *
850 ctx_txd;
851
852 ctx_txd = (volatile struct
853 ixgbe_adv_tx_context_desc *)
854 &txr[tx_id];
855
856 txn = &sw_ring[txe->next_id];
857 rte_prefetch0(&txn->mbuf->pool);
858
859 if (txe->mbuf != NULL) {
860 rte_pktmbuf_free_seg(txe->mbuf);
861 txe->mbuf = NULL;
862 }
863
864 ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
865 tx_offload, &tx_pkt->udata64);
866
867 txe->last_id = tx_last;
868 tx_id = txe->next_id;
869 txe = txn;
870 }
871
872 /*
873 * Set up the TX Advanced Data Descriptor.
874 * This path is taken whether a new context descriptor
875 * was built or an existing one is reused.
876 */
877 cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
878 olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
879 olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
880 }
881
882 olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
883#ifdef RTE_LIBRTE_SECURITY
884 if (use_ipsec)
885 olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
886#endif
887
888 m_seg = tx_pkt;
889 do {
890 txd = &txr[tx_id];
891 txn = &sw_ring[txe->next_id];
892 rte_prefetch0(&txn->mbuf->pool);
893
894 if (txe->mbuf != NULL)
895 rte_pktmbuf_free_seg(txe->mbuf);
896 txe->mbuf = m_seg;
897
898 /*
899 * Set up Transmit Data Descriptor.
900 */
901 slen = m_seg->data_len;
902 buf_dma_addr = rte_mbuf_data_iova(m_seg);
903 txd->read.buffer_addr =
904 rte_cpu_to_le_64(buf_dma_addr);
905 txd->read.cmd_type_len =
906 rte_cpu_to_le_32(cmd_type_len | slen);
907 txd->read.olinfo_status =
908 rte_cpu_to_le_32(olinfo_status);
909 txe->last_id = tx_last;
910 tx_id = txe->next_id;
911 txe = txn;
912 m_seg = m_seg->next;
913 } while (m_seg != NULL);
914
915 /*
916 * The last packet data descriptor needs End Of Packet (EOP)
917 */
918 cmd_type_len |= IXGBE_TXD_CMD_EOP;
919 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
920 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
921
922 /* Set RS bit only on threshold packets' last descriptor */
923 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
924 PMD_TX_FREE_LOG(DEBUG,
925 "Setting RS bit on TXD id="
926 "%4u (port=%d queue=%d)",
927 tx_last, txq->port_id, txq->queue_id);
928
929 cmd_type_len |= IXGBE_TXD_CMD_RS;
930
931 /* Update txq RS bit counters */
932 txq->nb_tx_used = 0;
933 txp = NULL;
934 } else
935 txp = txd;
936
937 txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
938 }
939
940end_of_tx:
941 /* set RS on last packet in the burst */
942 if (txp != NULL)
943 txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
944
945 rte_wmb();
946
947 /*
948 * Set the Transmit Descriptor Tail (TDT)
949 */
950 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
951 (unsigned) txq->port_id, (unsigned) txq->queue_id,
952 (unsigned) tx_id, (unsigned) nb_tx);
953 IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
954 txq->tx_tail = tx_id;
955
956 return nb_tx;
957}
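/*
 * Note: in the full-featured path above, RS is set on the last descriptor
 * of the packet that pushes nb_tx_used past tx_rs_thresh, and (via txp) on
 * the final packet of the burst, so descriptor write-backs stay bounded
 * without marking every packet.
 */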
958
959/*********************************************************************
960 *
961 * TX prep functions
962 *
963 **********************************************************************/
964uint16_t
965ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
966{
967 int i, ret;
968 uint64_t ol_flags;
969 struct rte_mbuf *m;
970 struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
971
972 for (i = 0; i < nb_pkts; i++) {
973 m = tx_pkts[i];
974 ol_flags = m->ol_flags;
975
976 /**
977 * Check if packet meets requirements for number of segments
978 *
979 * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and
980 * non-TSO
981 */
982
983 if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
984 rte_errno = -EINVAL;
985 return i;
986 }
987
988 if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
989 rte_errno = -ENOTSUP;
990 return i;
991 }
992
993#ifdef RTE_LIBRTE_ETHDEV_DEBUG
994 ret = rte_validate_tx_offload(m);
995 if (ret != 0) {
996 rte_errno = ret;
997 return i;
998 }
999#endif
1000 ret = rte_net_intel_cksum_prepare(m);
1001 if (ret != 0) {
1002 rte_errno = ret;
1003 return i;
1004 }
1005 }
1006
1007 return i;
1008}
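/*
 * Usage sketch (application side, not part of this driver): this function
 * is used as the tx_pkt_prepare handler, reached via rte_eth_tx_prepare()
 * and typically paired with the burst transmit call. The port_id, queue_id,
 * pkts and nb values below are hypothetical:
 *
 *	uint16_t n = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);
 *	n = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 */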
1009
1010/*********************************************************************
1011 *
1012 * RX functions
1013 *
1014 **********************************************************************/
1015
1016#define IXGBE_PACKET_TYPE_ETHER 0X00
1017#define IXGBE_PACKET_TYPE_IPV4 0X01
1018#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
1019#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
1020#define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
1021#define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
1022#define IXGBE_PACKET_TYPE_IPV4_EXT_TCP 0X13
1023#define IXGBE_PACKET_TYPE_IPV4_EXT_UDP 0X23
1024#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
1025#define IXGBE_PACKET_TYPE_IPV6 0X04
1026#define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
1027#define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
1028#define IXGBE_PACKET_TYPE_IPV6_SCTP 0X44
1029#define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
1030#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
1031#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
1032#define IXGBE_PACKET_TYPE_IPV6_EXT_SCTP 0X4C
1033#define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
1034#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
1035#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
1036#define IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP 0X45
1037#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6 0X07
1038#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP 0X17
1039#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP 0X27
1040#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP 0X47
1041#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
1042#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
1043#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
1044#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP 0X4D
1045#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT 0X0F
1046#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP 0X1F
1047#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP 0X2F
1048#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP 0X4F
1049
1050#define IXGBE_PACKET_TYPE_NVGRE 0X00
1051#define IXGBE_PACKET_TYPE_NVGRE_IPV4 0X01
1052#define IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP 0X11
1053#define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP 0X21
1054#define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP 0X41
1055#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT 0X03
1056#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP 0X13
1057#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP 0X23
1058#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP 0X43
1059#define IXGBE_PACKET_TYPE_NVGRE_IPV6 0X04
1060#define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP 0X14
1061#define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP 0X24
1062#define IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP 0X44
1063#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT 0X0C
1064#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP 0X1C
1065#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP 0X2C
1066#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP 0X4C
1067#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6 0X05
1068#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP 0X15
1069#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP 0X25
1070#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT 0X0D
1071#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP 0X1D
1072#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP 0X2D
1073
1074#define IXGBE_PACKET_TYPE_VXLAN 0X80
1075#define IXGBE_PACKET_TYPE_VXLAN_IPV4 0X81
1076#define IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP 0x91
1077#define IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP 0xA1
1078#define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP 0xC1
1079#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT 0x83
1080#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP 0X93
1081#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP 0XA3
1082#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP 0XC3
1083#define IXGBE_PACKET_TYPE_VXLAN_IPV6 0X84
1084#define IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP 0X94
1085#define IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP 0XA4
1086#define IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP 0XC4
1087#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT 0X8C
1088#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP 0X9C
1089#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP 0XAC
1090#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP 0XCC
1091#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6 0X85
1092#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP 0X95
1093#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP 0XA5
1094#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT 0X8D
1095#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP 0X9D
1096#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP 0XAD
1097
1098/**
1099 * Use two different tables, one for normal packets and one for
1100 * tunnel packets, to save space.
1101 */
1102const uint32_t
1103 ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
1104 [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
1105 [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
1106 RTE_PTYPE_L3_IPV4,
1107 [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1108 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
1109 [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1110 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
1111 [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1112 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
1113 [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1114 RTE_PTYPE_L3_IPV4_EXT,
1115 [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1116 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
1117 [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1118 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
1119 [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1120 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
1121 [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
1122 RTE_PTYPE_L3_IPV6,
1123 [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1124 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
1125 [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1126 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
1127 [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1128 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
1129 [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1130 RTE_PTYPE_L3_IPV6_EXT,
1131 [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1132 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
1133 [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1134 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
1135 [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1136 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
1137 [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1138 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1139 RTE_PTYPE_INNER_L3_IPV6,
1140 [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1141 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1142 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1143 [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1144 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1145 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1146 [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1147 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1148 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1149 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
1150 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1151 RTE_PTYPE_INNER_L3_IPV6,
1152 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1153 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1154 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1155 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1156 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1157 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1158 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1159 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1160 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1161 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1162 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1163 RTE_PTYPE_INNER_L3_IPV6_EXT,
1164 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1165 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1166 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1167 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1168 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1169 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1170 [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1171 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
1172 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1173 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1174 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1175 RTE_PTYPE_INNER_L3_IPV6_EXT,
1176 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1177 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1178 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1179 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1180 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1181 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1182 [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
1183 RTE_PTYPE_L2_ETHER |
1184 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
1185 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1186};
1187
1188const uint32_t
1189 ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX] __rte_cache_aligned = {
1190 [IXGBE_PACKET_TYPE_NVGRE] = RTE_PTYPE_L2_ETHER |
1191 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1192 RTE_PTYPE_INNER_L2_ETHER,
1193 [IXGBE_PACKET_TYPE_NVGRE_IPV4] = RTE_PTYPE_L2_ETHER |
1194 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1195 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1196 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1197 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1198 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT,
1199 [IXGBE_PACKET_TYPE_NVGRE_IPV6] = RTE_PTYPE_L2_ETHER |
1200 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1201 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6,
1202 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1203 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1204 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1205 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1206 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1207 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT,
1208 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1209 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1210 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1211 [IXGBE_PACKET_TYPE_NVGRE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1212 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1213 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1214 RTE_PTYPE_INNER_L4_TCP,
1215 [IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1216 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1217 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1218 RTE_PTYPE_INNER_L4_TCP,
1219 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1220 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1221 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1222 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1223 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1224 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1225 RTE_PTYPE_INNER_L4_TCP,
1226 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_TCP] =
1227 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1228 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1229 RTE_PTYPE_INNER_L3_IPV4,
1230 [IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1231 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1232 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1233 RTE_PTYPE_INNER_L4_UDP,
1234 [IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1235 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1236 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1237 RTE_PTYPE_INNER_L4_UDP,
1238 [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1239 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1240 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
1241 RTE_PTYPE_INNER_L4_SCTP,
1242 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1243 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1244 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1245 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1246 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1247 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1248 RTE_PTYPE_INNER_L4_UDP,
1249 [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1250 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1251 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
1252 RTE_PTYPE_INNER_L4_SCTP,
1253 [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
1254 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1255 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
1256 RTE_PTYPE_INNER_L3_IPV4,
1257 [IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1258 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1259 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 |
1260 RTE_PTYPE_INNER_L4_SCTP,
1261 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1262 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1263 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1264 RTE_PTYPE_INNER_L4_SCTP,
1265 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1266 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1267 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1268 RTE_PTYPE_INNER_L4_TCP,
1269 [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1270 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
1271 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
1272 RTE_PTYPE_INNER_L4_UDP,
1273
1274 [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
1275 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1276 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER,
1277 [IXGBE_PACKET_TYPE_VXLAN_IPV4] = RTE_PTYPE_L2_ETHER |
1278 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1279 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1280 RTE_PTYPE_INNER_L3_IPV4,
1281 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
1282 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1283 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1284 RTE_PTYPE_INNER_L3_IPV4_EXT,
1285 [IXGBE_PACKET_TYPE_VXLAN_IPV6] = RTE_PTYPE_L2_ETHER |
1286 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1287 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1288 RTE_PTYPE_INNER_L3_IPV6,
1289 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
1290 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1291 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1292 RTE_PTYPE_INNER_L3_IPV4,
1293 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1294 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1295 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1296 RTE_PTYPE_INNER_L3_IPV6_EXT,
1297 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
1298 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1299 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1300 RTE_PTYPE_INNER_L3_IPV4,
1301 [IXGBE_PACKET_TYPE_VXLAN_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
1302 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1303 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1304 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP,
1305 [IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1306 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1307 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1308 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
1309 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
1310 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1311 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1312 RTE_PTYPE_INNER_L3_IPV4,
1313 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1314 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1315 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1316 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
1317 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_TCP] =
1318 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1319 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1320 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1321 [IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
1322 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1323 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1324 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_UDP,
1325 [IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1326 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1327 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1328 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
1329 [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
1330 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1331 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1332 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
1333 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
1334 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1335 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1336 RTE_PTYPE_INNER_L3_IPV4,
1337 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1338 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1339 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1340 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
1341 [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1342 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1343 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1344 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
1345 [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
1346 RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1347 RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
1348 RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
1349 [IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
1350 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1351 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1352 RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_SCTP,
1353 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
1354 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1355 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1356 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
1357 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
1358 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1359 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1360 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
1361 [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
1362 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
1363 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
1364 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
1365};
1366
1367/* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. */
1368static inline uint32_t
1369ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
1370{
1371
1372 if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1373 return RTE_PTYPE_UNKNOWN;
1374
1375 pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) & ptype_mask;
1376
1377 /* For tunnel packet */
1378 if (pkt_info & IXGBE_PACKET_TYPE_TUNNEL_BIT) {
1379 /* Remove the tunnel bit to save the space. */
1380 pkt_info &= IXGBE_PACKET_TYPE_MASK_TUNNEL;
1381 return ptype_table_tn[pkt_info];
1382 }
1383
1384 /**
1385 * For x550, if it's not tunnel,
1386 * tunnel type bit should be set to 0.
1387 * Reuse 82599's mask.
1388 */
1389 pkt_info &= IXGBE_PACKET_TYPE_MASK_82599;
1390
1391 return ptype_table[pkt_info];
1392}
1393
1394static inline uint64_t
1395ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
1396{
1397 static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
1398 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
1399 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
1400 PKT_RX_RSS_HASH, 0, 0, 0,
1401 0, 0, 0, PKT_RX_FDIR,
1402 };
1403#ifdef RTE_LIBRTE_IEEE1588
1404 static uint64_t ip_pkt_etqf_map[8] = {
1405 0, 0, 0, PKT_RX_IEEE1588_PTP,
1406 0, 0, 0, 0,
1407 };
1408
1409 if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
1410 return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
1411 ip_rss_types_map[pkt_info & 0XF];
1412 else
1413 return ip_rss_types_map[pkt_info & 0XF];
1414#else
1415 return ip_rss_types_map[pkt_info & 0XF];
1416#endif
1417}
1418
1419static inline uint64_t
1420rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
1421{
1422 uint64_t pkt_flags;
1423
1424 /*
1425 * Check if VLAN present only.
1426 * Do not check here whether the L3/L4 Rx checksum was done by the NIC;
1427 * that can be found from the rte_eth_rxmode.offloads flag.
1428 */
1429 pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0;
1430
1431#ifdef RTE_LIBRTE_IEEE1588
1432 if (rx_status & IXGBE_RXD_STAT_TMST)
1433 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
1434#endif
1435 return pkt_flags;
1436}
1437
1438static inline uint64_t
1439rx_desc_error_to_pkt_flags(uint32_t rx_status)
1440{
1441 uint64_t pkt_flags;
1442
1443 /*
1444 * Bit 31: IPE, IPv4 checksum error
1445 * Bit 30: L4I, L4I integrity error
1446 */
1447 static uint64_t error_to_pkt_flags_map[4] = {
1448 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
1449 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
1450 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
1451 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
1452 };
1453 pkt_flags = error_to_pkt_flags_map[(rx_status >>
1454 IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
1455
1456 if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
1457 (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
1458 pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
1459 }
1460
1461#ifdef RTE_LIBRTE_SECURITY
1462 if (rx_status & IXGBE_RXD_STAT_SECP) {
1463 pkt_flags |= PKT_RX_SEC_OFFLOAD;
1464 if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
1465 pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
1466 }
1467#endif
1468
1469 return pkt_flags;
1470}
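/*
 * Note: the two checksum error bits extracted above (IPE and L4I) form a
 * 2-bit index into error_to_pkt_flags_map, yielding the four possible
 * GOOD/BAD combinations for the IP and L4 checksum flags.
 */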
1471
1472/*
1473 * LOOK_AHEAD defines how many desc statuses to check beyond the
1474 * current descriptor.
1475 * It must be a pound define for optimal performance.
1476 * Do not change the value of LOOK_AHEAD, as the ixgbe_rx_scan_hw_ring
1477 * function only works with LOOK_AHEAD=8.
1478 */
1479#define LOOK_AHEAD 8
1480#if (LOOK_AHEAD != 8)
1481#error "PMD IXGBE: LOOK_AHEAD must be 8\n"
1482#endif
1483static inline int
1484ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
1485{
1486 volatile union ixgbe_adv_rx_desc *rxdp;
1487 struct ixgbe_rx_entry *rxep;
1488 struct rte_mbuf *mb;
1489 uint16_t pkt_len;
1490 uint64_t pkt_flags;
1491 int nb_dd;
1492 uint32_t s[LOOK_AHEAD];
1493 uint32_t pkt_info[LOOK_AHEAD];
1494 int i, j, nb_rx = 0;
1495 uint32_t status;
1496 uint64_t vlan_flags = rxq->vlan_flags;
1497
1498 /* get references to current descriptor and S/W ring entry */
1499 rxdp = &rxq->rx_ring[rxq->rx_tail];
1500 rxep = &rxq->sw_ring[rxq->rx_tail];
1501
1502 status = rxdp->wb.upper.status_error;
1503 /* check to make sure there is at least 1 packet to receive */
1504 if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1505 return 0;
1506
1507 /*
1508 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
1509 * reference packets that are ready to be received.
1510 */
1511 for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
1512 i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
1513 /* Read desc statuses backwards to avoid race condition */
1514 for (j = 0; j < LOOK_AHEAD; j++)
1515 s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
1516
1517 rte_smp_rmb();
1518
1519 /* Compute how many status bits were set */
1520 for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
1521 (s[nb_dd] & IXGBE_RXDADV_STAT_DD); nb_dd++)
1522 ;
1523
1524 for (j = 0; j < nb_dd; j++)
1525 pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
1526 lo_dword.data);
1527
1528 nb_rx += nb_dd;
1529
1530 /* Translate descriptor info to mbuf format */
1531 for (j = 0; j < nb_dd; ++j) {
1532 mb = rxep[j].mbuf;
1533 pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
1534 rxq->crc_len;
1535 mb->data_len = pkt_len;
1536 mb->pkt_len = pkt_len;
1537 mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
1538
1539 /* convert descriptor fields to rte mbuf flags */
1540 pkt_flags = rx_desc_status_to_pkt_flags(s[j],
1541 vlan_flags);
1542 pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1543 pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags
1544 ((uint16_t)pkt_info[j]);
1545 mb->ol_flags = pkt_flags;
1546 mb->packet_type =
1547 ixgbe_rxd_pkt_info_to_pkt_type
1548 (pkt_info[j], rxq->pkt_type_mask);
1549
1550 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1551 mb->hash.rss = rte_le_to_cpu_32(
1552 rxdp[j].wb.lower.hi_dword.rss);
1553 else if (pkt_flags & PKT_RX_FDIR) {
1554 mb->hash.fdir.hash = rte_le_to_cpu_16(
1555 rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
1556 IXGBE_ATR_HASH_MASK;
1557 mb->hash.fdir.id = rte_le_to_cpu_16(
1558 rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
1559 }
1560 }
1561
1562 /* Move mbuf pointers from the S/W ring to the stage */
1563 for (j = 0; j < LOOK_AHEAD; ++j) {
1564 rxq->rx_stage[i + j] = rxep[j].mbuf;
1565 }
1566
1567 /* stop if all requested packets could not be received */
1568 if (nb_dd != LOOK_AHEAD)
1569 break;
1570 }
1571
1572 /* clear software ring entries so we can cleanup correctly */
1573 for (i = 0; i < nb_rx; ++i) {
1574 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1575 }
1576
1577
1578 return nb_rx;
1579}
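/*
 * Note: scanned mbufs are parked in rxq->rx_stage[] rather than returned
 * directly; rx_recv_pkts()/ixgbe_rx_fill_from_stage() below hand them to
 * the application in the requested burst sizes.
 */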
1580
1581static inline int
1582ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
1583{
1584 volatile union ixgbe_adv_rx_desc *rxdp;
1585 struct ixgbe_rx_entry *rxep;
1586 struct rte_mbuf *mb;
1587 uint16_t alloc_idx;
1588 __le64 dma_addr;
1589 int diag, i;
1590
1591 /* allocate buffers in bulk directly into the S/W ring */
1592 alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1593 rxep = &rxq->sw_ring[alloc_idx];
1594 diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1595 rxq->rx_free_thresh);
1596 if (unlikely(diag != 0))
1597 return -ENOMEM;
1598
1599 rxdp = &rxq->rx_ring[alloc_idx];
1600 for (i = 0; i < rxq->rx_free_thresh; ++i) {
1601 /* populate the static rte mbuf fields */
1602 mb = rxep[i].mbuf;
1603 if (reset_mbuf) {
1604 mb->port = rxq->port_id;
1605 }
1606
1607 rte_mbuf_refcnt_set(mb, 1);
1608 mb->data_off = RTE_PKTMBUF_HEADROOM;
1609
1610 /* populate the descriptors */
1611 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1612 rxdp[i].read.hdr_addr = 0;
1613 rxdp[i].read.pkt_addr = dma_addr;
1614 }
1615
1616 /* update state of internal queue structure */
1617 rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1618 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1619 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
1620
1621 /* no errors */
1622 return 0;
1623}
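/*
 * A minimal worked example of the bookkeeping above, assuming nb_rx_desc = 128
 * and rx_free_thresh = 32: rx_free_trigger starts at 31 and advances
 *
 *   31 -> 63 -> 95 -> 127 -> 31 -> ...
 *
 * so each successful call refills exactly one 32-descriptor block and the
 * trigger wraps just before the end of the ring.
 */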
1624
1625static inline uint16_t
1626ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1627 uint16_t nb_pkts)
1628{
1629 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1630 int i;
1631
1632 /* how many packets are ready to return? */
1633 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1634
1635 /* copy mbuf pointers to the application's packet list */
1636 for (i = 0; i < nb_pkts; ++i)
1637 rx_pkts[i] = stage[i];
1638
1639 /* update internal queue state */
1640 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1641 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1642
1643 return nb_pkts;
1644}
1645
1646static inline uint16_t
1647rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1648 uint16_t nb_pkts)
1649{
1650 struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
1651 uint16_t nb_rx = 0;
1652
1653 /* Any previously recv'd pkts will be returned from the Rx stage */
1654 if (rxq->rx_nb_avail)
1655 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1656
1657 /* Scan the H/W ring for packets to receive */
1658 nb_rx = (uint16_t)ixgbe_rx_scan_hw_ring(rxq);
1659
1660 /* update internal queue state */
1661 rxq->rx_next_avail = 0;
1662 rxq->rx_nb_avail = nb_rx;
1663 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1664
1665 /* if required, allocate new buffers to replenish descriptors */
1666 if (rxq->rx_tail > rxq->rx_free_trigger) {
1667 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1668
1669 if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
1670 int i, j;
1671
1672 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1673 "queue_id=%u", (unsigned) rxq->port_id,
1674 (unsigned) rxq->queue_id);
1675
1676 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
1677 rxq->rx_free_thresh;
1678
1679 /*
1680 * Need to rewind any previous receives if we cannot
1681 * allocate new buffers to replenish the old ones.
1682 */
1683 rxq->rx_nb_avail = 0;
1684 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1685 for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1686 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1687
1688 return 0;
1689 }
1690
1691 /* update tail pointer */
1692 rte_wmb();
1693 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
1694 cur_free_trigger);
1695 }
1696
1697 if (rxq->rx_tail >= rxq->nb_rx_desc)
1698 rxq->rx_tail = 0;
1699
1700 /* received any packets this loop? */
1701 if (rxq->rx_nb_avail)
1702 return ixgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1703
1704 return 0;
1705}
1706
1707/* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
1708uint16_t
1709ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1710 uint16_t nb_pkts)
1711{
1712 uint16_t nb_rx;
1713
1714 if (unlikely(nb_pkts == 0))
1715 return 0;
1716
1717 if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
1718 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1719
1720 /* request is relatively large, chunk it up */
1721 nb_rx = 0;
1722 while (nb_pkts) {
1723 uint16_t ret, n;
1724
1725 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
1726 ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1727 nb_rx = (uint16_t)(nb_rx + ret);
1728 nb_pkts = (uint16_t)(nb_pkts - ret);
1729 if (ret < n)
1730 break;
1731 }
1732
1733 return nb_rx;
1734}
1735
1736uint16_t
1737ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1738 uint16_t nb_pkts)
1739{
1740 struct ixgbe_rx_queue *rxq;
1741 volatile union ixgbe_adv_rx_desc *rx_ring;
1742 volatile union ixgbe_adv_rx_desc *rxdp;
1743 struct ixgbe_rx_entry *sw_ring;
1744 struct ixgbe_rx_entry *rxe;
1745 struct rte_mbuf *rxm;
1746 struct rte_mbuf *nmb;
1747 union ixgbe_adv_rx_desc rxd;
1748 uint64_t dma_addr;
1749 uint32_t staterr;
1750 uint32_t pkt_info;
1751 uint16_t pkt_len;
1752 uint16_t rx_id;
1753 uint16_t nb_rx;
1754 uint16_t nb_hold;
1755 uint64_t pkt_flags;
1756 uint64_t vlan_flags;
1757
1758 nb_rx = 0;
1759 nb_hold = 0;
1760 rxq = rx_queue;
1761 rx_id = rxq->rx_tail;
1762 rx_ring = rxq->rx_ring;
1763 sw_ring = rxq->sw_ring;
1764 vlan_flags = rxq->vlan_flags;
1765 while (nb_rx < nb_pkts) {
1766 /*
1767 * The order of operations here is important as the DD status
1768 * bit must not be read after any other descriptor fields.
1769 * rx_ring and rxdp are pointing to volatile data so the order
1770 * of accesses cannot be reordered by the compiler. If they were
1771 * not volatile, they could be reordered which could lead to
1772 * using invalid descriptor fields when read from rxd.
1773 */
1774 rxdp = &rx_ring[rx_id];
1775 staterr = rxdp->wb.upper.status_error;
1776 if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
1777 break;
1778 rxd = *rxdp;
1779
1780 /*
1781 * End of packet.
1782 *
1783 * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
1784 * is likely to be invalid and to be dropped by the various
1785 * validation checks performed by the network stack.
1786 *
1787 * Allocate a new mbuf to replenish the RX ring descriptor.
1788 * If the allocation fails:
1789 * - arrange for that RX descriptor to be the first one
1790 * being parsed the next time the receive function is
1791 * invoked [on the same queue].
1792 *
1793 * - Stop parsing the RX ring and return immediately.
1794 *
 1795		 * This policy does not drop the packet received in the RX
 1796		 * descriptor for which the allocation of a new mbuf failed.
 1797		 * Thus, it allows that packet to be later retrieved if
 1798		 * mbufs have been freed in the meantime.
1799 * As a side effect, holding RX descriptors instead of
1800 * systematically giving them back to the NIC may lead to
1801 * RX ring exhaustion situations.
1802 * However, the NIC can gracefully prevent such situations
1803 * to happen by sending specific "back-pressure" flow control
1804 * frames to its peer(s).
1805 */
1806 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1807 "ext_err_stat=0x%08x pkt_len=%u",
1808 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1809 (unsigned) rx_id, (unsigned) staterr,
1810 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1811
1812 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1813 if (nmb == NULL) {
1814 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1815 "queue_id=%u", (unsigned) rxq->port_id,
1816 (unsigned) rxq->queue_id);
1817 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1818 break;
1819 }
1820
1821 nb_hold++;
1822 rxe = &sw_ring[rx_id];
1823 rx_id++;
1824 if (rx_id == rxq->nb_rx_desc)
1825 rx_id = 0;
1826
1827 /* Prefetch next mbuf while processing current one. */
1828 rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
1829
1830 /*
1831 * When next RX descriptor is on a cache-line boundary,
1832 * prefetch the next 4 RX descriptors and the next 8 pointers
1833 * to mbufs.
1834 */
1835 if ((rx_id & 0x3) == 0) {
1836 rte_ixgbe_prefetch(&rx_ring[rx_id]);
1837 rte_ixgbe_prefetch(&sw_ring[rx_id]);
1838 }
1839
1840 rxm = rxe->mbuf;
1841 rxe->mbuf = nmb;
1842 dma_addr =
11fdf7f2 1843 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1844 rxdp->read.hdr_addr = 0;
1845 rxdp->read.pkt_addr = dma_addr;
1846
1847 /*
1848 * Initialize the returned mbuf.
1849 * 1) setup generic mbuf fields:
1850 * - number of segments,
1851 * - next segment,
1852 * - packet length,
1853 * - RX port identifier.
1854 * 2) integrate hardware offload data, if any:
1855 * - RSS flag & hash,
1856 * - IP checksum flag,
1857 * - VLAN TCI, if any,
1858 * - error flags.
1859 */
1860 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
1861 rxq->crc_len);
1862 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1863 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1864 rxm->nb_segs = 1;
1865 rxm->next = NULL;
1866 rxm->pkt_len = pkt_len;
1867 rxm->data_len = pkt_len;
1868 rxm->port = rxq->port_id;
1869
1870 pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
11fdf7f2 1871 /* Only valid if PKT_RX_VLAN set in pkt_flags */
1872 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1873
1874 pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
1875 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1876 pkt_flags = pkt_flags |
1877 ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1878 rxm->ol_flags = pkt_flags;
1879 rxm->packet_type =
1880 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info,
1881 rxq->pkt_type_mask);
1882
1883 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1884 rxm->hash.rss = rte_le_to_cpu_32(
1885 rxd.wb.lower.hi_dword.rss);
1886 else if (pkt_flags & PKT_RX_FDIR) {
1887 rxm->hash.fdir.hash = rte_le_to_cpu_16(
1888 rxd.wb.lower.hi_dword.csum_ip.csum) &
1889 IXGBE_ATR_HASH_MASK;
1890 rxm->hash.fdir.id = rte_le_to_cpu_16(
1891 rxd.wb.lower.hi_dword.csum_ip.ip_id);
1892 }
1893 /*
1894 * Store the mbuf address into the next entry of the array
1895 * of returned packets.
1896 */
1897 rx_pkts[nb_rx++] = rxm;
1898 }
1899 rxq->rx_tail = rx_id;
1900
1901 /*
1902 * If the number of free RX descriptors is greater than the RX free
1903 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1904 * register.
1905 * Update the RDT with the value of the last processed RX descriptor
1906 * minus 1, to guarantee that the RDT register is never equal to the
 1907	 * RDH register, which creates a "full" ring situation from the
1908 * hardware point of view...
1909 */
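	/*
	 * Worked example (values assumed for illustration): with
	 * nb_rx_desc = 128 and rx_free_thresh = 32, if this call consumed
	 * descriptors 96..127 then rx_id has wrapped to 0.  Once nb_hold
	 * exceeds 32, the code below writes RDT = 127 (rx_id - 1 modulo the
	 * ring size), keeping one descriptor between RDT and RDH as required.
	 */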
1910 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1911 if (nb_hold > rxq->rx_free_thresh) {
1912 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1913 "nb_hold=%u nb_rx=%u",
1914 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1915 (unsigned) rx_id, (unsigned) nb_hold,
1916 (unsigned) nb_rx);
1917 rx_id = (uint16_t) ((rx_id == 0) ?
1918 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1919 IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1920 nb_hold = 0;
1921 }
1922 rxq->nb_rx_hold = nb_hold;
1923 return nb_rx;
1924}
1925
1926/**
1927 * Detect an RSC descriptor.
1928 */
1929static inline uint32_t
1930ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1931{
1932 return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
1933 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1934}
1935
1936/**
1937 * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1938 *
1939 * Fill the following info in the HEAD buffer of the Rx cluster:
1940 * - RX port identifier
1941 * - hardware offload data, if any:
1942 * - RSS flag & hash
1943 * - IP checksum flag
1944 * - VLAN TCI, if any
1945 * - error flags
1946 * @head HEAD of the packet cluster
1947 * @desc HW descriptor to get data from
1948 * @rxq Pointer to the Rx queue
1949 */
1950static inline void
1951ixgbe_fill_cluster_head_buf(
1952 struct rte_mbuf *head,
1953 union ixgbe_adv_rx_desc *desc,
1954 struct ixgbe_rx_queue *rxq,
1955 uint32_t staterr)
1956{
1957 uint32_t pkt_info;
1958 uint64_t pkt_flags;
1959
1960 head->port = rxq->port_id;
1961
11fdf7f2 1962 /* The vlan_tci field is only valid when PKT_RX_VLAN is
1963 * set in the pkt_flags field.
1964 */
1965 head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
1966 pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
1967 pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
1968 pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1969 pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
1970 head->ol_flags = pkt_flags;
1971 head->packet_type =
1972 ixgbe_rxd_pkt_info_to_pkt_type(pkt_info, rxq->pkt_type_mask);
1973
1974 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1975 head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
1976 else if (pkt_flags & PKT_RX_FDIR) {
1977 head->hash.fdir.hash =
1978 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
1979 & IXGBE_ATR_HASH_MASK;
1980 head->hash.fdir.id =
1981 rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
1982 }
1983}
1984
1985/**
 1986 * ixgbe_recv_pkts_lro - receive handler for the LRO case.
1987 *
1988 * @rx_queue Rx queue handle
1989 * @rx_pkts table of received packets
1990 * @nb_pkts size of rx_pkts table
 1991 * @bulk_alloc if TRUE bulk allocation is used for HW ring refilling
1992 *
1993 * Handles the Rx HW ring completions when RSC feature is configured. Uses an
1994 * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
1995 *
1996 * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
1997 * 1) When non-EOP RSC completion arrives:
1998 * a) Update the HEAD of the current RSC aggregation cluster with the new
1999 * segment's data length.
2000 * b) Set the "next" pointer of the current segment to point to the segment
2001 * at the NEXTP index.
2002 * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
2003 * in the sw_rsc_ring.
2004 * 2) When EOP arrives we just update the cluster's total length and offload
2005 * flags and deliver the cluster up to the upper layers. In our case - put it
2006 * in the rx_pkts table.
2007 *
2008 * Returns the number of received packets/clusters (according to the "bulk
2009 * receive" interface).
2010 */
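/*
 * Illustrative example of the NEXTP chaining described above (descriptor
 * numbers are made up): if descriptor 5 completes with NEXTP = 9 and
 * descriptor 9 later completes with NEXTP = 14, the segments are linked
 * 5 -> 9 -> 14, and the cluster HEAD is carried forward in
 * sw_sc_ring[9].fbuf and then sw_sc_ring[14].fbuf until the EOP descriptor
 * delivers the whole cluster to rx_pkts[].
 */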
2011static inline uint16_t
2012ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
2013 bool bulk_alloc)
2014{
2015 struct ixgbe_rx_queue *rxq = rx_queue;
2016 volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
2017 struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
2018 struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
2019 uint16_t rx_id = rxq->rx_tail;
2020 uint16_t nb_rx = 0;
2021 uint16_t nb_hold = rxq->nb_rx_hold;
2022 uint16_t prev_id = rxq->rx_tail;
2023
2024 while (nb_rx < nb_pkts) {
2025 bool eop;
2026 struct ixgbe_rx_entry *rxe;
2027 struct ixgbe_scattered_rx_entry *sc_entry;
2028 struct ixgbe_scattered_rx_entry *next_sc_entry;
2029 struct ixgbe_rx_entry *next_rxe = NULL;
2030 struct rte_mbuf *first_seg;
2031 struct rte_mbuf *rxm;
9f95a23c 2032 struct rte_mbuf *nmb = NULL;
2033 union ixgbe_adv_rx_desc rxd;
2034 uint16_t data_len;
2035 uint16_t next_id;
2036 volatile union ixgbe_adv_rx_desc *rxdp;
2037 uint32_t staterr;
2038
2039next_desc:
2040 /*
2041 * The code in this whole file uses the volatile pointer to
2042 * ensure the read ordering of the status and the rest of the
2043 * descriptor fields (on the compiler level only!!!). This is so
 2044		 * UGLY - why not just use the compiler barrier instead? DPDK
2045 * even has the rte_compiler_barrier() for that.
2046 *
2047 * But most importantly this is just wrong because this doesn't
2048 * ensure memory ordering in a general case at all. For
2049 * instance, DPDK is supposed to work on Power CPUs where
2050 * compiler barrier may just not be enough!
2051 *
2052 * I tried to write only this function properly to have a
2053 * starting point (as a part of an LRO/RSC series) but the
2054 * compiler cursed at me when I tried to cast away the
2055 * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
2056 * keeping it the way it is for now.
2057 *
2058 * The code in this file is broken in so many other places and
2059 * will just not work on a big endian CPU anyway therefore the
2060 * lines below will have to be revisited together with the rest
2061 * of the ixgbe PMD.
2062 *
2063 * TODO:
9f95a23c 2064 * - Get rid of "volatile" and let the compiler do its job.
2065 * - Use the proper memory barrier (rte_rmb()) to ensure the
2066 * memory ordering below.
2067 */
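		/*
		 * A sketch of the ordering the comment above asks for (not a
		 * functional change; it mirrors the bulk-alloc scan earlier in
		 * this file): read the status word first, then issue the
		 * barrier, then read the rest of the descriptor:
		 *
		 *   staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
		 *   if (!(staterr & IXGBE_RXDADV_STAT_DD))
		 *           break;
		 *   rte_smp_rmb();
		 *   rxd = *rxdp;
		 */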
2068 rxdp = &rx_ring[rx_id];
2069 staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
2070
2071 if (!(staterr & IXGBE_RXDADV_STAT_DD))
2072 break;
2073
2074 rxd = *rxdp;
2075
2076 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
2077 "staterr=0x%x data_len=%u",
2078 rxq->port_id, rxq->queue_id, rx_id, staterr,
2079 rte_le_to_cpu_16(rxd.wb.upper.length));
2080
2081 if (!bulk_alloc) {
2082 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
2083 if (nmb == NULL) {
2084 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
2085 "port_id=%u queue_id=%u",
2086 rxq->port_id, rxq->queue_id);
2087
2088 rte_eth_devices[rxq->port_id].data->
2089 rx_mbuf_alloc_failed++;
2090 break;
2091 }
2092 } else if (nb_hold > rxq->rx_free_thresh) {
2093 uint16_t next_rdt = rxq->rx_free_trigger;
2094
2095 if (!ixgbe_rx_alloc_bufs(rxq, false)) {
2096 rte_wmb();
2097 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
2098 next_rdt);
2099 nb_hold -= rxq->rx_free_thresh;
2100 } else {
2101 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
2102 "port_id=%u queue_id=%u",
2103 rxq->port_id, rxq->queue_id);
2104
2105 rte_eth_devices[rxq->port_id].data->
2106 rx_mbuf_alloc_failed++;
2107 break;
2108 }
2109 }
2110
2111 nb_hold++;
2112 rxe = &sw_ring[rx_id];
2113 eop = staterr & IXGBE_RXDADV_STAT_EOP;
2114
2115 next_id = rx_id + 1;
2116 if (next_id == rxq->nb_rx_desc)
2117 next_id = 0;
2118
2119 /* Prefetch next mbuf while processing current one. */
2120 rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
2121
2122 /*
2123 * When next RX descriptor is on a cache-line boundary,
2124 * prefetch the next 4 RX descriptors and the next 4 pointers
2125 * to mbufs.
2126 */
2127 if ((next_id & 0x3) == 0) {
2128 rte_ixgbe_prefetch(&rx_ring[next_id]);
2129 rte_ixgbe_prefetch(&sw_ring[next_id]);
2130 }
2131
2132 rxm = rxe->mbuf;
2133
2134 if (!bulk_alloc) {
2135 __le64 dma =
11fdf7f2 2136 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2137 /*
2138 * Update RX descriptor with the physical address of the
2139 * new data buffer of the new allocated mbuf.
2140 */
2141 rxe->mbuf = nmb;
2142
2143 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2144 rxdp->read.hdr_addr = 0;
2145 rxdp->read.pkt_addr = dma;
2146 } else
2147 rxe->mbuf = NULL;
2148
2149 /*
2150 * Set data length & data buffer address of mbuf.
2151 */
2152 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
2153 rxm->data_len = data_len;
2154
2155 if (!eop) {
2156 uint16_t nextp_id;
2157 /*
2158 * Get next descriptor index:
2159 * - For RSC it's in the NEXTP field.
2160 * - For a scattered packet - it's just a following
2161 * descriptor.
2162 */
2163 if (ixgbe_rsc_count(&rxd))
2164 nextp_id =
2165 (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
2166 IXGBE_RXDADV_NEXTP_SHIFT;
2167 else
2168 nextp_id = next_id;
2169
2170 next_sc_entry = &sw_sc_ring[nextp_id];
2171 next_rxe = &sw_ring[nextp_id];
2172 rte_ixgbe_prefetch(next_rxe);
2173 }
2174
2175 sc_entry = &sw_sc_ring[rx_id];
2176 first_seg = sc_entry->fbuf;
2177 sc_entry->fbuf = NULL;
2178
2179 /*
2180 * If this is the first buffer of the received packet,
2181 * set the pointer to the first mbuf of the packet and
2182 * initialize its context.
2183 * Otherwise, update the total length and the number of segments
2184 * of the current scattered packet, and update the pointer to
2185 * the last mbuf of the current packet.
2186 */
2187 if (first_seg == NULL) {
2188 first_seg = rxm;
2189 first_seg->pkt_len = data_len;
2190 first_seg->nb_segs = 1;
2191 } else {
2192 first_seg->pkt_len += data_len;
2193 first_seg->nb_segs++;
2194 }
2195
2196 prev_id = rx_id;
2197 rx_id = next_id;
2198
2199 /*
2200 * If this is not the last buffer of the received packet, update
2201 * the pointer to the first mbuf at the NEXTP entry in the
2202 * sw_sc_ring and continue to parse the RX ring.
2203 */
2204 if (!eop && next_rxe) {
2205 rxm->next = next_rxe->mbuf;
2206 next_sc_entry->fbuf = first_seg;
2207 goto next_desc;
2208 }
2209
2210 /* Initialize the first mbuf of the returned packet */
2211 ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
2212
2213 /*
 2214		 * Deal with the case when HW CRC strip is disabled.
2215 * That can't happen when LRO is enabled, but still could
2216 * happen for scattered RX mode.
2217 */
2218 first_seg->pkt_len -= rxq->crc_len;
2219 if (unlikely(rxm->data_len <= rxq->crc_len)) {
2220 struct rte_mbuf *lp;
2221
2222 for (lp = first_seg; lp->next != rxm; lp = lp->next)
2223 ;
2224
2225 first_seg->nb_segs--;
2226 lp->data_len -= rxq->crc_len - rxm->data_len;
2227 lp->next = NULL;
2228 rte_pktmbuf_free_seg(rxm);
2229 } else
2230 rxm->data_len -= rxq->crc_len;
2231
2232 /* Prefetch data of first segment, if configured to do so. */
2233 rte_packet_prefetch((char *)first_seg->buf_addr +
2234 first_seg->data_off);
2235
2236 /*
2237 * Store the mbuf address into the next entry of the array
2238 * of returned packets.
2239 */
2240 rx_pkts[nb_rx++] = first_seg;
2241 }
2242
2243 /*
2244 * Record index of the next RX descriptor to probe.
2245 */
2246 rxq->rx_tail = rx_id;
2247
2248 /*
2249 * If the number of free RX descriptors is greater than the RX free
2250 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
2251 * register.
2252 * Update the RDT with the value of the last processed RX descriptor
2253 * minus 1, to guarantee that the RDT register is never equal to the
 2254	 * RDH register, which creates a "full" ring situation from the
2255 * hardware point of view...
2256 */
2257 if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
2258 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
2259 "nb_hold=%u nb_rx=%u",
2260 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
2261
2262 rte_wmb();
11fdf7f2 2263 IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
2264 nb_hold = 0;
2265 }
2266
2267 rxq->nb_rx_hold = nb_hold;
2268 return nb_rx;
2269}
2270
2271uint16_t
2272ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2273 uint16_t nb_pkts)
2274{
2275 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
2276}
2277
2278uint16_t
2279ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
2280 uint16_t nb_pkts)
2281{
2282 return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
2283}
2284
2285/*********************************************************************
2286 *
2287 * Queue management functions
2288 *
2289 **********************************************************************/
2290
2291static void __attribute__((cold))
2292ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
2293{
2294 unsigned i;
2295
2296 if (txq->sw_ring != NULL) {
2297 for (i = 0; i < txq->nb_tx_desc; i++) {
2298 if (txq->sw_ring[i].mbuf != NULL) {
2299 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
2300 txq->sw_ring[i].mbuf = NULL;
2301 }
2302 }
2303 }
2304}
2305
2306static void __attribute__((cold))
2307ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
2308{
2309 if (txq != NULL &&
2310 txq->sw_ring != NULL)
2311 rte_free(txq->sw_ring);
2312}
2313
2314static void __attribute__((cold))
2315ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
2316{
2317 if (txq != NULL && txq->ops != NULL) {
2318 txq->ops->release_mbufs(txq);
2319 txq->ops->free_swring(txq);
2320 rte_free(txq);
2321 }
2322}
2323
2324void __attribute__((cold))
2325ixgbe_dev_tx_queue_release(void *txq)
2326{
2327 ixgbe_tx_queue_release(txq);
2328}
2329
2330/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
2331static void __attribute__((cold))
2332ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
2333{
2334 static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
2335 struct ixgbe_tx_entry *txe = txq->sw_ring;
2336 uint16_t prev, i;
2337
2338 /* Zero out HW ring memory */
2339 for (i = 0; i < txq->nb_tx_desc; i++) {
2340 txq->tx_ring[i] = zeroed_desc;
2341 }
2342
2343 /* Initialize SW ring entries */
2344 prev = (uint16_t) (txq->nb_tx_desc - 1);
2345 for (i = 0; i < txq->nb_tx_desc; i++) {
2346 volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
2347
2348 txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
2349 txe[i].mbuf = NULL;
2350 txe[i].last_id = i;
2351 txe[prev].next_id = i;
2352 prev = i;
2353 }
2354
2355 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2356 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2357
2358 txq->tx_tail = 0;
2359 txq->nb_tx_used = 0;
2360 /*
2361 * Always allow 1 descriptor to be un-allocated to avoid
2362 * a H/W race condition
2363 */
2364 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
2365 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
2366 txq->ctx_curr = 0;
2367 memset((void *)&txq->ctx_cache, 0,
2368 IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
2369}
2370
2371static const struct ixgbe_txq_ops def_txq_ops = {
2372 .release_mbufs = ixgbe_tx_queue_release_mbufs,
2373 .free_swring = ixgbe_tx_free_swring,
2374 .reset = ixgbe_reset_tx_queue,
2375};
2376
2377/* Takes an ethdev and a queue and sets up the tx function to be used based on
2378 * the queue parameters. Used in tx_queue_setup by primary process and then
2379 * in dev_init by secondary process when attaching to an existing ethdev.
2380 */
2381void __attribute__((cold))
2382ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
2383{
2384 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
2385 if ((txq->offloads == 0) &&
2386#ifdef RTE_LIBRTE_SECURITY
2387 !(txq->using_ipsec) &&
2388#endif
2389 (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
7c673cae 2390 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
11fdf7f2 2391 dev->tx_pkt_prepare = NULL;
2392#ifdef RTE_IXGBE_INC_VECTOR
2393 if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
2394 (rte_eal_process_type() != RTE_PROC_PRIMARY ||
2395 ixgbe_txq_vec_setup(txq) == 0)) {
2396 PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
2397 dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
2398 } else
2399#endif
2400 dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
2401 } else {
2402 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
2403 PMD_INIT_LOG(DEBUG,
2404 " - offloads = 0x%" PRIx64,
2405 txq->offloads);
2406 PMD_INIT_LOG(DEBUG,
2407 " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
2408 (unsigned long)txq->tx_rs_thresh,
2409 (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
2410 dev->tx_pkt_burst = ixgbe_xmit_pkts;
11fdf7f2 2411 dev->tx_pkt_prepare = ixgbe_prep_pkts;
2412 }
2413}
2414
2415uint64_t
2416ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
2417{
2418 RTE_SET_USED(dev);
2419
2420 return 0;
2421}
2422
2423uint64_t
2424ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
2425{
2426 uint64_t tx_offload_capa;
2427 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2428
2429 tx_offload_capa =
2430 DEV_TX_OFFLOAD_VLAN_INSERT |
2431 DEV_TX_OFFLOAD_IPV4_CKSUM |
2432 DEV_TX_OFFLOAD_UDP_CKSUM |
2433 DEV_TX_OFFLOAD_TCP_CKSUM |
2434 DEV_TX_OFFLOAD_SCTP_CKSUM |
2435 DEV_TX_OFFLOAD_TCP_TSO |
2436 DEV_TX_OFFLOAD_MULTI_SEGS;
2437
2438 if (hw->mac.type == ixgbe_mac_82599EB ||
2439 hw->mac.type == ixgbe_mac_X540)
2440 tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
2441
2442 if (hw->mac.type == ixgbe_mac_X550 ||
2443 hw->mac.type == ixgbe_mac_X550EM_x ||
2444 hw->mac.type == ixgbe_mac_X550EM_a)
2445 tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
2446
2447#ifdef RTE_LIBRTE_SECURITY
2448 if (dev->security_ctx)
2449 tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
2450#endif
2451 return tx_offload_capa;
2452}
2453
2454int __attribute__((cold))
2455ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
2456 uint16_t queue_idx,
2457 uint16_t nb_desc,
2458 unsigned int socket_id,
2459 const struct rte_eth_txconf *tx_conf)
2460{
2461 const struct rte_memzone *tz;
2462 struct ixgbe_tx_queue *txq;
2463 struct ixgbe_hw *hw;
2464 uint16_t tx_rs_thresh, tx_free_thresh;
11fdf7f2 2465 uint64_t offloads;
2466
2467 PMD_INIT_FUNC_TRACE();
2468 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2469
2470 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
2471
2472 /*
2473 * Validate number of transmit descriptors.
2474 * It must not exceed hardware maximum, and must be multiple
2475 * of IXGBE_ALIGN.
2476 */
2477 if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
2478 (nb_desc > IXGBE_MAX_RING_DESC) ||
2479 (nb_desc < IXGBE_MIN_RING_DESC)) {
2480 return -EINVAL;
2481 }
2482
2483 /*
2484 * The following two parameters control the setting of the RS bit on
2485 * transmit descriptors.
2486 * TX descriptors will have their RS bit set after txq->tx_rs_thresh
2487 * descriptors have been used.
2488 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2489 * descriptors are used or if the number of descriptors required
2490 * to transmit a packet is greater than the number of free TX
2491 * descriptors.
2492 * The following constraints must be satisfied:
2493 * tx_rs_thresh must be greater than 0.
2494 * tx_rs_thresh must be less than the size of the ring minus 2.
2495 * tx_rs_thresh must be less than or equal to tx_free_thresh.
2496 * tx_rs_thresh must be a divisor of the ring size.
2497 * tx_free_thresh must be greater than 0.
2498 * tx_free_thresh must be less than the size of the ring minus 3.
2499 * One descriptor in the TX ring is used as a sentinel to avoid a
2500 * H/W race condition, hence the maximum threshold constraints.
2501 * When set to zero use default values.
2502 */
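	/*
	 * Worked example (values assumed for illustration, taking the usual
	 * defaults of 32 for both thresholds): nb_desc = 512 with
	 * tx_rs_thresh = tx_free_thresh = 32 passes every check below:
	 * 32 < 510, 32 < 509, 32 <= 32 and 512 % 32 == 0.  A request of
	 * tx_rs_thresh = 24 with the same ring size would be rejected
	 * because 512 % 24 != 0.
	 */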
2503 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
2504 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
2505 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2506 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2507 if (tx_rs_thresh >= (nb_desc - 2)) {
2508 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
2509 "of TX descriptors minus 2. (tx_rs_thresh=%u "
2510 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2511 (int)dev->data->port_id, (int)queue_idx);
2512 return -(EINVAL);
2513 }
2514 if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
 2515		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to %u. "
2516 "(tx_rs_thresh=%u port=%d queue=%d)",
2517 DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
2518 (int)dev->data->port_id, (int)queue_idx);
2519 return -(EINVAL);
2520 }
2521 if (tx_free_thresh >= (nb_desc - 3)) {
 2522		PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
 2523			     "number of TX descriptors minus 3. "
 2524			     "(tx_free_thresh=%u "
2525 "port=%d queue=%d)",
2526 (unsigned int)tx_free_thresh,
2527 (int)dev->data->port_id, (int)queue_idx);
2528 return -(EINVAL);
2529 }
2530 if (tx_rs_thresh > tx_free_thresh) {
2531 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
2532 "tx_free_thresh. (tx_free_thresh=%u "
2533 "tx_rs_thresh=%u port=%d queue=%d)",
2534 (unsigned int)tx_free_thresh,
2535 (unsigned int)tx_rs_thresh,
2536 (int)dev->data->port_id,
2537 (int)queue_idx);
2538 return -(EINVAL);
2539 }
2540 if ((nb_desc % tx_rs_thresh) != 0) {
2541 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
2542 "number of TX descriptors. (tx_rs_thresh=%u "
2543 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2544 (int)dev->data->port_id, (int)queue_idx);
2545 return -(EINVAL);
2546 }
2547
2548 /*
 2549	 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
2550 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
2551 * by the NIC and all descriptors are written back after the NIC
2552 * accumulates WTHRESH descriptors.
2553 */
2554 if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
2555 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
2556 "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
2557 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
2558 (int)dev->data->port_id, (int)queue_idx);
2559 return -(EINVAL);
2560 }
2561
2562 /* Free memory prior to re-allocation if needed... */
2563 if (dev->data->tx_queues[queue_idx] != NULL) {
2564 ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2565 dev->data->tx_queues[queue_idx] = NULL;
2566 }
2567
2568 /* First allocate the tx queue data structure */
2569 txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
2570 RTE_CACHE_LINE_SIZE, socket_id);
2571 if (txq == NULL)
2572 return -ENOMEM;
2573
2574 /*
2575 * Allocate TX ring hardware descriptors. A memzone large enough to
2576 * handle the maximum ring size is allocated in order to allow for
2577 * resizing in later calls to the queue setup function.
2578 */
2579 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2580 sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
2581 IXGBE_ALIGN, socket_id);
2582 if (tz == NULL) {
2583 ixgbe_tx_queue_release(txq);
2584 return -ENOMEM;
2585 }
2586
2587 txq->nb_tx_desc = nb_desc;
2588 txq->tx_rs_thresh = tx_rs_thresh;
2589 txq->tx_free_thresh = tx_free_thresh;
2590 txq->pthresh = tx_conf->tx_thresh.pthresh;
2591 txq->hthresh = tx_conf->tx_thresh.hthresh;
2592 txq->wthresh = tx_conf->tx_thresh.wthresh;
2593 txq->queue_id = queue_idx;
2594 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2595 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2596 txq->port_id = dev->data->port_id;
11fdf7f2 2597 txq->offloads = offloads;
2598 txq->ops = &def_txq_ops;
2599 txq->tx_deferred_start = tx_conf->tx_deferred_start;
2600#ifdef RTE_LIBRTE_SECURITY
2601 txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
2602 DEV_TX_OFFLOAD_SECURITY);
2603#endif
2604
2605 /*
2606 * Modification to set VFTDT for virtual function if vf is detected
2607 */
2608 if (hw->mac.type == ixgbe_mac_82599_vf ||
2609 hw->mac.type == ixgbe_mac_X540_vf ||
2610 hw->mac.type == ixgbe_mac_X550_vf ||
2611 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2612 hw->mac.type == ixgbe_mac_X550EM_a_vf)
2613 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
2614 else
2615 txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
2616
11fdf7f2 2617 txq->tx_ring_phys_addr = tz->iova;
2618 txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
2619
2620 /* Allocate software ring */
2621 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2622 sizeof(struct ixgbe_tx_entry) * nb_desc,
2623 RTE_CACHE_LINE_SIZE, socket_id);
2624 if (txq->sw_ring == NULL) {
2625 ixgbe_tx_queue_release(txq);
2626 return -ENOMEM;
2627 }
2628 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
2629 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2630
2631 /* set up vector or scalar TX function as appropriate */
2632 ixgbe_set_tx_function(dev, txq);
2633
2634 txq->ops->reset(txq);
2635
2636 dev->data->tx_queues[queue_idx] = txq;
2637
2638
2639 return 0;
2640}
2641
2642/**
2643 * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2644 *
2645 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2646 * in the sw_rsc_ring is not set to NULL but rather points to the next
2647 * mbuf of this RSC aggregation (that has not been completed yet and still
 2648 * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
 2649 * just free the first "nb_segs" segments of the cluster explicitly by
 2650 * calling rte_pktmbuf_free_seg().
2651 *
2652 * @m scattered cluster head
2653 */
2654static void __attribute__((cold))
2655ixgbe_free_sc_cluster(struct rte_mbuf *m)
2656{
11fdf7f2 2657 uint16_t i, nb_segs = m->nb_segs;
2658 struct rte_mbuf *next_seg;
2659
2660 for (i = 0; i < nb_segs; i++) {
2661 next_seg = m->next;
2662 rte_pktmbuf_free_seg(m);
2663 m = next_seg;
2664 }
2665}
2666
2667static void __attribute__((cold))
2668ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
2669{
2670 unsigned i;
2671
2672#ifdef RTE_IXGBE_INC_VECTOR
2673 /* SSE Vector driver has a different way of releasing mbufs. */
2674 if (rxq->rx_using_sse) {
2675 ixgbe_rx_queue_release_mbufs_vec(rxq);
2676 return;
2677 }
2678#endif
2679
2680 if (rxq->sw_ring != NULL) {
2681 for (i = 0; i < rxq->nb_rx_desc; i++) {
2682 if (rxq->sw_ring[i].mbuf != NULL) {
2683 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2684 rxq->sw_ring[i].mbuf = NULL;
2685 }
2686 }
2687 if (rxq->rx_nb_avail) {
2688 for (i = 0; i < rxq->rx_nb_avail; ++i) {
2689 struct rte_mbuf *mb;
2690
2691 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2692 rte_pktmbuf_free_seg(mb);
2693 }
2694 rxq->rx_nb_avail = 0;
2695 }
2696 }
2697
2698 if (rxq->sw_sc_ring)
2699 for (i = 0; i < rxq->nb_rx_desc; i++)
2700 if (rxq->sw_sc_ring[i].fbuf) {
2701 ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2702 rxq->sw_sc_ring[i].fbuf = NULL;
2703 }
2704}
2705
2706static void __attribute__((cold))
2707ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
2708{
2709 if (rxq != NULL) {
2710 ixgbe_rx_queue_release_mbufs(rxq);
2711 rte_free(rxq->sw_ring);
2712 rte_free(rxq->sw_sc_ring);
2713 rte_free(rxq);
2714 }
2715}
2716
2717void __attribute__((cold))
2718ixgbe_dev_rx_queue_release(void *rxq)
2719{
2720 ixgbe_rx_queue_release(rxq);
2721}
2722
2723/*
2724 * Check if Rx Burst Bulk Alloc function can be used.
2725 * Return
2726 * 0: the preconditions are satisfied and the bulk allocation function
2727 * can be used.
2728 * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2729 * function must be used.
2730 */
2731static inline int __attribute__((cold))
2732check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
2733{
2734 int ret = 0;
2735
2736 /*
2737 * Make sure the following pre-conditions are satisfied:
2738 * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
2739 * rxq->rx_free_thresh < rxq->nb_rx_desc
2740 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2741 * Scattered packets are not supported. This should be checked
2742 * outside of this function.
2743 */
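	/*
	 * Worked example (values assumed for illustration, with
	 * RTE_PMD_IXGBE_RX_MAX_BURST taken as 32): a queue created with
	 * nb_rx_desc = 128 and rx_free_thresh = 32 satisfies all three
	 * checks (32 >= 32, 32 < 128, 128 % 32 == 0), while
	 * rx_free_thresh = 16 fails the first check and forces the default
	 * Rx burst function for the whole port.
	 */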
2744 if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
2745 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2746 "rxq->rx_free_thresh=%d, "
2747 "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
2748 rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
2749 ret = -EINVAL;
2750 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2751 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2752 "rxq->rx_free_thresh=%d, "
2753 "rxq->nb_rx_desc=%d",
2754 rxq->rx_free_thresh, rxq->nb_rx_desc);
2755 ret = -EINVAL;
2756 } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2757 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2758 "rxq->nb_rx_desc=%d, "
2759 "rxq->rx_free_thresh=%d",
2760 rxq->nb_rx_desc, rxq->rx_free_thresh);
2761 ret = -EINVAL;
2762 }
2763
2764 return ret;
2765}
2766
2767/* Reset dynamic ixgbe_rx_queue fields back to defaults */
2768static void __attribute__((cold))
2769ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
2770{
2771 static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
2772 unsigned i;
2773 uint16_t len = rxq->nb_rx_desc;
2774
2775 /*
2776 * By default, the Rx queue setup function allocates enough memory for
2777 * IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
11fdf7f2 2778 * extra memory at the end of the descriptor ring to be zero'd out.
2779 */
2780 if (adapter->rx_bulk_alloc_allowed)
2781 /* zero out extra memory */
2782 len += RTE_PMD_IXGBE_RX_MAX_BURST;
2783
2784 /*
2785 * Zero out HW ring memory. Zero out extra memory at the end of
2786 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2787 * reads extra memory as zeros.
2788 */
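	/*
	 * Illustrative example: with nb_rx_desc = 128 and bulk allocation
	 * allowed, len becomes 128 + RTE_PMD_IXGBE_RX_MAX_BURST, so the extra
	 * descriptors at the end are zeroed here and their sw_ring entries
	 * are pointed at the local fake_mbuf further below, which lets the
	 * look-ahead scan read past the real end of the ring safely.
	 */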
2789 for (i = 0; i < len; i++) {
2790 rxq->rx_ring[i] = zeroed_desc;
2791 }
2792
2793 /*
2794 * initialize extra software ring entries. Space for these extra
2795 * entries is always allocated
2796 */
2797 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2798 for (i = rxq->nb_rx_desc; i < len; ++i) {
2799 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2800 }
2801
2802 rxq->rx_nb_avail = 0;
2803 rxq->rx_next_avail = 0;
2804 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2805 rxq->rx_tail = 0;
2806 rxq->nb_rx_hold = 0;
2807 rxq->pkt_first_seg = NULL;
2808 rxq->pkt_last_seg = NULL;
2809
2810#ifdef RTE_IXGBE_INC_VECTOR
2811 rxq->rxrearm_start = 0;
2812 rxq->rxrearm_nb = 0;
2813#endif
2814}
2815
2816static int
2817ixgbe_is_vf(struct rte_eth_dev *dev)
2818{
2819 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2820
2821 switch (hw->mac.type) {
2822 case ixgbe_mac_82599_vf:
2823 case ixgbe_mac_X540_vf:
2824 case ixgbe_mac_X550_vf:
2825 case ixgbe_mac_X550EM_x_vf:
2826 case ixgbe_mac_X550EM_a_vf:
2827 return 1;
2828 default:
2829 return 0;
2830 }
2831}
2832
2833uint64_t
2834ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
2835{
2836 uint64_t offloads = 0;
2837 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2838
2839 if (hw->mac.type != ixgbe_mac_82598EB)
2840 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2841
2842 return offloads;
2843}
2844
2845uint64_t
2846ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
2847{
2848 uint64_t offloads;
2849 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2850
2851 offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
2852 DEV_RX_OFFLOAD_UDP_CKSUM |
2853 DEV_RX_OFFLOAD_TCP_CKSUM |
2854 DEV_RX_OFFLOAD_KEEP_CRC |
2855 DEV_RX_OFFLOAD_JUMBO_FRAME |
9f95a23c 2856 DEV_RX_OFFLOAD_VLAN_FILTER |
2857 DEV_RX_OFFLOAD_SCATTER;
2858
2859 if (hw->mac.type == ixgbe_mac_82598EB)
2860 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2861
2862 if (ixgbe_is_vf(dev) == 0)
9f95a23c 2863 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
2864
2865 /*
2866 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
2867 * mode.
2868 */
2869 if ((hw->mac.type == ixgbe_mac_82599EB ||
2870 hw->mac.type == ixgbe_mac_X540 ||
2871 hw->mac.type == ixgbe_mac_X550) &&
2872 !RTE_ETH_DEV_SRIOV(dev).active)
2873 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
2874
2875 if (hw->mac.type == ixgbe_mac_82599EB ||
2876 hw->mac.type == ixgbe_mac_X540)
2877 offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
2878
2879 if (hw->mac.type == ixgbe_mac_X550 ||
2880 hw->mac.type == ixgbe_mac_X550EM_x ||
2881 hw->mac.type == ixgbe_mac_X550EM_a)
2882 offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
2883
2884#ifdef RTE_LIBRTE_SECURITY
2885 if (dev->security_ctx)
2886 offloads |= DEV_RX_OFFLOAD_SECURITY;
2887#endif
2888
2889 return offloads;
2890}
2891
2892int __attribute__((cold))
2893ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2894 uint16_t queue_idx,
2895 uint16_t nb_desc,
2896 unsigned int socket_id,
2897 const struct rte_eth_rxconf *rx_conf,
2898 struct rte_mempool *mp)
2899{
2900 const struct rte_memzone *rz;
2901 struct ixgbe_rx_queue *rxq;
2902 struct ixgbe_hw *hw;
2903 uint16_t len;
2904 struct ixgbe_adapter *adapter =
2905 (struct ixgbe_adapter *)dev->data->dev_private;
11fdf7f2 2906 uint64_t offloads;
2907
2908 PMD_INIT_FUNC_TRACE();
2909 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2910
2911 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
2912
2913 /*
2914 * Validate number of receive descriptors.
2915 * It must not exceed hardware maximum, and must be multiple
2916 * of IXGBE_ALIGN.
2917 */
2918 if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
2919 (nb_desc > IXGBE_MAX_RING_DESC) ||
2920 (nb_desc < IXGBE_MIN_RING_DESC)) {
2921 return -EINVAL;
2922 }
2923
2924 /* Free memory prior to re-allocation if needed... */
2925 if (dev->data->rx_queues[queue_idx] != NULL) {
2926 ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2927 dev->data->rx_queues[queue_idx] = NULL;
2928 }
2929
2930 /* First allocate the rx queue data structure */
2931 rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
2932 RTE_CACHE_LINE_SIZE, socket_id);
2933 if (rxq == NULL)
2934 return -ENOMEM;
2935 rxq->mb_pool = mp;
2936 rxq->nb_rx_desc = nb_desc;
2937 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2938 rxq->queue_id = queue_idx;
2939 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2940 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2941 rxq->port_id = dev->data->port_id;
9f95a23c 2942 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
2943 rxq->crc_len = ETHER_CRC_LEN;
2944 else
2945 rxq->crc_len = 0;
2946 rxq->drop_en = rx_conf->rx_drop_en;
2947 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
11fdf7f2 2948 rxq->offloads = offloads;
2949
2950 /*
2951 * The packet type in RX descriptor is different for different NICs.
 2952	 * Some bits are used for x550 but reserved for other NICs.
2953 * So set different masks for different NICs.
2954 */
2955 if (hw->mac.type == ixgbe_mac_X550 ||
2956 hw->mac.type == ixgbe_mac_X550EM_x ||
2957 hw->mac.type == ixgbe_mac_X550EM_a ||
2958 hw->mac.type == ixgbe_mac_X550_vf ||
2959 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2960 hw->mac.type == ixgbe_mac_X550EM_a_vf)
2961 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_X550;
2962 else
2963 rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599;
2964
2965 /*
2966 * Allocate RX ring hardware descriptors. A memzone large enough to
2967 * handle the maximum ring size is allocated in order to allow for
2968 * resizing in later calls to the queue setup function.
2969 */
2970 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2971 RX_RING_SZ, IXGBE_ALIGN, socket_id);
2972 if (rz == NULL) {
2973 ixgbe_rx_queue_release(rxq);
2974 return -ENOMEM;
2975 }
2976
2977 /*
2978 * Zero init all the descriptors in the ring.
2979 */
2980 memset(rz->addr, 0, RX_RING_SZ);
2981
2982 /*
2983 * Modified to setup VFRDT for Virtual Function
2984 */
2985 if (hw->mac.type == ixgbe_mac_82599_vf ||
2986 hw->mac.type == ixgbe_mac_X540_vf ||
2987 hw->mac.type == ixgbe_mac_X550_vf ||
2988 hw->mac.type == ixgbe_mac_X550EM_x_vf ||
2989 hw->mac.type == ixgbe_mac_X550EM_a_vf) {
2990 rxq->rdt_reg_addr =
2991 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
2992 rxq->rdh_reg_addr =
2993 IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
2994 } else {
2995 rxq->rdt_reg_addr =
2996 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
2997 rxq->rdh_reg_addr =
2998 IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
2999 }
3000
11fdf7f2 3001 rxq->rx_ring_phys_addr = rz->iova;
3002 rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
3003
3004 /*
3005 * Certain constraints must be met in order to use the bulk buffer
 3006	 * allocation Rx burst function. If any of the Rx queues doesn't meet them,
 3007	 * the feature should be disabled for the whole port.
3008 */
3009 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
3010 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
3011 "preconditions - canceling the feature for "
3012 "the whole port[%d]",
3013 rxq->queue_id, rxq->port_id);
3014 adapter->rx_bulk_alloc_allowed = false;
3015 }
3016
3017 /*
3018 * Allocate software ring. Allow for space at the end of the
3019 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
3020 * function does not access an invalid memory region.
3021 */
3022 len = nb_desc;
3023 if (adapter->rx_bulk_alloc_allowed)
3024 len += RTE_PMD_IXGBE_RX_MAX_BURST;
3025
3026 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
3027 sizeof(struct ixgbe_rx_entry) * len,
3028 RTE_CACHE_LINE_SIZE, socket_id);
3029 if (!rxq->sw_ring) {
3030 ixgbe_rx_queue_release(rxq);
3031 return -ENOMEM;
3032 }
3033
3034 /*
3035 * Always allocate even if it's not going to be needed in order to
3036 * simplify the code.
3037 *
3038 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
3039 * be requested in ixgbe_dev_rx_init(), which is called later from
3040 * dev_start() flow.
3041 */
3042 rxq->sw_sc_ring =
3043 rte_zmalloc_socket("rxq->sw_sc_ring",
3044 sizeof(struct ixgbe_scattered_rx_entry) * len,
3045 RTE_CACHE_LINE_SIZE, socket_id);
3046 if (!rxq->sw_sc_ring) {
3047 ixgbe_rx_queue_release(rxq);
3048 return -ENOMEM;
3049 }
3050
3051 PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
3052 "dma_addr=0x%"PRIx64,
3053 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
3054 rxq->rx_ring_phys_addr);
3055
3056 if (!rte_is_power_of_2(nb_desc)) {
3057 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
3058 "preconditions - canceling the feature for "
3059 "the whole port[%d]",
3060 rxq->queue_id, rxq->port_id);
3061 adapter->rx_vec_allowed = false;
3062 } else
3063 ixgbe_rxq_vec_setup(rxq);
3064
3065 dev->data->rx_queues[queue_idx] = rxq;
3066
3067 ixgbe_reset_rx_queue(adapter, rxq);
3068
3069 return 0;
3070}
3071
3072uint32_t
3073ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
3074{
3075#define IXGBE_RXQ_SCAN_INTERVAL 4
3076 volatile union ixgbe_adv_rx_desc *rxdp;
3077 struct ixgbe_rx_queue *rxq;
3078 uint32_t desc = 0;
3079
3080 rxq = dev->data->rx_queues[rx_queue_id];
3081 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
3082
3083 while ((desc < rxq->nb_rx_desc) &&
3084 (rxdp->wb.upper.status_error &
3085 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
3086 desc += IXGBE_RXQ_SCAN_INTERVAL;
3087 rxdp += IXGBE_RXQ_SCAN_INTERVAL;
3088 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
3089 rxdp = &(rxq->rx_ring[rxq->rx_tail +
3090 desc - rxq->nb_rx_desc]);
3091 }
3092
3093 return desc;
3094}
3095
3096int
3097ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
3098{
3099 volatile union ixgbe_adv_rx_desc *rxdp;
3100 struct ixgbe_rx_queue *rxq = rx_queue;
3101 uint32_t desc;
3102
3103 if (unlikely(offset >= rxq->nb_rx_desc))
3104 return 0;
3105 desc = rxq->rx_tail + offset;
3106 if (desc >= rxq->nb_rx_desc)
3107 desc -= rxq->nb_rx_desc;
3108
3109 rxdp = &rxq->rx_ring[desc];
3110 return !!(rxdp->wb.upper.status_error &
3111 rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
3112}
3113
3114int
3115ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
3116{
3117 struct ixgbe_rx_queue *rxq = rx_queue;
3118 volatile uint32_t *status;
3119 uint32_t nb_hold, desc;
3120
3121 if (unlikely(offset >= rxq->nb_rx_desc))
3122 return -EINVAL;
3123
3124#ifdef RTE_IXGBE_INC_VECTOR
3125 if (rxq->rx_using_sse)
3126 nb_hold = rxq->rxrearm_nb;
3127 else
3128#endif
3129 nb_hold = rxq->nb_rx_hold;
3130 if (offset >= rxq->nb_rx_desc - nb_hold)
3131 return RTE_ETH_RX_DESC_UNAVAIL;
3132
3133 desc = rxq->rx_tail + offset;
3134 if (desc >= rxq->nb_rx_desc)
3135 desc -= rxq->nb_rx_desc;
3136
3137 status = &rxq->rx_ring[desc].wb.upper.status_error;
3138 if (*status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))
3139 return RTE_ETH_RX_DESC_DONE;
3140
3141 return RTE_ETH_RX_DESC_AVAIL;
3142}
3143
3144int
3145ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
3146{
3147 struct ixgbe_tx_queue *txq = tx_queue;
3148 volatile uint32_t *status;
3149 uint32_t desc;
3150
3151 if (unlikely(offset >= txq->nb_tx_desc))
3152 return -EINVAL;
3153
3154 desc = txq->tx_tail + offset;
3155 /* go to next desc that has the RS bit */
3156 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
3157 txq->tx_rs_thresh;
3158 if (desc >= txq->nb_tx_desc) {
3159 desc -= txq->nb_tx_desc;
3160 if (desc >= txq->nb_tx_desc)
3161 desc -= txq->nb_tx_desc;
3162 }
3163
3164 status = &txq->tx_ring[desc].wb.status;
3165 if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
3166 return RTE_ETH_TX_DESC_DONE;
3167
3168 return RTE_ETH_TX_DESC_FULL;
3169}
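/*
 * Worked example of the rounding above (values assumed for illustration):
 * with tx_rs_thresh = 32, tx_tail = 100 and offset = 10, desc = 110 is
 * rounded up to ((110 + 31) / 32) * 32 = 128, and the DD bit of that
 * descriptor (modulo the ring size) decides between DONE and FULL.
 */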
3170
3171/*
3172 * Set up link loopback for X540/X550 mode Tx->Rx.
3173 */
3174static inline void __attribute__((cold))
3175ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable)
3176{
3177 uint32_t macc;
3178 PMD_INIT_FUNC_TRACE();
3179
3180 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
3181
3182 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
3183 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
3184 macc = IXGBE_READ_REG(hw, IXGBE_MACC);
3185
3186 if (enable) {
3187 /* datasheet 15.2.1: disable AUTONEG (PHY Bit 7.0.C) */
3188 autoneg_reg |= IXGBE_MII_AUTONEG_ENABLE;
3189 /* datasheet 15.2.1: MACC.FLU = 1 (force link up) */
3190 macc |= IXGBE_MACC_FLU;
3191 } else {
3192 autoneg_reg &= ~IXGBE_MII_AUTONEG_ENABLE;
3193 macc &= ~IXGBE_MACC_FLU;
3194 }
3195
3196 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
3197 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
3198
3199 IXGBE_WRITE_REG(hw, IXGBE_MACC, macc);
3200}
3201
3202void __attribute__((cold))
3203ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
3204{
3205 unsigned i;
3206 struct ixgbe_adapter *adapter =
3207 (struct ixgbe_adapter *)dev->data->dev_private;
9f95a23c 3208 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3209
3210 PMD_INIT_FUNC_TRACE();
3211
3212 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3213 struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
3214
3215 if (txq != NULL) {
3216 txq->ops->release_mbufs(txq);
3217 txq->ops->reset(txq);
3218 }
3219 }
3220
3221 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3222 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
3223
3224 if (rxq != NULL) {
3225 ixgbe_rx_queue_release_mbufs(rxq);
3226 ixgbe_reset_rx_queue(adapter, rxq);
3227 }
3228 }
3229 /* If loopback mode was enabled, reconfigure the link accordingly */
3230 if (dev->data->dev_conf.lpbk_mode != 0) {
3231 if (hw->mac.type == ixgbe_mac_X540 ||
3232 hw->mac.type == ixgbe_mac_X550 ||
3233 hw->mac.type == ixgbe_mac_X550EM_x ||
3234 hw->mac.type == ixgbe_mac_X550EM_a)
3235 ixgbe_setup_loopback_link_x540_x550(hw, false);
3236 }
3237}
3238
3239void
3240ixgbe_dev_free_queues(struct rte_eth_dev *dev)
3241{
3242 unsigned i;
3243
3244 PMD_INIT_FUNC_TRACE();
3245
3246 for (i = 0; i < dev->data->nb_rx_queues; i++) {
3247 ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
3248 dev->data->rx_queues[i] = NULL;
3249 }
3250 dev->data->nb_rx_queues = 0;
3251
3252 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3253 ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
3254 dev->data->tx_queues[i] = NULL;
3255 }
3256 dev->data->nb_tx_queues = 0;
3257}
3258
3259/*********************************************************************
3260 *
3261 * Device RX/TX init functions
3262 *
3263 **********************************************************************/
3264
3265/**
3266 * Receive Side Scaling (RSS)
3267 * See section 7.1.2.8 in the following document:
3268 * "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
3269 *
3270 * Principles:
3271 * The source and destination IP addresses of the IP header and the source
3272 * and destination ports of TCP/UDP headers, if any, of received packets are
3273 * hashed against a configurable random key to compute a 32-bit RSS hash result.
3274 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
3275 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
3276 * RSS output index which is used as the RX queue index where to store the
3277 * received packets.
3278 * The following output is supplied in the RX write-back descriptor:
3279 * - 32-bit result of the Microsoft RSS hash function,
3280 * - 4-bit RSS type field.
3281 */
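/*
 * Worked example of the RETA lookup described above (hash value made up):
 * only the seven least significant bits of the 32-bit hash select the RETA
 * entry, i.e. reta_idx = rss_hash & 0x7F.  A hash of 0x1A2B3C4D selects
 * RETA entry 0x4D (77), and the queue index stored in that entry is the Rx
 * queue the packet is delivered to.
 */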
3282
3283/*
3284 * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
3285 * Used as the default key.
3286 */
3287static uint8_t rss_intel_key[40] = {
3288 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
3289 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
3290 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
3291 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
3292 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
3293};
3294
3295static void
3296ixgbe_rss_disable(struct rte_eth_dev *dev)
3297{
3298 struct ixgbe_hw *hw;
3299 uint32_t mrqc;
3300 uint32_t mrqc_reg;
3301
3302 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3303 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3304 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3305 mrqc &= ~IXGBE_MRQC_RSSEN;
3306 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3307}
3308
3309static void
3310ixgbe_hw_rss_hash_set(struct ixgbe_hw *hw, struct rte_eth_rss_conf *rss_conf)
3311{
3312 uint8_t *hash_key;
3313 uint32_t mrqc;
3314 uint32_t rss_key;
3315 uint64_t rss_hf;
3316 uint16_t i;
3317 uint32_t mrqc_reg;
3318 uint32_t rssrk_reg;
3319
3320 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3321 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3322
3323 hash_key = rss_conf->rss_key;
3324 if (hash_key != NULL) {
3325 /* Fill in RSS hash key */
3326 for (i = 0; i < 10; i++) {
3327 rss_key = hash_key[(i * 4)];
3328 rss_key |= hash_key[(i * 4) + 1] << 8;
3329 rss_key |= hash_key[(i * 4) + 2] << 16;
3330 rss_key |= hash_key[(i * 4) + 3] << 24;
3331 IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
3332 }
3333 }
3334
3335 /* Set configured hashing protocols in MRQC register */
3336 rss_hf = rss_conf->rss_hf;
3337 mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
3338 if (rss_hf & ETH_RSS_IPV4)
3339 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
3340 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
3341 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
3342 if (rss_hf & ETH_RSS_IPV6)
3343 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
3344 if (rss_hf & ETH_RSS_IPV6_EX)
3345 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
3346 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
3347 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3348 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
3349 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
3350 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
3351 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3352 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
3353 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3354 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
3355 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3356 IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
3357}
3358
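/*
 * Illustrative sketch, not called by the driver, of the key packing done
 * in ixgbe_hw_rss_hash_set() above: each RSSRK(i) register holds four
 * consecutive key bytes in little-endian byte order, so the 40-byte key
 * fills registers RSSRK(0)..RSSRK(9). The name below is local to this
 * example.
 */
static inline uint32_t __attribute__((unused))
ixgbe_example_rssrk_word(const uint8_t *hash_key, uint16_t i)
{
	return (uint32_t)hash_key[i * 4] |
	       ((uint32_t)hash_key[i * 4 + 1] << 8) |
	       ((uint32_t)hash_key[i * 4 + 2] << 16) |
	       ((uint32_t)hash_key[i * 4 + 3] << 24);
}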
3359int
3360ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
3361 struct rte_eth_rss_conf *rss_conf)
3362{
3363 struct ixgbe_hw *hw;
3364 uint32_t mrqc;
3365 uint64_t rss_hf;
3366 uint32_t mrqc_reg;
3367
3368 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3369
3370 if (!ixgbe_rss_update_sp(hw->mac.type)) {
3371 PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
3372 "NIC.");
3373 return -ENOTSUP;
3374 }
3375 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3376
3377 /*
3378 * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
3379 * "RSS enabling cannot be done dynamically while it must be
3380 * preceded by a software reset"
3381 * Before changing anything, first check that the update RSS operation
3382 * does not attempt to disable RSS, if RSS was enabled at
3383 * initialization time, or does not attempt to enable RSS, if RSS was
3384 * disabled at initialization time.
3385 */
3386 rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
3387 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3388 if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
3389 if (rss_hf != 0) /* Enable RSS */
3390 return -(EINVAL);
3391 return 0; /* Nothing to do */
3392 }
3393 /* RSS enabled */
3394 if (rss_hf == 0) /* Disable RSS */
3395 return -(EINVAL);
3396 ixgbe_hw_rss_hash_set(hw, rss_conf);
3397 return 0;
3398}
3399
3400int
3401ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
3402 struct rte_eth_rss_conf *rss_conf)
3403{
3404 struct ixgbe_hw *hw;
3405 uint8_t *hash_key;
3406 uint32_t mrqc;
3407 uint32_t rss_key;
3408 uint64_t rss_hf;
3409 uint16_t i;
3410 uint32_t mrqc_reg;
3411 uint32_t rssrk_reg;
3412
3413 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3414 mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
3415 rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
3416 hash_key = rss_conf->rss_key;
3417 if (hash_key != NULL) {
3418 /* Return RSS hash key */
3419 for (i = 0; i < 10; i++) {
3420 rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
3421 hash_key[(i * 4)] = rss_key & 0x000000FF;
3422 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
3423 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
3424 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
3425 }
3426 }
3427
3428 /* Get RSS functions configured in MRQC register */
3429 mrqc = IXGBE_READ_REG(hw, mrqc_reg);
3430 if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
3431 rss_conf->rss_hf = 0;
3432 return 0;
3433 }
3434 rss_hf = 0;
3435 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
3436 rss_hf |= ETH_RSS_IPV4;
3437 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
3438 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
3439 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
3440 rss_hf |= ETH_RSS_IPV6;
3441 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
3442 rss_hf |= ETH_RSS_IPV6_EX;
3443 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
3444 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
3445 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
3446 rss_hf |= ETH_RSS_IPV6_TCP_EX;
3447 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
3448 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
3449 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
3450 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
3451 if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
3452 rss_hf |= ETH_RSS_IPV6_UDP_EX;
3453 rss_conf->rss_hf = rss_hf;
3454 return 0;
3455}
3456
3457static void
3458ixgbe_rss_configure(struct rte_eth_dev *dev)
3459{
3460 struct rte_eth_rss_conf rss_conf;
9f95a23c 3461 struct ixgbe_adapter *adapter;
7c673cae
FG
3462 struct ixgbe_hw *hw;
3463 uint32_t reta;
3464 uint16_t i;
3465 uint16_t j;
3466 uint16_t sp_reta_size;
3467 uint32_t reta_reg;
3468
3469 PMD_INIT_FUNC_TRACE();
9f95a23c 3470 adapter = (struct ixgbe_adapter *)dev->data->dev_private;
7c673cae
FG
3471 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3472
3473 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
3474
3475 /*
3476 * Fill in redirection table
3477 * The byte-swap is needed because NIC registers are in
3478 * little-endian order.
3479 */
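	/*
	 * For example, with 4 Rx queues the first four RETA entries are
	 * 0, 1, 2, 3; the loop below accumulates them as 0x00010203 and
	 * rte_bswap32() turns that into 0x03020100, so the queue index of
	 * entry 0 ends up in the least significant byte of the value
	 * written to the register.
	 */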
9f95a23c
TL
3480 if (adapter->rss_reta_updated == 0) {
3481 reta = 0;
3482 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
3483 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
3484
3485 if (j == dev->data->nb_rx_queues)
3486 j = 0;
3487 reta = (reta << 8) | j;
3488 if ((i & 3) == 3)
3489 IXGBE_WRITE_REG(hw, reta_reg,
3490 rte_bswap32(reta));
3491 }
7c673cae
FG
3492 }
3493
3494 /*
3495 * Configure the RSS key and the RSS protocols used to compute
3496 * the RSS hash of input packets.
3497 */
3498 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
3499 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
3500 ixgbe_rss_disable(dev);
3501 return;
3502 }
3503 if (rss_conf.rss_key == NULL)
3504 rss_conf.rss_key = rss_intel_key; /* Default hash key */
3505 ixgbe_hw_rss_hash_set(hw, &rss_conf);
3506}
3507
3508#define NUM_VFTA_REGISTERS 128
3509#define NIC_RX_BUFFER_SIZE 0x200
3510#define X550_RX_BUFFER_SIZE 0x180
3511
3512static void
3513ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
3514{
3515 struct rte_eth_vmdq_dcb_conf *cfg;
3516 struct ixgbe_hw *hw;
3517 enum rte_eth_nb_pools num_pools;
3518 uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
3519 uint16_t pbsize;
3520 uint8_t nb_tcs; /* number of traffic classes */
3521 int i;
3522
3523 PMD_INIT_FUNC_TRACE();
3524 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3525 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3526 num_pools = cfg->nb_queue_pools;
3527 /* Check we have a valid number of pools */
3528 if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
3529 ixgbe_rss_disable(dev);
3530 return;
3531 }
3532 /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
3533 nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
3534
3535 /*
3536 * RXPBSIZE
3537 * split rx buffer up into sections, each for 1 traffic class
3538 */
3539 switch (hw->mac.type) {
3540 case ixgbe_mac_X550:
3541 case ixgbe_mac_X550EM_x:
3542 case ixgbe_mac_X550EM_a:
3543 pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
3544 break;
3545 default:
3546 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
3547 break;
3548 }
3549 for (i = 0; i < nb_tcs; i++) {
3550 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3551
3552 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3553 /* clear 10 bits. */
3554 rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
3555 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3556 }
3557 /* zero alloc all unused TCs */
3558 for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3559 uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
3560
3561 rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
3562 /* clear 10 bits. */
3563 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
3564 }
3565
3566 /* MRQC: enable vmdq and dcb */
3567 mrqc = (num_pools == ETH_16_POOLS) ?
3568 IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
3569 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3570
3571 /* PFVTCTL: turn on virtualisation and set the default pool */
3572 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3573 if (cfg->enable_default_pool) {
3574 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
3575 } else {
3576 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
3577 }
3578
3579 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
3580
3581 /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
3582 queue_mapping = 0;
3583 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
3584 /*
3585 * mapping is done with 3 bits per priority,
3586 * so shift by i*3 each time
3587 */
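		/*
		 * For example, dcb_tc[] = {0, 0, 1, 1, 2, 2, 3, 3} yields
		 * queue_mapping = (1 << 6) | (1 << 9) | (2 << 12) |
		 * (2 << 15) | (3 << 18) | (3 << 21) = 0x6D2240.
		 */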
3588 queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
3589
3590 IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
3591
3592 /* RTRPCS: DCB related */
3593 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);
3594
3595 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3596 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3597 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3598 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3599
3600 /* VFTA - enable all vlan filters */
3601 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3602 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3603 }
3604
3605 /* VFRE: pool enabling for receive - 16 or 32 */
3606 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
3607 num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3608
3609 /*
3610 * MPSAR - allow pools to read specific mac addresses
3611 * In this case, all pools should be able to read from mac addr 0
3612 */
3613 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
3614 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);
3615
3616 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
3617 for (i = 0; i < cfg->nb_pool_maps; i++) {
3618 /* set vlan id in VF register and set the valid bit */
3619 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
3620 (cfg->pool_map[i].vlan_id & 0xFFF)));
3621 /*
3622 * Put the allowed pools in VFB reg. As we only have 16 or 32
3623 * pools, we only need to use the first half of the register
3624 * i.e. bits 0-31
3625 */
3626 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
3627 }
3628}
3629
3630/**
3631 * ixgbe_dcb_tx_hw_config - Configure general DCB TX parameters
3632 * @dev: pointer to eth_dev structure
3633 * @dcb_config: pointer to ixgbe_dcb_config structure
3634 */
3635static void
3636ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
3637 struct ixgbe_dcb_config *dcb_config)
3638{
3639 uint32_t reg;
7c673cae
FG
3640 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3641
3642 PMD_INIT_FUNC_TRACE();
3643 if (hw->mac.type != ixgbe_mac_82598EB) {
3644 /* Disable the Tx desc arbiter so that MTQC can be changed */
3645 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3646 reg |= IXGBE_RTTDCS_ARBDIS;
3647 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3648
3649 /* Enable DCB for Tx with 8 TCs */
3650 if (dcb_config->num_tcs.pg_tcs == 8) {
3651 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
3652 } else {
3653 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
3654 }
3655 if (dcb_config->vt_mode)
3656 reg |= IXGBE_MTQC_VT_ENA;
3657 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
3658
7c673cae
FG
3659 /* Enable the Tx desc arbiter */
3660 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3661 reg &= ~IXGBE_RTTDCS_ARBDIS;
3662 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
3663
3664 /* Enable Security TX Buffer IFG for DCB */
3665 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
3666 reg |= IXGBE_SECTX_DCB;
3667 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
3668 }
3669}
3670
3671/**
3672 * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3673 * @dev: pointer to rte_eth_dev structure
3674 * @dcb_config: pointer to ixgbe_dcb_config structure
3675 */
3676static void
3677ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3678 struct ixgbe_dcb_config *dcb_config)
3679{
3680 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3681 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3682 struct ixgbe_hw *hw =
3683 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3684
3685 PMD_INIT_FUNC_TRACE();
3686 if (hw->mac.type != ixgbe_mac_82598EB)
3687 /*PF VF Transmit Enable*/
3688 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
3689 vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3690
3691 /*Configure general DCB TX parameters*/
3692 ixgbe_dcb_tx_hw_config(dev, dcb_config);
3693}
3694
3695static void
3696ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3697 struct ixgbe_dcb_config *dcb_config)
3698{
3699 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3700 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3701 struct ixgbe_dcb_tc_config *tc;
3702 uint8_t i, j;
3703
3704 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3705 if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
3706 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3707 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3708 } else {
3709 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3710 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3711 }
11fdf7f2
TL
3712
3713 /* Initialize User Priority to Traffic Class mapping */
3714 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3715 tc = &dcb_config->tc_config[j];
3716 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3717 }
3718
7c673cae
FG
3719 /* User Priority to Traffic Class mapping */
3720 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3721 j = vmdq_rx_conf->dcb_tc[i];
3722 tc = &dcb_config->tc_config[j];
11fdf7f2
TL
3723 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3724 (uint8_t)(1 << i);
7c673cae
FG
3725 }
3726}
3727
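/*
 * Illustrative sketch, not called by the driver, of the user-priority to
 * traffic-class bitmaps built above and in the ixgbe_dcb_*_config()
 * helpers that follow: a dcb_tc[] mapping of {0, 0, 0, 0, 1, 1, 1, 1}
 * produces a bitmap of 0x0F for TC0 and 0xF0 for TC1. The name below is
 * local to this example.
 */
static inline void __attribute__((unused))
ixgbe_example_up_to_tc_bitmaps(const uint8_t *dcb_tc, uint8_t *bitmap)
{
	uint8_t i;

	/* dcb_tc[] has ETH_DCB_NUM_USER_PRIORITIES entries, each a TC index */
	memset(bitmap, 0, IXGBE_DCB_MAX_TRAFFIC_CLASS);
	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
		bitmap[dcb_tc[i]] |= (uint8_t)(1 << i);
}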
3728static void
3729ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3730 struct ixgbe_dcb_config *dcb_config)
3731{
3732 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3733 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3734 struct ixgbe_dcb_tc_config *tc;
3735 uint8_t i, j;
3736
3737 /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
3738 if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
3739 dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
3740 dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
3741 } else {
3742 dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
3743 dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
3744 }
3745
11fdf7f2
TL
3746 /* Initialize User Priority to Traffic Class mapping */
3747 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3748 tc = &dcb_config->tc_config[j];
3749 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3750 }
3751
7c673cae
FG
3752 /* User Priority to Traffic Class mapping */
3753 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3754 j = vmdq_tx_conf->dcb_tc[i];
3755 tc = &dcb_config->tc_config[j];
11fdf7f2
TL
3756 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3757 (uint8_t)(1 << i);
7c673cae
FG
3758 }
3759}
3760
3761static void
3762ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
3763 struct ixgbe_dcb_config *dcb_config)
3764{
3765 struct rte_eth_dcb_rx_conf *rx_conf =
3766 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3767 struct ixgbe_dcb_tc_config *tc;
3768 uint8_t i, j;
3769
3770 dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3771 dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3772
11fdf7f2
TL
3773 /* Initialize User Priority to Traffic Class mapping */
3774 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3775 tc = &dcb_config->tc_config[j];
3776 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3777 }
3778
7c673cae
FG
3779 /* User Priority to Traffic Class mapping */
3780 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3781 j = rx_conf->dcb_tc[i];
3782 tc = &dcb_config->tc_config[j];
11fdf7f2
TL
3783 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3784 (uint8_t)(1 << i);
7c673cae
FG
3785 }
3786}
3787
3788static void
3789ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
3790 struct ixgbe_dcb_config *dcb_config)
3791{
3792 struct rte_eth_dcb_tx_conf *tx_conf =
3793 &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3794 struct ixgbe_dcb_tc_config *tc;
3795 uint8_t i, j;
3796
3797 dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3798 dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3799
11fdf7f2
TL
3800 /* Initialize User Priority to Traffic Class mapping */
3801 for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
3802 tc = &dcb_config->tc_config[j];
3803 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3804 }
3805
7c673cae
FG
3806 /* User Priority to Traffic Class mapping */
3807 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
3808 j = tx_conf->dcb_tc[i];
3809 tc = &dcb_config->tc_config[j];
11fdf7f2
TL
3810 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3811 (uint8_t)(1 << i);
7c673cae
FG
3812 }
3813}
3814
3815/**
3816 * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
11fdf7f2 3817 * @dev: pointer to eth_dev structure
7c673cae
FG
3818 * @dcb_config: pointer to ixgbe_dcb_config structure
3819 */
3820static void
11fdf7f2
TL
3821ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
3822 struct ixgbe_dcb_config *dcb_config)
7c673cae
FG
3823{
3824 uint32_t reg;
3825 uint32_t vlanctrl;
3826 uint8_t i;
11fdf7f2
TL
3827 uint32_t q;
3828 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7c673cae
FG
3829
3830 PMD_INIT_FUNC_TRACE();
3831 /*
3832 * Disable the arbiter before changing parameters
3833 * (always enable recycle mode; WSP)
3834 */
3835 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
3836 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3837
3838 if (hw->mac.type != ixgbe_mac_82598EB) {
3839 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
3840 if (dcb_config->num_tcs.pg_tcs == 4) {
3841 if (dcb_config->vt_mode)
3842 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3843 IXGBE_MRQC_VMDQRT4TCEN;
3844 else {
 3845 /* whether the mode is DCB or DCB_RSS, just
 3846 * set the MRQE field to RSSXTCEN; RSS itself is
 3847 * controlled by the RSS_FIELD bits
3848 */
3849 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3850 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3851 IXGBE_MRQC_RTRSS4TCEN;
3852 }
3853 }
3854 if (dcb_config->num_tcs.pg_tcs == 8) {
3855 if (dcb_config->vt_mode)
3856 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3857 IXGBE_MRQC_VMDQRT8TCEN;
3858 else {
3859 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
3860 reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
3861 IXGBE_MRQC_RTRSS8TCEN;
3862 }
3863 }
3864
3865 IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
11fdf7f2
TL
3866
3867 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3868 /* Disable drop for all queues in VMDQ mode*/
3869 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3870 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3871 (IXGBE_QDE_WRITE |
3872 (q << IXGBE_QDE_IDX_SHIFT)));
3873 } else {
3874 /* Enable drop for all queues in SRIOV mode */
3875 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
3876 IXGBE_WRITE_REG(hw, IXGBE_QDE,
3877 (IXGBE_QDE_WRITE |
3878 (q << IXGBE_QDE_IDX_SHIFT) |
3879 IXGBE_QDE_ENABLE));
3880 }
7c673cae
FG
3881 }
3882
3883 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
3884 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3885 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
3886 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
3887
3888 /* VFTA - enable all vlan filters */
3889 for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
3890 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
3891 }
3892
3893 /*
3894 * Configure Rx packet plane (recycle mode; WSP) and
3895 * enable arbiter
3896 */
3897 reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
3898 IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
3899}
3900
3901static void
3902ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
3903 uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3904{
3905 switch (hw->mac.type) {
3906 case ixgbe_mac_82598EB:
3907 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
3908 break;
3909 case ixgbe_mac_82599EB:
3910 case ixgbe_mac_X540:
3911 case ixgbe_mac_X550:
3912 case ixgbe_mac_X550EM_x:
3913 case ixgbe_mac_X550EM_a:
3914 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
3915 tsa, map);
3916 break;
3917 default:
3918 break;
3919 }
3920}
3921
3922static void
3923ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
3924 uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3925{
3926 switch (hw->mac.type) {
3927 case ixgbe_mac_82598EB:
3928 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
3929 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
3930 break;
3931 case ixgbe_mac_82599EB:
3932 case ixgbe_mac_X540:
3933 case ixgbe_mac_X550:
3934 case ixgbe_mac_X550EM_x:
3935 case ixgbe_mac_X550EM_a:
3936 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
3937 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
3938 break;
3939 default:
3940 break;
3941 }
3942}
3943
3944#define DCB_RX_CONFIG 1
3945#define DCB_TX_CONFIG 1
3946#define DCB_TX_PB 1024
3947/**
3948 * ixgbe_dcb_hw_configure - Enable DCB and configure
3949 * general DCB in VT mode and non-VT mode parameters
3950 * @dev: pointer to rte_eth_dev structure
3951 * @dcb_config: pointer to ixgbe_dcb_config structure
3952 */
3953static int
3954ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3955 struct ixgbe_dcb_config *dcb_config)
3956{
3957 int ret = 0;
3958 uint8_t i, pfc_en, nb_tcs;
3959 uint16_t pbsize, rx_buffer_size;
3960 uint8_t config_dcb_rx = 0;
3961 uint8_t config_dcb_tx = 0;
3962 uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3963 uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3964 uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3965 uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3966 uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
3967 struct ixgbe_dcb_tc_config *tc;
3968 uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3969 struct ixgbe_hw *hw =
3970 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11fdf7f2
TL
3971 struct ixgbe_bw_conf *bw_conf =
3972 IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
7c673cae
FG
3973
3974 switch (dev->data->dev_conf.rxmode.mq_mode) {
3975 case ETH_MQ_RX_VMDQ_DCB:
3976 dcb_config->vt_mode = true;
3977 if (hw->mac.type != ixgbe_mac_82598EB) {
3978 config_dcb_rx = DCB_RX_CONFIG;
3979 /*
3980 *get dcb and VT rx configuration parameters
3981 *from rte_eth_conf
3982 */
3983 ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
3984 /*Configure general VMDQ and DCB RX parameters*/
3985 ixgbe_vmdq_dcb_configure(dev);
3986 }
3987 break;
3988 case ETH_MQ_RX_DCB:
3989 case ETH_MQ_RX_DCB_RSS:
3990 dcb_config->vt_mode = false;
3991 config_dcb_rx = DCB_RX_CONFIG;
3992 /* Get dcb TX configuration parameters from rte_eth_conf */
3993 ixgbe_dcb_rx_config(dev, dcb_config);
3994 /*Configure general DCB RX parameters*/
11fdf7f2 3995 ixgbe_dcb_rx_hw_config(dev, dcb_config);
7c673cae
FG
3996 break;
3997 default:
3998 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
3999 break;
4000 }
4001 switch (dev->data->dev_conf.txmode.mq_mode) {
4002 case ETH_MQ_TX_VMDQ_DCB:
4003 dcb_config->vt_mode = true;
4004 config_dcb_tx = DCB_TX_CONFIG;
4005 /* get DCB and VT TX configuration parameters
4006 * from rte_eth_conf
4007 */
4008 ixgbe_dcb_vt_tx_config(dev, dcb_config);
4009 /*Configure general VMDQ and DCB TX parameters*/
4010 ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
4011 break;
4012
4013 case ETH_MQ_TX_DCB:
4014 dcb_config->vt_mode = false;
4015 config_dcb_tx = DCB_TX_CONFIG;
4016 /*get DCB TX configuration parameters from rte_eth_conf*/
4017 ixgbe_dcb_tx_config(dev, dcb_config);
4018 /*Configure general DCB TX parameters*/
4019 ixgbe_dcb_tx_hw_config(dev, dcb_config);
4020 break;
4021 default:
4022 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
4023 break;
4024 }
4025
4026 nb_tcs = dcb_config->num_tcs.pfc_tcs;
4027 /* Unpack map */
4028 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
4029 if (nb_tcs == ETH_4_TCS) {
4030 /* Avoid un-configured priority mapping to TC0 */
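		/*
		 * mask starts with all eight TC bits set; the first loop
		 * below clears the TCs already used by priorities 0-3, and
		 * the second loop assigns the remaining priorities (4-7) to
		 * the TCs that are still unused, so none of them silently
		 * ends up on TC0.
		 */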
4031 uint8_t j = 4;
4032 uint8_t mask = 0xFF;
4033
4034 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
4035 mask = (uint8_t)(mask & (~(1 << map[i])));
4036 for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
4037 if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
4038 map[j++] = i;
4039 mask >>= 1;
4040 }
4041 /* Re-configure 4 TCs BW */
4042 for (i = 0; i < nb_tcs; i++) {
4043 tc = &dcb_config->tc_config[i];
11fdf7f2
TL
4044 if (bw_conf->tc_num != nb_tcs)
4045 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4046 (uint8_t)(100 / nb_tcs);
7c673cae
FG
4047 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4048 (uint8_t)(100 / nb_tcs);
4049 }
4050 for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
4051 tc = &dcb_config->tc_config[i];
4052 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
4053 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
4054 }
11fdf7f2
TL
4055 } else {
4056 /* Re-configure 8 TCs BW */
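		/*
		 * 100 / 8 = 12, and adding (i & 1) alternates 12/13 so the
		 * eight bandwidth group percentages sum to exactly 100.
		 */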
4057 for (i = 0; i < nb_tcs; i++) {
4058 tc = &dcb_config->tc_config[i];
4059 if (bw_conf->tc_num != nb_tcs)
4060 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
4061 (uint8_t)(100 / nb_tcs + (i & 1));
4062 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
4063 (uint8_t)(100 / nb_tcs + (i & 1));
4064 }
7c673cae
FG
4065 }
4066
4067 switch (hw->mac.type) {
4068 case ixgbe_mac_X550:
4069 case ixgbe_mac_X550EM_x:
4070 case ixgbe_mac_X550EM_a:
4071 rx_buffer_size = X550_RX_BUFFER_SIZE;
4072 break;
4073 default:
4074 rx_buffer_size = NIC_RX_BUFFER_SIZE;
4075 break;
4076 }
4077
4078 if (config_dcb_rx) {
4079 /* Set RX buffer size */
4080 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4081 uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
4082
4083 for (i = 0; i < nb_tcs; i++) {
4084 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
4085 }
4086 /* zero alloc all unused TCs */
4087 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4088 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4089 }
4090 }
4091 if (config_dcb_tx) {
4092 /* Only support an equally distributed
4093 * Tx packet buffer strategy.
4094 */
4095 uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
4096 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
4097
4098 for (i = 0; i < nb_tcs; i++) {
4099 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4100 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4101 }
4102 /* Clear unused TCs, if any, to zero buffer size*/
4103 for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
4104 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4105 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4106 }
4107 }
4108
4109 /*Calculates traffic class credits*/
4110 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4111 IXGBE_DCB_TX_CONFIG);
4112 ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
4113 IXGBE_DCB_RX_CONFIG);
4114
4115 if (config_dcb_rx) {
4116 /* Unpack CEE standard containers */
4117 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
4118 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4119 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
4120 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
4121 /* Configure PG(ETS) RX */
4122 ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
4123 }
4124
4125 if (config_dcb_tx) {
4126 /* Unpack CEE standard containers */
4127 ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
4128 ixgbe_dcb_unpack_max_cee(dcb_config, max);
4129 ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
4130 ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
4131 /* Configure PG(ETS) TX */
4132 ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
4133 }
4134
4135 /*Configure queue statistics registers*/
4136 ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
4137
4138 /* Check if the PFC is supported */
4139 if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
4140 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
4141 for (i = 0; i < nb_tcs; i++) {
4142 /*
 4143 * If the TC count is 8 and the default high_water is 48,
 4144 * the default low_water is 16.
4145 */
4146 hw->fc.high_water[i] = (pbsize * 3) / 4;
4147 hw->fc.low_water[i] = pbsize / 4;
4148 /* Enable pfc for this TC */
4149 tc = &dcb_config->tc_config[i];
4150 tc->pfc = ixgbe_dcb_pfc_enabled;
4151 }
4152 ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
4153 if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
4154 pfc_en &= 0x0F;
4155 ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
4156 }
4157
4158 return ret;
4159}
4160
4161/**
4162 * ixgbe_configure_dcb - Configure DCB Hardware
4163 * @dev: pointer to rte_eth_dev
4164 */
4165void ixgbe_configure_dcb(struct rte_eth_dev *dev)
4166{
4167 struct ixgbe_dcb_config *dcb_cfg =
4168 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
4169 struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
4170
4171 PMD_INIT_FUNC_TRACE();
4172
4173 /* check support mq_mode for DCB */
4174 if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
4175 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
4176 (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
4177 return;
4178
4179 if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
4180 return;
4181
4182 /** Configure DCB hardware **/
4183 ixgbe_dcb_hw_configure(dev, dcb_cfg);
4184}
4185
4186/*
 4187 * VMDq is only supported on 10 GbE NICs.
4188 */
4189static void
4190ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
4191{
4192 struct rte_eth_vmdq_rx_conf *cfg;
4193 struct ixgbe_hw *hw;
4194 enum rte_eth_nb_pools num_pools;
4195 uint32_t mrqc, vt_ctl, vlanctrl;
4196 uint32_t vmolr = 0;
4197 int i;
4198
4199 PMD_INIT_FUNC_TRACE();
4200 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4201 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
4202 num_pools = cfg->nb_queue_pools;
4203
4204 ixgbe_rss_disable(dev);
4205
4206 /* MRQC: enable vmdq */
4207 mrqc = IXGBE_MRQC_VMDQEN;
4208 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4209
4210 /* PFVTCTL: turn on virtualisation and set the default pool */
4211 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
4212 if (cfg->enable_default_pool)
4213 vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
4214 else
4215 vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
4216
4217 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
4218
4219 for (i = 0; i < (int)num_pools; i++) {
4220 vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
4221 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
4222 }
4223
4224 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
4225 vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
4226 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
4227 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
4228
4229 /* VFTA - enable all vlan filters */
4230 for (i = 0; i < NUM_VFTA_REGISTERS; i++)
4231 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), UINT32_MAX);
4232
4233 /* VFRE: pool enabling for receive - 64 */
4234 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), UINT32_MAX);
4235 if (num_pools == ETH_64_POOLS)
4236 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), UINT32_MAX);
4237
4238 /*
4239 * MPSAR - allow pools to read specific mac addresses
4240 * In this case, all pools should be able to read from mac addr 0
4241 */
4242 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), UINT32_MAX);
4243 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), UINT32_MAX);
4244
4245 /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
4246 for (i = 0; i < cfg->nb_pool_maps; i++) {
4247 /* set vlan id in VF register and set the valid bit */
4248 IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
4249 (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
4250 /*
 4251 * Put the allowed pools in the VLVFB register pair. Pools 0-31
 4252 * map to bits 0-31 of VLVFB(2*i); when the bitmap only uses
 4253 * pools 32-63, program the upper half, VLVFB(2*i + 1), instead.
4254 */
4255 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
4256 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2),
4257 (cfg->pool_map[i].pools & UINT32_MAX));
4258 else
4259 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)),
4260 ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
4261
4262 }
4263
4264 /* PFDMA Tx General Switch Control Enables VMDQ loopback */
4265 if (cfg->enable_loop_back) {
4266 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
4267 for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++)
4268 IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
4269 }
4270
4271 IXGBE_WRITE_FLUSH(hw);
4272}
4273
4274/*
 4275 * ixgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
4276 * @hw: pointer to hardware structure
4277 */
4278static void
4279ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
4280{
4281 uint32_t reg;
4282 uint32_t q;
4283
4284 PMD_INIT_FUNC_TRACE();
4285 /*PF VF Transmit Enable*/
4286 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), UINT32_MAX);
4287 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), UINT32_MAX);
4288
4289 /* Disable the Tx desc arbiter so that MTQC can be changed */
4290 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4291 reg |= IXGBE_RTTDCS_ARBDIS;
4292 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4293
4294 reg = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4295 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
4296
4297 /* Disable drop for all queues */
4298 for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
4299 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4300 (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
4301
4302 /* Enable the Tx desc arbiter */
4303 reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4304 reg &= ~IXGBE_RTTDCS_ARBDIS;
4305 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
4306
4307 IXGBE_WRITE_FLUSH(hw);
4308}
4309
4310static int __attribute__((cold))
4311ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
4312{
4313 struct ixgbe_rx_entry *rxe = rxq->sw_ring;
4314 uint64_t dma_addr;
4315 unsigned int i;
4316
4317 /* Initialize software ring entries */
4318 for (i = 0; i < rxq->nb_rx_desc; i++) {
4319 volatile union ixgbe_adv_rx_desc *rxd;
4320 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
4321
4322 if (mbuf == NULL) {
4323 PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
4324 (unsigned) rxq->queue_id);
4325 return -ENOMEM;
4326 }
4327
7c673cae 4328 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
7c673cae
FG
4329 mbuf->port = rxq->port_id;
4330
4331 dma_addr =
11fdf7f2 4332 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
7c673cae
FG
4333 rxd = &rxq->rx_ring[i];
4334 rxd->read.hdr_addr = 0;
4335 rxd->read.pkt_addr = dma_addr;
4336 rxe[i].mbuf = mbuf;
4337 }
4338
4339 return 0;
4340}
4341
4342static int
4343ixgbe_config_vf_rss(struct rte_eth_dev *dev)
4344{
4345 struct ixgbe_hw *hw;
4346 uint32_t mrqc;
4347
4348 ixgbe_rss_configure(dev);
4349
4350 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4351
4352 /* MRQC: enable VF RSS */
4353 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
4354 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
4355 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4356 case ETH_64_POOLS:
4357 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
4358 break;
4359
4360 case ETH_32_POOLS:
4361 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
4362 break;
4363
4364 default:
4365 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
4366 return -EINVAL;
4367 }
4368
4369 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4370
4371 return 0;
4372}
4373
4374static int
4375ixgbe_config_vf_default(struct rte_eth_dev *dev)
4376{
4377 struct ixgbe_hw *hw =
4378 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4379
4380 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4381 case ETH_64_POOLS:
4382 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4383 IXGBE_MRQC_VMDQEN);
4384 break;
4385
4386 case ETH_32_POOLS:
4387 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4388 IXGBE_MRQC_VMDQRT4TCEN);
4389 break;
4390
4391 case ETH_16_POOLS:
4392 IXGBE_WRITE_REG(hw, IXGBE_MRQC,
4393 IXGBE_MRQC_VMDQRT8TCEN);
4394 break;
4395 default:
4396 PMD_INIT_LOG(ERR,
4397 "invalid pool number in IOV mode");
4398 break;
4399 }
4400 return 0;
4401}
4402
4403static int
4404ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
4405{
4406 struct ixgbe_hw *hw =
4407 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4408
4409 if (hw->mac.type == ixgbe_mac_82598EB)
4410 return 0;
4411
4412 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4413 /*
4414 * SRIOV inactive scheme
4415 * any DCB/RSS w/o VMDq multi-queue setting
4416 */
4417 switch (dev->data->dev_conf.rxmode.mq_mode) {
4418 case ETH_MQ_RX_RSS:
4419 case ETH_MQ_RX_DCB_RSS:
4420 case ETH_MQ_RX_VMDQ_RSS:
4421 ixgbe_rss_configure(dev);
4422 break;
4423
4424 case ETH_MQ_RX_VMDQ_DCB:
4425 ixgbe_vmdq_dcb_configure(dev);
4426 break;
4427
4428 case ETH_MQ_RX_VMDQ_ONLY:
4429 ixgbe_vmdq_rx_hw_configure(dev);
4430 break;
4431
4432 case ETH_MQ_RX_NONE:
4433 default:
 4434 /* if mq_mode is none, disable RSS mode. */
4435 ixgbe_rss_disable(dev);
4436 break;
4437 }
4438 } else {
11fdf7f2
TL
4439 /* SRIOV active scheme
4440 * Support RSS together with SRIOV.
7c673cae
FG
4441 */
4442 switch (dev->data->dev_conf.rxmode.mq_mode) {
4443 case ETH_MQ_RX_RSS:
4444 case ETH_MQ_RX_VMDQ_RSS:
4445 ixgbe_config_vf_rss(dev);
4446 break;
4447 case ETH_MQ_RX_VMDQ_DCB:
11fdf7f2
TL
4448 case ETH_MQ_RX_DCB:
 4449 /* In SRIOV, the configuration is the same as in the VMDq case */
7c673cae
FG
4450 ixgbe_vmdq_dcb_configure(dev);
4451 break;
11fdf7f2 4452 /* DCB/RSS together with SRIOV is not supported */
7c673cae 4453 case ETH_MQ_RX_VMDQ_DCB_RSS:
11fdf7f2 4454 case ETH_MQ_RX_DCB_RSS:
7c673cae
FG
4455 PMD_INIT_LOG(ERR,
4456 "Could not support DCB/RSS with VMDq & SRIOV");
4457 return -1;
4458 default:
4459 ixgbe_config_vf_default(dev);
4460 break;
4461 }
4462 }
4463
4464 return 0;
4465}
4466
4467static int
4468ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
4469{
4470 struct ixgbe_hw *hw =
4471 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4472 uint32_t mtqc;
4473 uint32_t rttdcs;
4474
4475 if (hw->mac.type == ixgbe_mac_82598EB)
4476 return 0;
4477
4478 /* disable arbiter before setting MTQC */
4479 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
4480 rttdcs |= IXGBE_RTTDCS_ARBDIS;
4481 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4482
4483 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4484 /*
4485 * SRIOV inactive scheme
4486 * any DCB w/o VMDq multi-queue setting
4487 */
4488 if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
4489 ixgbe_vmdq_tx_hw_configure(hw);
4490 else {
4491 mtqc = IXGBE_MTQC_64Q_1PB;
4492 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4493 }
4494 } else {
4495 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4496
4497 /*
4498 * SRIOV active scheme
4499 * FIXME if support DCB together with VMDq & SRIOV
4500 */
4501 case ETH_64_POOLS:
4502 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF;
4503 break;
4504 case ETH_32_POOLS:
4505 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_32VF;
4506 break;
4507 case ETH_16_POOLS:
4508 mtqc = IXGBE_MTQC_VT_ENA | IXGBE_MTQC_RT_ENA |
4509 IXGBE_MTQC_8TC_8TQ;
4510 break;
4511 default:
4512 mtqc = IXGBE_MTQC_64Q_1PB;
4513 PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
4514 }
4515 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
4516 }
4517
4518 /* re-enable arbiter */
4519 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
4520 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
4521
4522 return 0;
4523}
4524
4525/**
4526 * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
4527 *
4528 * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
4529 * spec rev. 3.0 chapter 8.2.3.8.13.
4530 *
4531 * @pool Memory pool of the Rx queue
4532 */
4533static inline uint32_t
4534ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
4535{
4536 struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
4537
4538 /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
4539 uint16_t maxdesc =
4540 IPV4_MAX_PKT_LEN /
4541 (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
4542
4543 if (maxdesc >= 16)
4544 return IXGBE_RSCCTL_MAXDESC_16;
4545 else if (maxdesc >= 8)
4546 return IXGBE_RSCCTL_MAXDESC_8;
4547 else if (maxdesc >= 4)
4548 return IXGBE_RSCCTL_MAXDESC_4;
4549 else
4550 return IXGBE_RSCCTL_MAXDESC_1;
4551}
4552
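/*
 * Worked example for the helper above, assuming IPV4_MAX_PKT_LEN is the
 * 65535-byte IPv4 limit: with the common 2 KB + headroom mbuf data room
 * the usable buffer is 2048 bytes, 65535 / 2048 = 31 descriptors, and the
 * helper therefore returns IXGBE_RSCCTL_MAXDESC_16, the largest setting
 * it uses.
 */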
4553/**
4554 * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
4555 * interrupt
4556 *
4557 * (Taken from FreeBSD tree)
4558 * (yes this is all very magic and confusing :)
4559 *
4560 * @dev port handle
4561 * @entry the register array entry
4562 * @vector the MSIX vector for this queue
4563 * @type RX/TX/MISC
4564 */
4565static void
4566ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
4567{
4568 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4569 u32 ivar, index;
4570
4571 vector |= IXGBE_IVAR_ALLOC_VAL;
4572
4573 switch (hw->mac.type) {
4574
4575 case ixgbe_mac_82598EB:
4576 if (type == -1)
4577 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4578 else
4579 entry += (type * 64);
4580 index = (entry >> 2) & 0x1F;
4581 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4582 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4583 ivar |= (vector << (8 * (entry & 0x3)));
4584 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4585 break;
4586
4587 case ixgbe_mac_82599EB:
4588 case ixgbe_mac_X540:
4589 if (type == -1) { /* MISC IVAR */
4590 index = (entry & 1) * 8;
4591 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4592 ivar &= ~(0xFF << index);
4593 ivar |= (vector << index);
4594 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4595 } else { /* RX/TX IVARS */
4596 index = (16 * (entry & 1)) + (8 * type);
4597 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4598 ivar &= ~(0xFF << index);
4599 ivar |= (vector << index);
4600 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4601 }
4602
4603 break;
4604
4605 default:
4606 break;
4607 }
4608}
4609
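/*
 * Worked example for ixgbe_set_ivar() above: on 82599/X540, an Rx queue
 * (type 0) using entry 5 lands in IVAR(5 >> 1) = IVAR(2) at byte offset
 * 16 * (5 & 1) + 8 * 0 = 16, i.e. bits 23:16 of that register receive the
 * allocated vector.
 */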
4610void __attribute__((cold))
4611ixgbe_set_rx_function(struct rte_eth_dev *dev)
4612{
4613 uint16_t i, rx_using_sse;
4614 struct ixgbe_adapter *adapter =
4615 (struct ixgbe_adapter *)dev->data->dev_private;
4616
4617 /*
4618 * In order to allow Vector Rx there are a few configuration
4619 * conditions to be met and Rx Bulk Allocation should be allowed.
4620 */
4621 if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
4622 !adapter->rx_bulk_alloc_allowed) {
4623 PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
4624 "preconditions or RTE_IXGBE_INC_VECTOR is "
4625 "not enabled",
4626 dev->data->port_id);
4627
4628 adapter->rx_vec_allowed = false;
4629 }
4630
4631 /*
4632 * Initialize the appropriate LRO callback.
4633 *
4634 * If all queues satisfy the bulk allocation preconditions
 4635 * (adapter->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
4636 * Otherwise use a single allocation version.
4637 */
4638 if (dev->data->lro) {
4639 if (adapter->rx_bulk_alloc_allowed) {
4640 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
4641 "allocation version");
4642 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4643 } else {
4644 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
4645 "allocation version");
4646 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4647 }
4648 } else if (dev->data->scattered_rx) {
4649 /*
4650 * Set the non-LRO scattered callback: there are Vector and
4651 * single allocation versions.
4652 */
4653 if (adapter->rx_vec_allowed) {
4654 PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
4655 "callback (port=%d).",
4656 dev->data->port_id);
4657
4658 dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
4659 } else if (adapter->rx_bulk_alloc_allowed) {
4660 PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
4661 "allocation callback (port=%d).",
4662 dev->data->port_id);
4663 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
4664 } else {
4665 PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
4666 "single allocation) "
4667 "Scattered Rx callback "
4668 "(port=%d).",
4669 dev->data->port_id);
4670
4671 dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
4672 }
4673 /*
 4674 * Below we set "simple" callbacks according to port/queue parameters.
 4675 * If the parameters allow it, we choose between the following
4676 * callbacks:
4677 * - Vector
4678 * - Bulk Allocation
4679 * - Single buffer allocation (the simplest one)
4680 */
4681 } else if (adapter->rx_vec_allowed) {
4682 PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
4683 "burst size no less than %d (port=%d).",
4684 RTE_IXGBE_DESCS_PER_LOOP,
4685 dev->data->port_id);
4686
4687 dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
4688 } else if (adapter->rx_bulk_alloc_allowed) {
4689 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
4690 "satisfied. Rx Burst Bulk Alloc function "
4691 "will be used on port=%d.",
4692 dev->data->port_id);
4693
4694 dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
4695 } else {
4696 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
4697 "satisfied, or Scattered Rx is requested "
4698 "(port=%d).",
4699 dev->data->port_id);
4700
4701 dev->rx_pkt_burst = ixgbe_recv_pkts;
4702 }
4703
4704 /* Propagate information about RX function choice through all queues. */
4705
4706 rx_using_sse =
4707 (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
4708 dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
4709
4710 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4711 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4712
4713 rxq->rx_using_sse = rx_using_sse;
11fdf7f2
TL
4714#ifdef RTE_LIBRTE_SECURITY
4715 rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
4716 DEV_RX_OFFLOAD_SECURITY);
4717#endif
7c673cae
FG
4718 }
4719}
4720
4721/**
4722 * ixgbe_set_rsc - configure RSC related port HW registers
4723 *
4724 * Configures the port's RSC related registers according to the 4.6.7.2 chapter
4725 * of 82599 Spec (x540 configuration is virtually the same).
4726 *
4727 * @dev port handle
4728 *
4729 * Returns 0 in case of success or a non-zero error code
4730 */
4731static int
4732ixgbe_set_rsc(struct rte_eth_dev *dev)
4733{
4734 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4735 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4736 struct rte_eth_dev_info dev_info = { 0 };
4737 bool rsc_capable = false;
4738 uint16_t i;
4739 uint32_t rdrxctl;
11fdf7f2 4740 uint32_t rfctl;
7c673cae
FG
4741
4742 /* Sanity check */
4743 dev->dev_ops->dev_infos_get(dev, &dev_info);
4744 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
4745 rsc_capable = true;
4746
11fdf7f2 4747 if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
7c673cae
FG
4748 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4749 "support it");
4750 return -EINVAL;
4751 }
4752
4753 /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
4754
9f95a23c 4755 if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
11fdf7f2 4756 (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
7c673cae
FG
4757 /*
4758 * According to chapter of 4.6.7.2.1 of the Spec Rev.
4759 * 3.0 RSC configuration requires HW CRC stripping being
4760 * enabled. If user requested both HW CRC stripping off
4761 * and RSC on - return an error.
4762 */
 4763 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
 4764 "stripping is disabled");
4765 return -EINVAL;
4766 }
4767
4768 /* RFCTL configuration */
11fdf7f2
TL
4769 rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
4770 if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
4771 /*
4772 * Since NFS packets coalescing is not supported - clear
4773 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
4774 * enabled.
4775 */
4776 rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
4777 IXGBE_RFCTL_NFSR_DIS);
4778 else
4779 rfctl |= IXGBE_RFCTL_RSC_DIS;
4780 IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
7c673cae
FG
4781
4782 /* If LRO hasn't been requested - we are done here. */
11fdf7f2 4783 if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
7c673cae
FG
4784 return 0;
4785
4786 /* Set RDRXCTL.RSCACKC bit */
4787 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
4788 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
4789 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
4790
4791 /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
4792 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4793 struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
4794 uint32_t srrctl =
4795 IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
4796 uint32_t rscctl =
4797 IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
4798 uint32_t psrtype =
4799 IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
4800 uint32_t eitr =
4801 IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
4802
4803 /*
4804 * ixgbe PMD doesn't support header-split at the moment.
4805 *
4806 * Following the 4.6.7.2.1 chapter of the 82599/x540
4807 * Spec if RSC is enabled the SRRCTL[n].BSIZEHEADER
4808 * should be configured even if header split is not
 4809 * enabled. We configure it to 128 bytes, following the
4810 * recommendation in the spec.
4811 */
4812 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4813 srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
4814 IXGBE_SRRCTL_BSIZEHDR_MASK;
4815
4816 /*
4817 * TODO: Consider setting the Receive Descriptor Minimum
4818 * Threshold Size for an RSC case. This is not an obviously
 4819 * beneficial option, but it may be worth considering...
4820 */
4821
4822 rscctl |= IXGBE_RSCCTL_RSCEN;
4823 rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
4824 psrtype |= IXGBE_PSRTYPE_TCPHDR;
4825
4826 /*
4827 * RSC: Set ITR interval corresponding to 2K ints/s.
4828 *
 4829 * Full-sized RSC aggregations for a 10Gb/s link will
 4830 * arrive at a rate of about 20K aggregations/s.
 4831 *
 4832 * A 2K ints/s rate will cause only 10% of the
 4833 * aggregations to be closed due to interrupt timer
 4834 * expiration when streaming at wire speed.
4835 *
4836 * For a sparse streaming case this setting will yield
4837 * at most 500us latency for a single RSC aggregation.
4838 */
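		/*
		 * 2K interrupts/s corresponds to a 500 us ITR interval,
		 * which is where the 500 us latency bound above comes from.
		 */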
4839 eitr &= ~IXGBE_EITR_ITR_INT_MASK;
11fdf7f2
TL
4840 eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
4841 eitr |= IXGBE_EITR_CNT_WDIS;
7c673cae
FG
4842
4843 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4844 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
4845 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
4846 IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
4847
4848 /*
4849 * RSC requires the mapping of the queue to the
4850 * interrupt vector.
4851 */
4852 ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
4853 }
4854
4855 dev->data->lro = 1;
4856
4857 PMD_INIT_LOG(DEBUG, "enabling LRO mode");
4858
4859 return 0;
4860}
4861
4862/*
4863 * Initializes Receive Unit.
4864 */
4865int __attribute__((cold))
4866ixgbe_dev_rx_init(struct rte_eth_dev *dev)
4867{
4868 struct ixgbe_hw *hw;
4869 struct ixgbe_rx_queue *rxq;
4870 uint64_t bus_addr;
4871 uint32_t rxctrl;
4872 uint32_t fctrl;
4873 uint32_t hlreg0;
4874 uint32_t maxfrs;
4875 uint32_t srrctl;
4876 uint32_t rdrxctl;
4877 uint32_t rxcsum;
4878 uint16_t buf_size;
4879 uint16_t i;
4880 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4881 int rc;
4882
4883 PMD_INIT_FUNC_TRACE();
4884 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4885
4886 /*
4887 * Make sure receives are disabled while setting
4888 * up the RX context (registers, descriptor rings, etc.).
4889 */
4890 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4891 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
4892
 4893 /* Enable receipt of broadcast frames */
4894 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4895 fctrl |= IXGBE_FCTRL_BAM;
4896 fctrl |= IXGBE_FCTRL_DPF;
4897 fctrl |= IXGBE_FCTRL_PMCF;
4898 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4899
4900 /*
4901 * Configure CRC stripping, if any.
4902 */
4903 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
9f95a23c 4904 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
7c673cae 4905 hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
11fdf7f2
TL
4906 else
4907 hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
7c673cae
FG
4908
4909 /*
4910 * Configure jumbo frame support, if any.
4911 */
11fdf7f2 4912 if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
7c673cae
FG
4913 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
4914 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
4915 maxfrs &= 0x0000FFFF;
4916 maxfrs |= (rx_conf->max_rx_pkt_len << 16);
4917 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
4918 } else
4919 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
4920
4921 /*
9f95a23c 4922 * If loopback mode is configured, set LPBK bit.
7c673cae 4923 */
9f95a23c
TL
4924 if (dev->data->dev_conf.lpbk_mode != 0) {
4925 rc = ixgbe_check_supported_loopback_mode(dev);
4926 if (rc < 0) {
4927 PMD_INIT_LOG(ERR, "Unsupported loopback mode");
4928 return rc;
4929 }
7c673cae 4930 hlreg0 |= IXGBE_HLREG0_LPBK;
9f95a23c 4931 } else {
7c673cae 4932 hlreg0 &= ~IXGBE_HLREG0_LPBK;
9f95a23c 4933 }
7c673cae
FG
4934
4935 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4936
11fdf7f2
TL
4937 /*
4938 * Assume no header split and no VLAN strip support
 4939 * on any Rx queue first.
4940 */
4941 rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
7c673cae
FG
4942 /* Setup RX queues */
4943 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4944 rxq = dev->data->rx_queues[i];
4945
4946 /*
4947 * Reset crc_len in case it was changed after queue setup by a
4948 * call to configure.
4949 */
9f95a23c
TL
4950 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
4951 rxq->crc_len = ETHER_CRC_LEN;
4952 else
4953 rxq->crc_len = 0;
7c673cae
FG
4954
4955 /* Setup the Base and Length of the Rx Descriptor Rings */
4956 bus_addr = rxq->rx_ring_phys_addr;
4957 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rxq->reg_idx),
4958 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
4959 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rxq->reg_idx),
4960 (uint32_t)(bus_addr >> 32));
4961 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rxq->reg_idx),
4962 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4963 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
4964 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
4965
4966 /* Configure the SRRCTL register */
11fdf7f2 4967 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
7c673cae
FG
4968
4969 /* Set if packets are dropped when no descriptors available */
4970 if (rxq->drop_en)
4971 srrctl |= IXGBE_SRRCTL_DROP_EN;
4972
4973 /*
4974 * Configure the RX buffer size in the BSIZEPACKET field of
4975 * the SRRCTL register of the queue.
4976 * The value is in 1 KB resolution. Valid values can be from
4977 * 1 KB to 16 KB.
4978 */
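		/*
		 * For example, a pool created with the default 2 KB +
		 * headroom data room leaves 2048 bytes per buffer, which at
		 * 1 KB resolution becomes a BSIZEPACKET value of 2, i.e. a
		 * 2 KB hardware RX buffer.
		 */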
4979 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
4980 RTE_PKTMBUF_HEADROOM);
4981 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
4982 IXGBE_SRRCTL_BSIZEPKT_MASK);
4983
4984 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
4985
4986 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
4987 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
4988
4989 /* It adds dual VLAN length for supporting dual VLAN */
4990 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
4991 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
4992 dev->data->scattered_rx = 1;
11fdf7f2
TL
4993 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
4994 rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
7c673cae
FG
4995 }
4996
11fdf7f2 4997 if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
7c673cae
FG
4998 dev->data->scattered_rx = 1;
4999
5000 /*
5001 * Device configured with multiple RX queues.
5002 */
5003 ixgbe_dev_mq_rx_configure(dev);
5004
5005 /*
5006 * Setup the Checksum Register.
5007 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
 5008 * Enable IP/L4 checksum computation by hardware if requested to do so.
5009 */
5010 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
5011 rxcsum |= IXGBE_RXCSUM_PCSD;
11fdf7f2 5012 if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
7c673cae
FG
5013 rxcsum |= IXGBE_RXCSUM_IPPCSE;
5014 else
5015 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
5016
5017 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
5018
5019 if (hw->mac.type == ixgbe_mac_82599EB ||
5020 hw->mac.type == ixgbe_mac_X540) {
5021 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
9f95a23c 5022 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
7c673cae 5023 rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
11fdf7f2
TL
5024 else
5025 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
7c673cae
FG
5026 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
5027 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
5028 }
5029
5030 rc = ixgbe_set_rsc(dev);
5031 if (rc)
5032 return rc;
5033
5034 ixgbe_set_rx_function(dev);
5035
5036 return 0;
5037}
5038
5039/*
5040 * Initializes Transmit Unit.
5041 */
5042void __attribute__((cold))
5043ixgbe_dev_tx_init(struct rte_eth_dev *dev)
5044{
5045 struct ixgbe_hw *hw;
5046 struct ixgbe_tx_queue *txq;
5047 uint64_t bus_addr;
5048 uint32_t hlreg0;
5049 uint32_t txctrl;
5050 uint16_t i;
5051
5052 PMD_INIT_FUNC_TRACE();
5053 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5054
5055 /* Enable TX CRC (checksum offload requirement) and hw padding
5056 * (TSO requirement)
5057 */
5058 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
5059 hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
5060 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
5061
5062 /* Setup the Base and Length of the Tx Descriptor Rings */
5063 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5064 txq = dev->data->tx_queues[i];
5065
5066 bus_addr = txq->tx_ring_phys_addr;
5067 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
5068 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5069 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
5070 (uint32_t)(bus_addr >> 32));
5071 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(txq->reg_idx),
5072 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5073 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5074 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5075 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5076
5077 /*
5078 * Disable Tx Head Writeback RO bit, since this hoses
5079 * bookkeeping if things aren't delivered in order.
5080 */
5081 switch (hw->mac.type) {
5082 case ixgbe_mac_82598EB:
5083 txctrl = IXGBE_READ_REG(hw,
5084 IXGBE_DCA_TXCTRL(txq->reg_idx));
5085 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5086 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
5087 txctrl);
5088 break;
5089
5090 case ixgbe_mac_82599EB:
5091 case ixgbe_mac_X540:
5092 case ixgbe_mac_X550:
5093 case ixgbe_mac_X550EM_x:
5094 case ixgbe_mac_X550EM_a:
5095 default:
5096 txctrl = IXGBE_READ_REG(hw,
5097 IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
5098 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5099 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
5100 txctrl);
5101 break;
5102 }
5103 }
5104
5105 /* Device configured with multiple TX queues. */
5106 ixgbe_dev_mq_tx_configure(dev);
5107}
5108
5109/*
5110 * Check if requested loopback mode is supported
5111 */
5112int
5113ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev)
5114{
5115 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5116
5117 if (dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_TX_RX)
5118 if (hw->mac.type == ixgbe_mac_82599EB ||
5119 hw->mac.type == ixgbe_mac_X540 ||
5120 hw->mac.type == ixgbe_mac_X550 ||
5121 hw->mac.type == ixgbe_mac_X550EM_x ||
5122 hw->mac.type == ixgbe_mac_X550EM_a)
5123 return 0;
5124
5125 return -ENOTSUP;
5126}
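/*
 * Illustrative usage (editor's sketch, not part of the driver): an
 * application requests Tx->Rx loopback by setting the loopback mode in
 * the port configuration before configuring the device, e.g.:
 *
 *	struct rte_eth_conf port_conf = { 0 };
 *
 *	port_conf.lpbk_mode = IXGBE_LPBK_TX_RX;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *
 * where port_id, nb_rxq and nb_txq are application-defined.
 * ixgbe_check_supported_loopback_mode() above then rejects the request
 * with -ENOTSUP on MAC types that cannot do Tx->Rx loopback.
 */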
5127
5128/*
5129 * Set up link for 82599 loopback mode Tx->Rx.
5130 */
5131static inline void __attribute__((cold))
5132ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
5133{
5134 PMD_INIT_FUNC_TRACE();
5135
5136 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
5137 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
5138 IXGBE_SUCCESS) {
5139 PMD_INIT_LOG(ERR, "Could not enable loopback mode");
5140 /* ignore error */
5141 return;
5142 }
5143 }
5144
5145 /* Restart link */
5146 IXGBE_WRITE_REG(hw,
5147 IXGBE_AUTOC,
5148 IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU);
5149 ixgbe_reset_pipeline_82599(hw);
5150
5151 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
5152 msec_delay(50);
5153}
5154
5155
5156/*
5157 * Start Transmit and Receive Units.
5158 */
5159int __attribute__((cold))
5160ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
5161{
5162 struct ixgbe_hw *hw;
5163 struct ixgbe_tx_queue *txq;
5164 struct ixgbe_rx_queue *rxq;
5165 uint32_t txdctl;
5166 uint32_t dmatxctl;
5167 uint32_t rxctrl;
5168 uint16_t i;
5169 int ret = 0;
5170
5171 PMD_INIT_FUNC_TRACE();
5172 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5173
5174 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5175 txq = dev->data->tx_queues[i];
5176 /* Setup Transmit Threshold Registers */
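 /* PTHRESH occupies TXDCTL bits 6:0, HTHRESH bits 14:8 and WTHRESH
 * bits 22:16, hence the 0x7F masks and the 8/16-bit shifts below.
 */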
5177 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5178 txdctl |= txq->pthresh & 0x7F;
5179 txdctl |= ((txq->hthresh & 0x7F) << 8);
5180 txdctl |= ((txq->wthresh & 0x7F) << 16);
5181 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5182 }
5183
5184 if (hw->mac.type != ixgbe_mac_82598EB) {
5185 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
5186 dmatxctl |= IXGBE_DMATXCTL_TE;
5187 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
5188 }
5189
5190 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5191 txq = dev->data->tx_queues[i];
5192 if (!txq->tx_deferred_start) {
5193 ret = ixgbe_dev_tx_queue_start(dev, i);
5194 if (ret < 0)
5195 return ret;
5196 }
5197 }
5198
5199 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5200 rxq = dev->data->rx_queues[i];
5201 if (!rxq->rx_deferred_start) {
5202 ret = ixgbe_dev_rx_queue_start(dev, i);
5203 if (ret < 0)
5204 return ret;
5205 }
5206 }
5207
5208 /* Enable Receive engine */
5209 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5210 if (hw->mac.type == ixgbe_mac_82598EB)
5211 rxctrl |= IXGBE_RXCTRL_DMBYPS;
5212 rxctrl |= IXGBE_RXCTRL_RXEN;
5213 hw->mac.ops.enable_rx_dma(hw, rxctrl);
5214
5215 /* If loopback mode is enabled, set up the link accordingly */
5216 if (dev->data->dev_conf.lpbk_mode != 0) {
5217 if (hw->mac.type == ixgbe_mac_82599EB)
5218 ixgbe_setup_loopback_link_82599(hw);
5219 else if (hw->mac.type == ixgbe_mac_X540 ||
5220 hw->mac.type == ixgbe_mac_X550 ||
5221 hw->mac.type == ixgbe_mac_X550EM_x ||
5222 hw->mac.type == ixgbe_mac_X550EM_a)
5223 ixgbe_setup_loopback_link_x540_x550(hw, true);
5224 }
7c673cae 5225
5226#ifdef RTE_LIBRTE_SECURITY
5227 if ((dev->data->dev_conf.rxmode.offloads &
5228 DEV_RX_OFFLOAD_SECURITY) ||
5229 (dev->data->dev_conf.txmode.offloads &
5230 DEV_TX_OFFLOAD_SECURITY)) {
5231 ret = ixgbe_crypto_enable_ipsec(dev);
5232 if (ret != 0) {
5233 PMD_DRV_LOG(ERR,
5234 "ixgbe_crypto_enable_ipsec fails with %d.",
5235 ret);
5236 return ret;
5237 }
5238 }
5239#endif
5240
5241 return 0;
5242}
5243
5244/*
5245 * Start Receive Units for specified queue.
5246 */
5247int __attribute__((cold))
5248ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5249{
5250 struct ixgbe_hw *hw;
5251 struct ixgbe_rx_queue *rxq;
5252 uint32_t rxdctl;
5253 int poll_ms;
5254
5255 PMD_INIT_FUNC_TRACE();
5256 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5257
11fdf7f2 5258 rxq = dev->data->rx_queues[rx_queue_id];
7c673cae 5259
5260 /* Allocate buffers for descriptor rings */
5261 if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
5262 PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
5263 rx_queue_id);
7c673cae 5264 return -1;
5265 }
5266 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5267 rxdctl |= IXGBE_RXDCTL_ENABLE;
5268 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
5269
5270 /* Wait until RX Enable ready */
5271 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5272 do {
5273 rte_delay_ms(1);
5274 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5275 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5276 if (!poll_ms)
5277 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
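 /* Make the descriptor ring updates visible before bumping the tail;
 * seeding RDT with nb_rx_desc - 1 hands all but one descriptor to the
 * hardware.
 */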
5278 rte_wmb();
5279 IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
5280 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
5281 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5282
5283 return 0;
5284}
5285
5286/*
5287 * Stop Receive Units for specified queue.
5288 */
5289int __attribute__((cold))
5290ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
5291{
5292 struct ixgbe_hw *hw;
5293 struct ixgbe_adapter *adapter =
5294 (struct ixgbe_adapter *)dev->data->dev_private;
5295 struct ixgbe_rx_queue *rxq;
5296 uint32_t rxdctl;
5297 int poll_ms;
5298
5299 PMD_INIT_FUNC_TRACE();
5300 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5301
11fdf7f2 5302 rxq = dev->data->rx_queues[rx_queue_id];
7c673cae 5303
5304 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5305 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
5306 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
7c673cae 5307
5308 /* Wait until RX Enable bit clear */
5309 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5310 do {
5311 rte_delay_ms(1);
5312 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
5313 } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
5314 if (!poll_ms)
5315 PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
7c673cae 5316
11fdf7f2 5317 rte_delay_us(RTE_IXGBE_WAIT_100_US);
7c673cae 5318
5319 ixgbe_rx_queue_release_mbufs(rxq);
5320 ixgbe_reset_rx_queue(adapter, rxq);
5321 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5322
5323 return 0;
5324}
5325
5326
5327/*
5328 * Start Transmit Units for specified queue.
5329 */
5330int __attribute__((cold))
5331ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5332{
5333 struct ixgbe_hw *hw;
5334 struct ixgbe_tx_queue *txq;
5335 uint32_t txdctl;
5336 int poll_ms;
5337
5338 PMD_INIT_FUNC_TRACE();
5339 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5340
11fdf7f2 5341 txq = dev->data->tx_queues[tx_queue_id];
9f95a23c 5342 IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
5343 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5344 txdctl |= IXGBE_TXDCTL_ENABLE;
5345 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
7c673cae 5346
5347 /* Wait until TX Enable ready */
5348 if (hw->mac.type == ixgbe_mac_82599EB) {
5349 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5350 do {
5351 rte_delay_ms(1);
5352 txdctl = IXGBE_READ_REG(hw,
5353 IXGBE_TXDCTL(txq->reg_idx));
5354 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5355 if (!poll_ms)
5356 PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
5357 tx_queue_id);
5358 }
5359 rte_wmb();
5360 IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
5361 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
5362
5363 return 0;
5364}
5365
5366/*
5367 * Stop Transmit Units for specified queue.
5368 */
5369int __attribute__((cold))
5370ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
5371{
5372 struct ixgbe_hw *hw;
5373 struct ixgbe_tx_queue *txq;
5374 uint32_t txdctl;
5375 uint32_t txtdh, txtdt;
5376 int poll_ms;
5377
5378 PMD_INIT_FUNC_TRACE();
5379 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5380
5381 txq = dev->data->tx_queues[tx_queue_id];
5382
5383 /* Wait until TX queue is empty */
5384 if (hw->mac.type == ixgbe_mac_82599EB) {
5385 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5386 do {
5387 rte_delay_us(RTE_IXGBE_WAIT_100_US);
5388 txtdh = IXGBE_READ_REG(hw,
5389 IXGBE_TDH(txq->reg_idx));
5390 txtdt = IXGBE_READ_REG(hw,
5391 IXGBE_TDT(txq->reg_idx));
5392 } while (--poll_ms && (txtdh != txtdt));
5393 if (!poll_ms)
5394 PMD_INIT_LOG(ERR,
5395 "Tx Queue %d is not empty when stopping.",
5396 tx_queue_id);
5397 }
5398
5399 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
5400 txdctl &= ~IXGBE_TXDCTL_ENABLE;
5401 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
5402
5403 /* Wait until TX Enable bit clear */
5404 if (hw->mac.type == ixgbe_mac_82599EB) {
5405 poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
5406 do {
5407 rte_delay_ms(1);
5408 txdctl = IXGBE_READ_REG(hw,
5409 IXGBE_TXDCTL(txq->reg_idx));
5410 } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
5411 if (!poll_ms)
5412 PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
5413 tx_queue_id);
5414 }
5415
5416 if (txq->ops != NULL) {
5417 txq->ops->release_mbufs(txq);
5418 txq->ops->reset(txq);
5419 }
5420 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
5421
5422 return 0;
5423}
5424
5425void
5426ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5427 struct rte_eth_rxq_info *qinfo)
5428{
5429 struct ixgbe_rx_queue *rxq;
5430
5431 rxq = dev->data->rx_queues[queue_id];
5432
5433 qinfo->mp = rxq->mb_pool;
5434 qinfo->scattered_rx = dev->data->scattered_rx;
5435 qinfo->nb_desc = rxq->nb_rx_desc;
5436
5437 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
5438 qinfo->conf.rx_drop_en = rxq->drop_en;
5439 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
11fdf7f2 5440 qinfo->conf.offloads = rxq->offloads;
5441}
5442
5443void
5444ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
5445 struct rte_eth_txq_info *qinfo)
5446{
5447 struct ixgbe_tx_queue *txq;
5448
5449 txq = dev->data->tx_queues[queue_id];
5450
5451 qinfo->nb_desc = txq->nb_tx_desc;
5452
5453 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
5454 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
5455 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
5456
5457 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
5458 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
11fdf7f2 5459 qinfo->conf.offloads = txq->offloads;
5460 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
5461}
5462
5463/*
5464 * [VF] Initializes Receive Unit.
5465 */
5466int __attribute__((cold))
5467ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
5468{
5469 struct ixgbe_hw *hw;
5470 struct ixgbe_rx_queue *rxq;
11fdf7f2 5471 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
5472 uint64_t bus_addr;
5473 uint32_t srrctl, psrtype = 0;
5474 uint16_t buf_size;
5475 uint16_t i;
5476 int ret;
5477
5478 PMD_INIT_FUNC_TRACE();
5479 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5480
5481 if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
5482 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid; "
5483 "it must be a power of 2");
5484 return -1;
5485 }
5486
5487 if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
5488 PMD_INIT_LOG(ERR, "The number of Rx queues is invalid; "
5489 "it must be less than or equal to %d",
5490 hw->mac.max_rx_queues);
5491 return -1;
5492 }
5493
5494 /*
5495 * When the VF driver issues an IXGBE_VF_RESET request, the PF driver
5496 * disables VF packet receipt if the PF MTU is > 1500.
5497 * This is done to deal with an 82599 limitation that forces
5498 * the PF and all VFs to share the same MTU.
5499 * The PF driver then re-enables VF packet receipt once the VF driver
5500 * issues an IXGBE_VF_SET_LPE request.
5501 * In the meantime, the VF device cannot be used, even if the VF driver
5502 * and the Guest VM network stack are ready to accept packets with a
5503 * size up to the PF MTU.
5504 * As a workaround to this PF behaviour, force the call to
5505 * ixgbevf_rlpml_set_vf even if jumbo frames are not used, so that
5506 * VF packet reception works in all cases.
5507 */
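/* ixgbevf_rlpml_set_vf() passes the requested maximum frame length to
 * the PF through the VF mailbox (an IXGBE_VF_SET_LPE request), which
 * triggers the re-enable path described above.
 */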
5508 ixgbevf_rlpml_set_vf(hw,
5509 (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
5510
5511 /*
5512 * Initially assume no header split and no VLAN stripping
5513 * on any Rx queue.
5514 */
5515 rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
5516 /* Setup RX queues */
5517 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5518 rxq = dev->data->rx_queues[i];
5519
5520 /* Allocate buffers for descriptor rings */
5521 ret = ixgbe_alloc_rx_queue_mbufs(rxq);
5522 if (ret)
5523 return ret;
5524
5525 /* Setup the Base and Length of the Rx Descriptor Rings */
5526 bus_addr = rxq->rx_ring_phys_addr;
5527
5528 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
5529 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5530 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
5531 (uint32_t)(bus_addr >> 32));
5532 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
5533 rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
5534 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
5535 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
5536
5537
5538 /* Configure the SRRCTL register */
11fdf7f2 5539 srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
5540
5541 /* Set if packets are dropped when no descriptors available */
5542 if (rxq->drop_en)
5543 srrctl |= IXGBE_SRRCTL_DROP_EN;
5544
5545 /*
5546 * Configure the RX buffer size in the BSIZEPACKET field of
5547 * the SRRCTL register of the queue.
5548 * The value is in 1 KB resolution. Valid values can be from
5549 * 1 KB to 16 KB.
5550 */
5551 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
5552 RTE_PKTMBUF_HEADROOM);
5553 srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
5554 IXGBE_SRRCTL_BSIZEPKT_MASK);
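 /* Worked example (assuming a mempool created with the default
 * RTE_MBUF_DEFAULT_BUF_SIZE and 128-byte RTE_PKTMBUF_HEADROOM):
 * buf_size = 2176 - 128 = 2048, and 2048 >> 10 (1 KB units) programs
 * BSIZEPACKET = 2, i.e. 2 KB hardware receive buffers.
 */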
5555
5556 /*
5557 * VF modification to write virtual function SRRCTL register
5558 */
5559 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
5560
5561 buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
5562 IXGBE_SRRCTL_BSIZEPKT_SHIFT);
5563
11fdf7f2 5564 if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
7c673cae 5565 /* Account for two VLAN tag lengths to support dual VLAN (QinQ) */
11fdf7f2 5566 (rxmode->max_rx_pkt_len +
5567 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
5568 if (!dev->data->scattered_rx)
5569 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
5570 dev->data->scattered_rx = 1;
5571 }
7c673cae 5572
5573 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
5574 rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
5575 }
5576
5577 /* Set RQPL for VF RSS according to max Rx queue */
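 /* e.g. with 4 VF Rx queues this writes (4 >> 1) = 2 into the RQPL field */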
5578 psrtype |= (dev->data->nb_rx_queues >> 1) <<
5579 IXGBE_PSRTYPE_RQPL_SHIFT;
5580 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
5581
5582 ixgbe_set_rx_function(dev);
5583
5584 return 0;
5585}
5586
5587/*
5588 * [VF] Initializes Transmit Unit.
5589 */
5590void __attribute__((cold))
5591ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
5592{
5593 struct ixgbe_hw *hw;
5594 struct ixgbe_tx_queue *txq;
5595 uint64_t bus_addr;
5596 uint32_t txctrl;
5597 uint16_t i;
5598
5599 PMD_INIT_FUNC_TRACE();
5600 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5601
5602 /* Setup the Base and Length of the Tx Descriptor Rings */
5603 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5604 txq = dev->data->tx_queues[i];
5605 bus_addr = txq->tx_ring_phys_addr;
5606 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
5607 (uint32_t)(bus_addr & 0x00000000ffffffffULL));
5608 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
5609 (uint32_t)(bus_addr >> 32));
5610 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
5611 txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
5612 /* Setup the HW Tx Head and TX Tail descriptor pointers */
5613 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
5614 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
5615
5616 /*
5617 * Disable Tx Head Writeback RO bit, since this hoses
5618 * bookkeeping if things aren't delivered in order.
5619 */
5620 txctrl = IXGBE_READ_REG(hw,
5621 IXGBE_VFDCA_TXCTRL(i));
5622 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
5623 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
5624 txctrl);
5625 }
5626}
5627
5628/*
5629 * [VF] Start Transmit and Receive Units.
5630 */
5631void __attribute__((cold))
5632ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
5633{
5634 struct ixgbe_hw *hw;
5635 struct ixgbe_tx_queue *txq;
5636 struct ixgbe_rx_queue *rxq;
5637 uint32_t txdctl;
5638 uint32_t rxdctl;
5639 uint16_t i;
5640 int poll_ms;
5641
5642 PMD_INIT_FUNC_TRACE();
5643 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5644
5645 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5646 txq = dev->data->tx_queues[i];
5647 /* Setup Transmit Threshold Registers */
5648 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5649 txdctl |= txq->pthresh & 0x7F;
5650 txdctl |= ((txq->hthresh & 0x7F) << 8);
5651 txdctl |= ((txq->wthresh & 0x7F) << 16);
5652 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5653 }
5654
5655 for (i = 0; i < dev->data->nb_tx_queues; i++) {
5656
5657 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5658 txdctl |= IXGBE_TXDCTL_ENABLE;
5659 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
5660
5661 poll_ms = 10;
5662 /* Wait until TX Enable ready */
5663 do {
5664 rte_delay_ms(1);
5665 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
5666 } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
5667 if (!poll_ms)
5668 PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
5669 }
5670 for (i = 0; i < dev->data->nb_rx_queues; i++) {
5671
5672 rxq = dev->data->rx_queues[i];
5673
5674 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5675 rxdctl |= IXGBE_RXDCTL_ENABLE;
5676 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
5677
5678 /* Wait until RX Enable ready */
5679 poll_ms = 10;
5680 do {
5681 rte_delay_ms(1);
5682 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
5683 } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
5684 if (!poll_ms)
5685 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
5686 rte_wmb();
5687 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
5688
5689 }
5690}
5691
5692int
5693ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
5694 const struct rte_flow_action_rss *in)
5695{
5696 if (in->key_len > RTE_DIM(out->key) ||
5697 in->queue_num > RTE_DIM(out->queue))
5698 return -EINVAL;
5699 out->conf = (struct rte_flow_action_rss){
5700 .func = in->func,
5701 .level = in->level,
5702 .types = in->types,
5703 .key_len = in->key_len,
5704 .queue_num = in->queue_num,
5705 .key = memcpy(out->key, in->key, in->key_len),
5706 .queue = memcpy(out->queue, in->queue,
5707 sizeof(*in->queue) * in->queue_num),
5708 };
5709 return 0;
5710}
5711
5712int
5713ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
5714 const struct rte_flow_action_rss *with)
5715{
5716 return (comp->func == with->func &&
5717 comp->level == with->level &&
5718 comp->types == with->types &&
5719 comp->key_len == with->key_len &&
5720 comp->queue_num == with->queue_num &&
5721 !memcmp(comp->key, with->key, with->key_len) &&
5722 !memcmp(comp->queue, with->queue,
5723 sizeof(*with->queue) * with->queue_num));
5724}
5725
5726int
5727ixgbe_config_rss_filter(struct rte_eth_dev *dev,
5728 struct ixgbe_rte_flow_rss_conf *conf, bool add)
5729{
5730 struct ixgbe_hw *hw;
5731 uint32_t reta;
5732 uint16_t i;
5733 uint16_t j;
5734 uint16_t sp_reta_size;
5735 uint32_t reta_reg;
5736 struct rte_eth_rss_conf rss_conf = {
5737 .rss_key = conf->conf.key_len ?
5738 (void *)(uintptr_t)conf->conf.key : NULL,
5739 .rss_key_len = conf->conf.key_len,
5740 .rss_hf = conf->conf.types,
5741 };
5742 struct ixgbe_filter_info *filter_info =
5743 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5744
5745 PMD_INIT_FUNC_TRACE();
5746 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5747
5748 sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
5749
5750 if (!add) {
5751 if (ixgbe_action_rss_same(&filter_info->rss_info.conf,
5752 &conf->conf)) {
5753 ixgbe_rss_disable(dev);
5754 memset(&filter_info->rss_info, 0,
5755 sizeof(struct ixgbe_rte_flow_rss_conf));
5756 return 0;
5757 }
5758 return -EINVAL;
5759 }
5760
5761 if (filter_info->rss_info.conf.queue_num)
5762 return -EINVAL;
5763 /* Fill in redirection table
5764 * The byte-swap is needed because NIC registers are in
5765 * little-endian order.
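 * Example: with queue_num = 2 and queue = {0, 1}, each 32-bit RETA
 * register ends up holding the entries 0,1,0,1 (the bswap32 places the
 * first entry in the least-significant byte).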
5766 */
5767 reta = 0;
5768 for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
5769 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
5770
5771 if (j == conf->conf.queue_num)
5772 j = 0;
5773 reta = (reta << 8) | conf->conf.queue[j];
5774 if ((i & 3) == 3)
5775 IXGBE_WRITE_REG(hw, reta_reg,
5776 rte_bswap32(reta));
5777 }
5778
5779 /* Configure the RSS key and the RSS protocols used to compute
5780 * the RSS hash of input packets.
5781 */
5782 if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
5783 ixgbe_rss_disable(dev);
9f95a23c 5784 return 0;
5785 }
5786 if (rss_conf.rss_key == NULL)
5787 rss_conf.rss_key = rss_intel_key; /* Default hash key */
5788 ixgbe_hw_rss_hash_set(hw, &rss_conf);
5789
5790 if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
5791 return -EINVAL;
5792
5793 return 0;
5794}
5795
7c673cae 5796/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
9f95a23c 5797__rte_weak int
5798ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
5799{
5800 return -1;
5801}
5802
9f95a23c 5803__rte_weak uint16_t
5804ixgbe_recv_pkts_vec(
5805 void __rte_unused *rx_queue,
5806 struct rte_mbuf __rte_unused **rx_pkts,
5807 uint16_t __rte_unused nb_pkts)
5808{
5809 return 0;
5810}
5811
9f95a23c 5812__rte_weak uint16_t
5813ixgbe_recv_scattered_pkts_vec(
5814 void __rte_unused *rx_queue,
5815 struct rte_mbuf __rte_unused **rx_pkts,
5816 uint16_t __rte_unused nb_pkts)
5817{
5818 return 0;
5819}
5820
9f95a23c 5821__rte_weak int
5822ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
5823{
5824 return -1;
5825}