1 /*-
2 * BSD LICENSE
3 *
4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <sys/queue.h>
35
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <stdint.h>
41 #include <stdarg.h>
42 #include <inttypes.h>
43
44 #include <rte_interrupts.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_pci.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_eal.h>
55 #include <rte_per_lcore.h>
56 #include <rte_lcore.h>
57 #include <rte_atomic.h>
58 #include <rte_branch_prediction.h>
59 #include <rte_mempool.h>
60 #include <rte_malloc.h>
61 #include <rte_mbuf.h>
62 #include <rte_ether.h>
63 #include <rte_ethdev.h>
64 #include <rte_prefetch.h>
65 #include <rte_udp.h>
66 #include <rte_tcp.h>
67 #include <rte_sctp.h>
68 #include <rte_string_fns.h>
69
70 #include "e1000_logs.h"
71 #include "base/e1000_api.h"
72 #include "e1000_ethdev.h"
73
74 /* Bit mask to indicate which bits are required for building the TX context. */
75 #define IGB_TX_OFFLOAD_MASK ( \
76 PKT_TX_VLAN_PKT | \
77 PKT_TX_IP_CKSUM | \
78 PKT_TX_L4_MASK | \
79 PKT_TX_TCP_SEG)
80
81 /**
82 * Structure associated with each descriptor of the RX ring of a RX queue.
83 */
84 struct igb_rx_entry {
85 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
86 };
87
88 /**
89 * Structure associated with each descriptor of the TX ring of a TX queue.
90 */
91 struct igb_tx_entry {
92 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
93 uint16_t next_id; /**< Index of next descriptor in ring. */
94 uint16_t last_id; /**< Index of last scattered descriptor. */
95 };
96
97 /**
98 * Structure associated with each RX queue.
99 */
100 struct igb_rx_queue {
101 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
102 volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
103 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
104 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
105 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
106 struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
107 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
108 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
109 uint16_t nb_rx_desc; /**< number of RX descriptors. */
110 uint16_t rx_tail; /**< current value of RDT register. */
111 uint16_t nb_rx_hold; /**< number of held free RX desc. */
112 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
113 uint16_t queue_id; /**< RX queue index. */
114 uint16_t reg_idx; /**< RX queue register index. */
115 uint8_t port_id; /**< Device port identifier. */
116 uint8_t pthresh; /**< Prefetch threshold register. */
117 uint8_t hthresh; /**< Host threshold register. */
118 uint8_t wthresh; /**< Write-back threshold register. */
119 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
120 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
121 };
122
123 /**
124 * Hardware context number
125 */
126 enum igb_advctx_num {
127 IGB_CTX_0 = 0, /**< CTX0 */
128 IGB_CTX_1 = 1, /**< CTX1 */
129 IGB_CTX_NUM = 2, /**< CTX_NUM */
130 };
131
132 /** Offload features */
133 union igb_tx_offload {
134 uint64_t data;
135 struct {
136 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
137 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
138 uint64_t vlan_tci:16; /**< VLAN Tag Control Identifier (CPU order). */
139 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
140 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
141
142 /* uint64_t unused:8; */
143 };
144 };
145
146 /*
147 * Compare masks for igb_tx_offload.data; they must be kept in sync with
148 * the igb_tx_offload layout.
149 */
150 #define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3 header mask. */
151 #define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask. */
152 #define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP header mask. */
153 #define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO segsz mask. */
154 /** MAC + IP + TCP + MSS mask. */
155 #define TX_TSO_CMP_MASK \
156 (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
157
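/*
 * Illustrative sketch (guarded out, not part of the driver): one way to
 * check at run time that the compare masks above stay in sync with the
 * bit-field layout of union igb_tx_offload. The helper name is made up
 * for this example.
 */
#if 0
static int
igb_tx_offload_masks_consistent(void)
{
	union igb_tx_offload ol;

	ol.data = 0;
	ol.l3_len = 0x1FF;		/* all 9 L3 length bits */
	ol.l2_len = 0x7F;		/* all 7 L2 length bits */
	if (ol.data != TX_MACIP_LEN_CMP_MASK)
		return 0;

	ol.data = 0;
	ol.vlan_tci = 0xFFFF;		/* all 16 VLAN TCI bits */
	if (ol.data != TX_VLAN_CMP_MASK)
		return 0;

	ol.data = 0;
	ol.l4_len = 0xFF;		/* all 8 L4 length bits */
	ol.tso_segsz = 0xFFFF;		/* all 16 TSO MSS bits */
	return ol.data == (TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK);
}
#endif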
158 /**
159 * Structure to check whether a new context descriptor needs to be built.
160 */
161 struct igb_advctx_info {
162 uint64_t flags; /**< ol_flags related to context build. */
163 /** tx offload: vlan, tso, l2-l3-l4 lengths. */
164 union igb_tx_offload tx_offload;
165 /** compare mask for tx offload. */
166 union igb_tx_offload tx_offload_mask;
167 };
168
169 /**
170 * Structure associated with each TX queue.
171 */
172 struct igb_tx_queue {
173 volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
174 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
175 struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
176 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
177 uint32_t txd_type; /**< Device-specific TXD type */
178 uint16_t nb_tx_desc; /**< number of TX descriptors. */
179 uint16_t tx_tail; /**< Current value of TDT register. */
180 uint16_t tx_head;
181 /**< Index of first used TX descriptor. */
182 uint16_t queue_id; /**< TX queue index. */
183 uint16_t reg_idx; /**< TX queue register index. */
184 uint8_t port_id; /**< Device port identifier. */
185 uint8_t pthresh; /**< Prefetch threshold register. */
186 uint8_t hthresh; /**< Host threshold register. */
187 uint8_t wthresh; /**< Write-back threshold register. */
188 uint32_t ctx_curr;
189 /**< Current used hardware descriptor. */
190 uint32_t ctx_start;
191 /**< Start context position for transmit queue. */
192 struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
193 /**< Hardware context history.*/
194 };
195
196 #if 1
197 #define RTE_PMD_USE_PREFETCH
198 #endif
199
200 #ifdef RTE_PMD_USE_PREFETCH
201 #define rte_igb_prefetch(p) rte_prefetch0(p)
202 #else
203 #define rte_igb_prefetch(p) do {} while(0)
204 #endif
205
206 #ifdef RTE_PMD_PACKET_PREFETCH
207 #define rte_packet_prefetch(p) rte_prefetch1(p)
208 #else
209 #define rte_packet_prefetch(p) do {} while(0)
210 #endif
211
212 /*
213 * Macros for the VMDq feature of 1 GbE NICs.
214 */
215 #define E1000_VMOLR_SIZE (8)
216 #define IGB_TSO_MAX_HDRLEN (512)
217 #define IGB_TSO_MAX_MSS (9216)
218
219 /*********************************************************************
220 *
221 * TX function
222 *
223 **********************************************************************/
224
225 /*
226 * There are some hardware limitations for TCP segmentation offload. We
227 * should check whether the parameters are valid.
228 */
229 static inline uint64_t
230 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
231 {
232 if (!(ol_req & PKT_TX_TCP_SEG))
233 return ol_req;
234 if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
235 ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
236 ol_req &= ~PKT_TX_TCP_SEG;
237 ol_req |= PKT_TX_TCP_CKSUM;
238 }
239 return ol_req;
240 }
241
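/*
 * Illustrative sketch (guarded out): expected behaviour of the check above.
 * A TSO request whose MSS exceeds IGB_TSO_MAX_MSS is downgraded to a plain
 * TCP checksum offload. The helper name is made up for this example.
 */
#if 0
static uint64_t
igb_tso_downgrade_example(void)
{
	union igb_tx_offload ol;
	uint64_t req;

	ol.data = 0;
	ol.l2_len = 14;
	ol.l3_len = 20;
	ol.l4_len = 20;
	ol.tso_segsz = 16000;	/* larger than IGB_TSO_MAX_MSS (9216) */

	req = check_tso_para(PKT_TX_TCP_SEG, ol);
	/* PKT_TX_TCP_SEG has been cleared, PKT_TX_TCP_CKSUM has been set. */
	return req;
}
#endif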
242 /*
243 * Advanced context descriptors are almost the same between igb and ixgbe.
244 * This is a separate function; look here for optimization opportunities.
245 * Rework is required to go with the pre-defined values.
246 */
247
248 static inline void
249 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
250 volatile struct e1000_adv_tx_context_desc *ctx_txd,
251 uint64_t ol_flags, union igb_tx_offload tx_offload)
252 {
253 uint32_t type_tucmd_mlhl;
254 uint32_t mss_l4len_idx;
255 uint32_t ctx_idx, ctx_curr;
256 uint32_t vlan_macip_lens;
257 union igb_tx_offload tx_offload_mask;
258
259 ctx_curr = txq->ctx_curr;
260 ctx_idx = ctx_curr + txq->ctx_start;
261
262 tx_offload_mask.data = 0;
263 type_tucmd_mlhl = 0;
264
265 /* Specify which HW CTX to upload. */
266 mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
267
268 if (ol_flags & PKT_TX_VLAN_PKT)
269 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
270
271 /* Check if TCP segmentation is required for this packet. */
272 if (ol_flags & PKT_TX_TCP_SEG) {
273 /* implies IP cksum in IPv4 */
274 if (ol_flags & PKT_TX_IP_CKSUM)
275 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
276 E1000_ADVTXD_TUCMD_L4T_TCP |
277 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
278 else
279 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
280 E1000_ADVTXD_TUCMD_L4T_TCP |
281 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
282
283 tx_offload_mask.data |= TX_TSO_CMP_MASK;
284 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
285 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
286 } else { /* no TSO, check if hardware checksum is needed */
287 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
288 tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
289
290 if (ol_flags & PKT_TX_IP_CKSUM)
291 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
292
293 switch (ol_flags & PKT_TX_L4_MASK) {
294 case PKT_TX_UDP_CKSUM:
295 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
296 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
297 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
298 break;
299 case PKT_TX_TCP_CKSUM:
300 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
301 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
302 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
303 break;
304 case PKT_TX_SCTP_CKSUM:
305 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
306 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
307 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
308 break;
309 default:
310 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
311 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
312 break;
313 }
314 }
315
316 txq->ctx_cache[ctx_curr].flags = ol_flags;
317 txq->ctx_cache[ctx_curr].tx_offload.data =
318 tx_offload_mask.data & tx_offload.data;
319 txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
320
321 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
322 vlan_macip_lens = (uint32_t)tx_offload.data;
323 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
324 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
325 ctx_txd->seqnum_seed = 0;
326 }
327
328 /*
329 * Check which hardware context can be used. Use the existing match
330 * or create a new context descriptor.
331 */
332 static inline uint32_t
333 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
334 union igb_tx_offload tx_offload)
335 {
336 /* If match with the current context */
337 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
338 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
339 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
340 return txq->ctx_curr;
341 }
342
343 /* If match with the second context */
344 txq->ctx_curr ^= 1;
345 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
346 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
347 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
348 return txq->ctx_curr;
349 }
350
351 /* Mismatch with both cached contexts: a new one has to be built. */
352 return IGB_CTX_NUM;
353 }
354
355 static inline uint32_t
356 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
357 {
358 static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
359 static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
360 uint32_t tmp;
361
362 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
363 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
364 tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
365 return tmp;
366 }
367
368 static inline uint32_t
369 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
370 {
371 uint32_t cmdtype;
372 static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
373 static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
374 cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
375 cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
376 return cmdtype;
377 }
378
379 uint16_t
380 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
381 uint16_t nb_pkts)
382 {
383 struct igb_tx_queue *txq;
384 struct igb_tx_entry *sw_ring;
385 struct igb_tx_entry *txe, *txn;
386 volatile union e1000_adv_tx_desc *txr;
387 volatile union e1000_adv_tx_desc *txd;
388 struct rte_mbuf *tx_pkt;
389 struct rte_mbuf *m_seg;
390 uint64_t buf_dma_addr;
391 uint32_t olinfo_status;
392 uint32_t cmd_type_len;
393 uint32_t pkt_len;
394 uint16_t slen;
395 uint64_t ol_flags;
396 uint16_t tx_end;
397 uint16_t tx_id;
398 uint16_t tx_last;
399 uint16_t nb_tx;
400 uint64_t tx_ol_req;
401 uint32_t new_ctx = 0;
402 uint32_t ctx = 0;
403 union igb_tx_offload tx_offload = {0};
404
405 txq = tx_queue;
406 sw_ring = txq->sw_ring;
407 txr = txq->tx_ring;
408 tx_id = txq->tx_tail;
409 txe = &sw_ring[tx_id];
410
411 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
412 tx_pkt = *tx_pkts++;
413 pkt_len = tx_pkt->pkt_len;
414
415 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
416
417 /*
418 * The number of descriptors that must be allocated for a
419 * packet is the number of segments of that packet, plus 1
420 * Context Descriptor for the VLAN Tag Identifier, if any.
421 * Determine the last TX descriptor to allocate in the TX ring
422 * for the packet, starting from the current position (tx_id)
423 * in the ring.
424 */
425 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
426
427 ol_flags = tx_pkt->ol_flags;
428 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
429
430 /* Check whether a context descriptor needs to be built. */
431 if (tx_ol_req) {
432 tx_offload.l2_len = tx_pkt->l2_len;
433 tx_offload.l3_len = tx_pkt->l3_len;
434 tx_offload.l4_len = tx_pkt->l4_len;
435 tx_offload.vlan_tci = tx_pkt->vlan_tci;
436 tx_offload.tso_segsz = tx_pkt->tso_segsz;
437 tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
438
439 ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
440 /* Only allocate a context descriptor if required. */
441 new_ctx = (ctx == IGB_CTX_NUM);
442 ctx = txq->ctx_curr + txq->ctx_start;
443 tx_last = (uint16_t) (tx_last + new_ctx);
444 }
445 if (tx_last >= txq->nb_tx_desc)
446 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
447
448 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
449 " tx_first=%u tx_last=%u",
450 (unsigned) txq->port_id,
451 (unsigned) txq->queue_id,
452 (unsigned) pkt_len,
453 (unsigned) tx_id,
454 (unsigned) tx_last);
455
456 /*
457 * Check if there are enough free descriptors in the TX ring
458 * to transmit the next packet.
459 * This operation is based on the two following rules:
460 *
461 * 1- Only check that the last needed TX descriptor can be
462 * allocated (by construction, if that descriptor is free,
463 * all intermediate ones are also free).
464 *
465 * For this purpose, the index of the last TX descriptor
466 * used for a packet (the "last descriptor" of a packet)
467 * is recorded in the TX entries (the last one included)
468 * that are associated with all TX descriptors allocated
469 * for that packet.
470 *
471 * 2- Avoid allocating the last free TX descriptor of the
472 * ring, in order to never set the TDT register with the
473 * same value stored in parallel by the NIC in the TDH
474 * register, which would make the TX engine of the NIC
475 * enter a deadlock situation.
476 *
477 * By extension, avoid allocating a free descriptor that
478 * belongs to the last set of free descriptors allocated
479 * to the same packet previously transmitted.
480 */
481
482 /*
483 * The "last descriptor" of the packet that previously used the last
484 * descriptor that we are going to allocate, if any.
485 */
486 tx_end = sw_ring[tx_last].last_id;
487
488 /*
489 * The next descriptor following that "last descriptor" in the
490 * ring.
491 */
492 tx_end = sw_ring[tx_end].next_id;
493
494 /*
495 * The "last descriptor" associated with that next descriptor.
496 */
497 tx_end = sw_ring[tx_end].last_id;
498
499 /*
500 * Check that this descriptor is free.
501 */
502 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
503 if (nb_tx == 0)
504 return 0;
505 goto end_of_tx;
506 }
507
508 /*
509 * Set common flags of all TX Data Descriptors.
510 *
511 * The following bits must be set in all Data Descriptors:
512 * - E1000_ADVTXD_DTYP_DATA
513 * - E1000_ADVTXD_DCMD_DEXT
514 *
515 * The following bits must be set in the first Data Descriptor
516 * and are ignored in the other ones:
517 * - E1000_ADVTXD_DCMD_IFCS
518 * - E1000_ADVTXD_MAC_1588
519 * - E1000_ADVTXD_DCMD_VLE
520 *
521 * The following bits must only be set in the last Data
522 * Descriptor:
523 * - E1000_TXD_CMD_EOP
524 *
525 * The following bits can be set in any Data Descriptor, but
526 * are only set in the last Data Descriptor:
527 * - E1000_TXD_CMD_RS
528 */
529 cmd_type_len = txq->txd_type |
530 E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
531 if (tx_ol_req & PKT_TX_TCP_SEG)
532 pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
533 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
534 #if defined(RTE_LIBRTE_IEEE1588)
535 if (ol_flags & PKT_TX_IEEE1588_TMST)
536 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
537 #endif
538 if (tx_ol_req) {
539 /* Setup TX Advanced context descriptor if required */
540 if (new_ctx) {
541 volatile struct e1000_adv_tx_context_desc *
542 ctx_txd;
543
544 ctx_txd = (volatile struct
545 e1000_adv_tx_context_desc *)
546 &txr[tx_id];
547
548 txn = &sw_ring[txe->next_id];
549 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
550
551 if (txe->mbuf != NULL) {
552 rte_pktmbuf_free_seg(txe->mbuf);
553 txe->mbuf = NULL;
554 }
555
556 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
557
558 txe->last_id = tx_last;
559 tx_id = txe->next_id;
560 txe = txn;
561 }
562
563 /* Setup the TX Advanced Data Descriptor */
564 cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
565 olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
566 olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
567 }
568
569 m_seg = tx_pkt;
570 do {
571 txn = &sw_ring[txe->next_id];
572 txd = &txr[tx_id];
573
574 if (txe->mbuf != NULL)
575 rte_pktmbuf_free_seg(txe->mbuf);
576 txe->mbuf = m_seg;
577
578 /*
579 * Set up transmit descriptor.
580 */
581 slen = (uint16_t) m_seg->data_len;
582 buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
583 txd->read.buffer_addr =
584 rte_cpu_to_le_64(buf_dma_addr);
585 txd->read.cmd_type_len =
586 rte_cpu_to_le_32(cmd_type_len | slen);
587 txd->read.olinfo_status =
588 rte_cpu_to_le_32(olinfo_status);
589 txe->last_id = tx_last;
590 tx_id = txe->next_id;
591 txe = txn;
592 m_seg = m_seg->next;
593 } while (m_seg != NULL);
594
595 /*
596 * The last packet data descriptor needs End Of Packet (EOP)
597 * and Report Status (RS).
598 */
599 txd->read.cmd_type_len |=
600 rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
601 }
602 end_of_tx:
603 rte_wmb();
604
605 /*
606 * Set the Transmit Descriptor Tail (TDT).
607 */
608 E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
609 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
610 (unsigned) txq->port_id, (unsigned) txq->queue_id,
611 (unsigned) tx_id, (unsigned) nb_tx);
612 txq->tx_tail = tx_id;
613
614 return nb_tx;
615 }
616
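/*
 * Illustrative sketch (guarded out): a minimal retry loop around the burst
 * TX function above. In an application the queue is normally reached
 * through rte_eth_tx_burst(); the raw queue pointer is used here only to
 * keep the sketch local to this file. The helper name is made up.
 */
#if 0
static void
igb_tx_drain_example(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent = 0;

	/*
	 * eth_igb_xmit_pkts() may send fewer packets than requested when the
	 * ring runs out of free descriptors; busy-wait and retry with the
	 * remainder until the NIC has written back enough descriptors.
	 */
	while (sent < nb_pkts)
		sent += eth_igb_xmit_pkts(tx_queue, pkts + sent,
					  (uint16_t)(nb_pkts - sent));
}
#endif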
617 /*********************************************************************
618 *
619 * RX functions
620 *
621 **********************************************************************/
622 #define IGB_PACKET_TYPE_IPV4 0X01
623 #define IGB_PACKET_TYPE_IPV4_TCP 0X11
624 #define IGB_PACKET_TYPE_IPV4_UDP 0X21
625 #define IGB_PACKET_TYPE_IPV4_SCTP 0X41
626 #define IGB_PACKET_TYPE_IPV4_EXT 0X03
627 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP 0X43
628 #define IGB_PACKET_TYPE_IPV6 0X04
629 #define IGB_PACKET_TYPE_IPV6_TCP 0X14
630 #define IGB_PACKET_TYPE_IPV6_UDP 0X24
631 #define IGB_PACKET_TYPE_IPV6_EXT 0X0C
632 #define IGB_PACKET_TYPE_IPV6_EXT_TCP 0X1C
633 #define IGB_PACKET_TYPE_IPV6_EXT_UDP 0X2C
634 #define IGB_PACKET_TYPE_IPV4_IPV6 0X05
635 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP 0X15
636 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP 0X25
637 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
638 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
639 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
640 #define IGB_PACKET_TYPE_MAX 0X80
641 #define IGB_PACKET_TYPE_MASK 0X7F
642 #define IGB_PACKET_TYPE_SHIFT 0X04
643 static inline uint32_t
644 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
645 {
646 static const uint32_t
647 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
648 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
649 RTE_PTYPE_L3_IPV4,
650 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
651 RTE_PTYPE_L3_IPV4_EXT,
652 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
653 RTE_PTYPE_L3_IPV6,
654 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
655 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
656 RTE_PTYPE_INNER_L3_IPV6,
657 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
658 RTE_PTYPE_L3_IPV6_EXT,
659 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
660 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
661 RTE_PTYPE_INNER_L3_IPV6_EXT,
662 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
663 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
664 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
665 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
666 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
667 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
668 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
669 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
670 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
671 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
672 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
673 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
674 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
675 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
676 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
677 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
678 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
679 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
680 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
681 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
682 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
683 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
684 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
685 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
686 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
687 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
688 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
689 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
690 };
691 if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
692 return RTE_PTYPE_UNKNOWN;
693
694 pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
695
696 return ptype_table[pkt_info];
697 }
698
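/*
 * Illustrative sketch (guarded out): how an application can branch on the
 * packet type produced by the translation table above. The helper name is
 * made up for this example.
 */
#if 0
static int
igb_is_ipv4_tcp_example(const struct rte_mbuf *m)
{
	return (m->packet_type & RTE_PTYPE_L3_IPV4) != 0 &&
		(m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP;
}
#endif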
699 static inline uint64_t
700 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
701 {
702 uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH;
703
704 #if defined(RTE_LIBRTE_IEEE1588)
705 static uint32_t ip_pkt_etqf_map[8] = {
706 0, 0, 0, PKT_RX_IEEE1588_PTP,
707 0, 0, 0, 0,
708 };
709
710 struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
711 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
712
713 /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
714 if (hw->mac.type == e1000_i210)
715 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
716 else
717 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
718 #else
719 RTE_SET_USED(rxq);
720 #endif
721
722 return pkt_flags;
723 }
724
725 static inline uint64_t
726 rx_desc_status_to_pkt_flags(uint32_t rx_status)
727 {
728 uint64_t pkt_flags;
729
730 /* Check if VLAN present */
731 pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
732 PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0);
733
734 #if defined(RTE_LIBRTE_IEEE1588)
735 if (rx_status & E1000_RXD_STAT_TMST)
736 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
737 #endif
738 return pkt_flags;
739 }
740
741 static inline uint64_t
742 rx_desc_error_to_pkt_flags(uint32_t rx_status)
743 {
744 /*
745 * Bit 30: IPE, IPv4 checksum error
746 * Bit 29: L4I, L4 integrity error
747 */
748
749 static uint64_t error_to_pkt_flags_map[4] = {
750 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
751 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
752 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
753 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
754 };
755 return error_to_pkt_flags_map[(rx_status >>
756 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
757 }
758
759 uint16_t
760 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
761 uint16_t nb_pkts)
762 {
763 struct igb_rx_queue *rxq;
764 volatile union e1000_adv_rx_desc *rx_ring;
765 volatile union e1000_adv_rx_desc *rxdp;
766 struct igb_rx_entry *sw_ring;
767 struct igb_rx_entry *rxe;
768 struct rte_mbuf *rxm;
769 struct rte_mbuf *nmb;
770 union e1000_adv_rx_desc rxd;
771 uint64_t dma_addr;
772 uint32_t staterr;
773 uint32_t hlen_type_rss;
774 uint16_t pkt_len;
775 uint16_t rx_id;
776 uint16_t nb_rx;
777 uint16_t nb_hold;
778 uint64_t pkt_flags;
779
780 nb_rx = 0;
781 nb_hold = 0;
782 rxq = rx_queue;
783 rx_id = rxq->rx_tail;
784 rx_ring = rxq->rx_ring;
785 sw_ring = rxq->sw_ring;
786 while (nb_rx < nb_pkts) {
787 /*
788 * The order of operations here is important as the DD status
789 * bit must not be read after any other descriptor fields.
790 * rx_ring and rxdp are pointing to volatile data so the order
791 * of accesses cannot be reordered by the compiler. If they were
792 * not volatile, they could be reordered which could lead to
793 * using invalid descriptor fields when read from rxd.
794 */
795 rxdp = &rx_ring[rx_id];
796 staterr = rxdp->wb.upper.status_error;
797 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
798 break;
799 rxd = *rxdp;
800
801 /*
802 * End of packet.
803 *
804 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
805 * likely to be invalid and to be dropped by the various
806 * validation checks performed by the network stack.
807 *
808 * Allocate a new mbuf to replenish the RX ring descriptor.
809 * If the allocation fails:
810 * - arrange for that RX descriptor to be the first one
811 * being parsed the next time the receive function is
812 * invoked [on the same queue].
813 *
814 * - Stop parsing the RX ring and return immediately.
815 *
816 * This policy does not drop the packet received in the RX
817 * descriptor for which the allocation of a new mbuf failed.
818 * Thus, it allows that packet to be retrieved later if
819 * mbufs have been freed in the meantime.
820 * As a side effect, holding RX descriptors instead of
821 * systematically giving them back to the NIC may lead to
822 * RX ring exhaustion situations.
823 * However, the NIC can gracefully prevent such situations
824 * from happening by sending specific "back-pressure" flow control
825 * frames to its peer(s).
826 */
827 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
828 "staterr=0x%x pkt_len=%u",
829 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
830 (unsigned) rx_id, (unsigned) staterr,
831 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
832
833 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
834 if (nmb == NULL) {
835 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
836 "queue_id=%u", (unsigned) rxq->port_id,
837 (unsigned) rxq->queue_id);
838 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
839 break;
840 }
841
842 nb_hold++;
843 rxe = &sw_ring[rx_id];
844 rx_id++;
845 if (rx_id == rxq->nb_rx_desc)
846 rx_id = 0;
847
848 /* Prefetch next mbuf while processing current one. */
849 rte_igb_prefetch(sw_ring[rx_id].mbuf);
850
851 /*
852 * When next RX descriptor is on a cache-line boundary,
853 * prefetch the next 4 RX descriptors and the next 8 pointers
854 * to mbufs.
855 */
856 if ((rx_id & 0x3) == 0) {
857 rte_igb_prefetch(&rx_ring[rx_id]);
858 rte_igb_prefetch(&sw_ring[rx_id]);
859 }
860
861 rxm = rxe->mbuf;
862 rxe->mbuf = nmb;
863 dma_addr =
864 rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
865 rxdp->read.hdr_addr = 0;
866 rxdp->read.pkt_addr = dma_addr;
867
868 /*
869 * Initialize the returned mbuf.
870 * 1) setup generic mbuf fields:
871 * - number of segments,
872 * - next segment,
873 * - packet length,
874 * - RX port identifier.
875 * 2) integrate hardware offload data, if any:
876 * - RSS flag & hash,
877 * - IP checksum flag,
878 * - VLAN TCI, if any,
879 * - error flags.
880 */
881 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
882 rxq->crc_len);
883 rxm->data_off = RTE_PKTMBUF_HEADROOM;
884 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
885 rxm->nb_segs = 1;
886 rxm->next = NULL;
887 rxm->pkt_len = pkt_len;
888 rxm->data_len = pkt_len;
889 rxm->port = rxq->port_id;
890
891 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
892 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
893 /* Only valid if PKT_RX_VLAN_PKT is set in pkt_flags. */
894 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
895
896 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
897 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
898 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
899 rxm->ol_flags = pkt_flags;
900 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
901 lo_dword.hs_rss.pkt_info);
902
903 /*
904 * Store the mbuf address into the next entry of the array
905 * of returned packets.
906 */
907 rx_pkts[nb_rx++] = rxm;
908 }
909 rxq->rx_tail = rx_id;
910
911 /*
912 * If the number of free RX descriptors is greater than the RX free
913 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
914 * register.
915 * Update the RDT with the value of the last processed RX descriptor
916 * minus 1, to guarantee that the RDT register is never equal to the
917 * RDH register, which creates a "full" ring situation from the
918 * hardware point of view...
919 */
920 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
921 if (nb_hold > rxq->rx_free_thresh) {
922 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
923 "nb_hold=%u nb_rx=%u",
924 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
925 (unsigned) rx_id, (unsigned) nb_hold,
926 (unsigned) nb_rx);
927 rx_id = (uint16_t) ((rx_id == 0) ?
928 (rxq->nb_rx_desc - 1) : (rx_id - 1));
929 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
930 nb_hold = 0;
931 }
932 rxq->nb_rx_hold = nb_hold;
933 return nb_rx;
934 }
935
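/*
 * Illustrative sketch (guarded out): a minimal poll loop around the burst
 * RX function above. In an application the queue is normally reached
 * through rte_eth_rx_burst(); the raw queue pointer is used here only to
 * keep the sketch local to this file. The helper name is made up.
 */
#if 0
static void
igb_rx_poll_example(void *rx_queue)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb;

	nb = eth_igb_recv_pkts(rx_queue, pkts, 32);
	for (i = 0; i < nb; i++) {
		/* ... consume pkts[i] (ol_flags, packet_type, data) ... */
		rte_pktmbuf_free(pkts[i]);
	}
}
#endif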
936 uint16_t
937 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
938 uint16_t nb_pkts)
939 {
940 struct igb_rx_queue *rxq;
941 volatile union e1000_adv_rx_desc *rx_ring;
942 volatile union e1000_adv_rx_desc *rxdp;
943 struct igb_rx_entry *sw_ring;
944 struct igb_rx_entry *rxe;
945 struct rte_mbuf *first_seg;
946 struct rte_mbuf *last_seg;
947 struct rte_mbuf *rxm;
948 struct rte_mbuf *nmb;
949 union e1000_adv_rx_desc rxd;
950 uint64_t dma; /* Physical address of mbuf data buffer */
951 uint32_t staterr;
952 uint32_t hlen_type_rss;
953 uint16_t rx_id;
954 uint16_t nb_rx;
955 uint16_t nb_hold;
956 uint16_t data_len;
957 uint64_t pkt_flags;
958
959 nb_rx = 0;
960 nb_hold = 0;
961 rxq = rx_queue;
962 rx_id = rxq->rx_tail;
963 rx_ring = rxq->rx_ring;
964 sw_ring = rxq->sw_ring;
965
966 /*
967 * Retrieve RX context of current packet, if any.
968 */
969 first_seg = rxq->pkt_first_seg;
970 last_seg = rxq->pkt_last_seg;
971
972 while (nb_rx < nb_pkts) {
973 next_desc:
974 /*
975 * The order of operations here is important as the DD status
976 * bit must not be read after any other descriptor fields.
977 * rx_ring and rxdp are pointing to volatile data so the order
978 * of accesses cannot be reordered by the compiler. If they were
979 * not volatile, they could be reordered which could lead to
980 * using invalid descriptor fields when read from rxd.
981 */
982 rxdp = &rx_ring[rx_id];
983 staterr = rxdp->wb.upper.status_error;
984 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
985 break;
986 rxd = *rxdp;
987
988 /*
989 * Descriptor done.
990 *
991 * Allocate a new mbuf to replenish the RX ring descriptor.
992 * If the allocation fails:
993 * - arrange for that RX descriptor to be the first one
994 * being parsed the next time the receive function is
995 * invoked [on the same queue].
996 *
997 * - Stop parsing the RX ring and return immediately.
998 *
999 * This policy does not drop the packet received in the RX
1000 * descriptor for which the allocation of a new mbuf failed.
1001 * Thus, it allows that packet to be retrieved later if
1002 * mbufs have been freed in the meantime.
1003 * As a side effect, holding RX descriptors instead of
1004 * systematically giving them back to the NIC may lead to
1005 * RX ring exhaustion situations.
1006 * However, the NIC can gracefully prevent such situations
1007 * from happening by sending specific "back-pressure" flow control
1008 * frames to its peer(s).
1009 */
1010 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1011 "staterr=0x%x data_len=%u",
1012 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1013 (unsigned) rx_id, (unsigned) staterr,
1014 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1015
1016 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1017 if (nmb == NULL) {
1018 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1019 "queue_id=%u", (unsigned) rxq->port_id,
1020 (unsigned) rxq->queue_id);
1021 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1022 break;
1023 }
1024
1025 nb_hold++;
1026 rxe = &sw_ring[rx_id];
1027 rx_id++;
1028 if (rx_id == rxq->nb_rx_desc)
1029 rx_id = 0;
1030
1031 /* Prefetch next mbuf while processing current one. */
1032 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1033
1034 /*
1035 * When next RX descriptor is on a cache-line boundary,
1036 * prefetch the next 4 RX descriptors and the next 8 pointers
1037 * to mbufs.
1038 */
1039 if ((rx_id & 0x3) == 0) {
1040 rte_igb_prefetch(&rx_ring[rx_id]);
1041 rte_igb_prefetch(&sw_ring[rx_id]);
1042 }
1043
1044 /*
1045 * Update RX descriptor with the physical address of the new
1046 * data buffer of the new allocated mbuf.
1047 */
1048 rxm = rxe->mbuf;
1049 rxe->mbuf = nmb;
1050 dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
1051 rxdp->read.pkt_addr = dma;
1052 rxdp->read.hdr_addr = 0;
1053
1054 /*
1055 * Set data length & data buffer address of mbuf.
1056 */
1057 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1058 rxm->data_len = data_len;
1059 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1060
1061 /*
1062 * If this is the first buffer of the received packet,
1063 * set the pointer to the first mbuf of the packet and
1064 * initialize its context.
1065 * Otherwise, update the total length and the number of segments
1066 * of the current scattered packet, and update the pointer to
1067 * the last mbuf of the current packet.
1068 */
1069 if (first_seg == NULL) {
1070 first_seg = rxm;
1071 first_seg->pkt_len = data_len;
1072 first_seg->nb_segs = 1;
1073 } else {
1074 first_seg->pkt_len += data_len;
1075 first_seg->nb_segs++;
1076 last_seg->next = rxm;
1077 }
1078
1079 /*
1080 * If this is not the last buffer of the received packet,
1081 * update the pointer to the last mbuf of the current scattered
1082 * packet and continue to parse the RX ring.
1083 */
1084 if (! (staterr & E1000_RXD_STAT_EOP)) {
1085 last_seg = rxm;
1086 goto next_desc;
1087 }
1088
1089 /*
1090 * This is the last buffer of the received packet.
1091 * If the CRC is not stripped by the hardware:
1092 * - Subtract the CRC length from the total packet length.
1093 * - If the last buffer only contains the whole CRC or a part
1094 * of it, free the mbuf associated to the last buffer.
1095 * If part of the CRC is also contained in the previous
1096 * mbuf, subtract the length of that CRC part from the
1097 * data length of the previous mbuf.
1098 */
1099 rxm->next = NULL;
1100 if (unlikely(rxq->crc_len > 0)) {
1101 first_seg->pkt_len -= ETHER_CRC_LEN;
1102 if (data_len <= ETHER_CRC_LEN) {
1103 rte_pktmbuf_free_seg(rxm);
1104 first_seg->nb_segs--;
1105 last_seg->data_len = (uint16_t)
1106 (last_seg->data_len -
1107 (ETHER_CRC_LEN - data_len));
1108 last_seg->next = NULL;
1109 } else
1110 rxm->data_len =
1111 (uint16_t) (data_len - ETHER_CRC_LEN);
1112 }
1113
1114 /*
1115 * Initialize the first mbuf of the returned packet:
1116 * - RX port identifier,
1117 * - hardware offload data, if any:
1118 * - RSS flag & hash,
1119 * - IP checksum flag,
1120 * - VLAN TCI, if any,
1121 * - error flags.
1122 */
1123 first_seg->port = rxq->port_id;
1124 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1125
1126 /*
1127 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
1128 * set in the pkt_flags field.
1129 */
1130 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
1131 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1132 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1133 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1134 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1135 first_seg->ol_flags = pkt_flags;
1136 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1137 lower.lo_dword.hs_rss.pkt_info);
1138
1139 /* Prefetch data of first segment, if configured to do so. */
1140 rte_packet_prefetch((char *)first_seg->buf_addr +
1141 first_seg->data_off);
1142
1143 /*
1144 * Store the mbuf address into the next entry of the array
1145 * of returned packets.
1146 */
1147 rx_pkts[nb_rx++] = first_seg;
1148
1149 /*
1150 * Set up the receive context for a new packet.
1151 */
1152 first_seg = NULL;
1153 }
1154
1155 /*
1156 * Record index of the next RX descriptor to probe.
1157 */
1158 rxq->rx_tail = rx_id;
1159
1160 /*
1161 * Save receive context.
1162 */
1163 rxq->pkt_first_seg = first_seg;
1164 rxq->pkt_last_seg = last_seg;
1165
1166 /*
1167 * If the number of free RX descriptors is greater than the RX free
1168 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1169 * register.
1170 * Update the RDT with the value of the last processed RX descriptor
1171 * minus 1, to guarantee that the RDT register is never equal to the
1172 * RDH register, which creates a "full" ring situation from the
1173 * hardware point of view...
1174 */
1175 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1176 if (nb_hold > rxq->rx_free_thresh) {
1177 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1178 "nb_hold=%u nb_rx=%u",
1179 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1180 (unsigned) rx_id, (unsigned) nb_hold,
1181 (unsigned) nb_rx);
1182 rx_id = (uint16_t) ((rx_id == 0) ?
1183 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1184 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1185 nb_hold = 0;
1186 }
1187 rxq->nb_rx_hold = nb_hold;
1188 return nb_rx;
1189 }
1190
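/*
 * Illustrative sketch (guarded out): a packet returned by the scattered RX
 * function above is a chain of segments; pkt_len on the first mbuf equals
 * the sum of data_len over the whole chain. The helper name is made up.
 */
#if 0
static uint32_t
igb_seg_chain_len_example(const struct rte_mbuf *first_seg)
{
	const struct rte_mbuf *m;
	uint32_t len = 0;

	for (m = first_seg; m != NULL; m = m->next)
		len += m->data_len;

	/* For a well-formed chain, len == first_seg->pkt_len. */
	return len;
}
#endif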
1191 /*
1192 * Maximum number of Ring Descriptors.
1193 *
1194 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1195 * descriptors should meet the following condition:
1196 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1197 */
1198
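/*
 * Illustrative sketch (guarded out): with 16-byte advanced descriptors the
 * 128-byte RDLEN/TDLEN granularity works out to rings whose length is a
 * multiple of 8 descriptors (128 / 16), which is what the IGB_RXD_ALIGN /
 * IGB_TXD_ALIGN checks in the queue setup functions below enforce.
 * The helper name is made up for this example.
 */
#if 0
static int
igb_ring_size_is_valid(uint16_t nb_desc)
{
	return (nb_desc * sizeof(union e1000_adv_rx_desc)) % 128 == 0;
}
#endif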
1199 static void
1200 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1201 {
1202 unsigned i;
1203
1204 if (txq->sw_ring != NULL) {
1205 for (i = 0; i < txq->nb_tx_desc; i++) {
1206 if (txq->sw_ring[i].mbuf != NULL) {
1207 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1208 txq->sw_ring[i].mbuf = NULL;
1209 }
1210 }
1211 }
1212 }
1213
1214 static void
1215 igb_tx_queue_release(struct igb_tx_queue *txq)
1216 {
1217 if (txq != NULL) {
1218 igb_tx_queue_release_mbufs(txq);
1219 rte_free(txq->sw_ring);
1220 rte_free(txq);
1221 }
1222 }
1223
1224 void
1225 eth_igb_tx_queue_release(void *txq)
1226 {
1227 igb_tx_queue_release(txq);
1228 }
1229
1230 static void
1231 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1232 {
1233 txq->tx_head = 0;
1234 txq->tx_tail = 0;
1235 txq->ctx_curr = 0;
1236 memset((void*)&txq->ctx_cache, 0,
1237 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1238 }
1239
1240 static void
1241 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1242 {
1243 static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1244 struct igb_tx_entry *txe = txq->sw_ring;
1245 uint16_t i, prev;
1246 struct e1000_hw *hw;
1247
1248 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1249 /* Zero out HW ring memory */
1250 for (i = 0; i < txq->nb_tx_desc; i++) {
1251 txq->tx_ring[i] = zeroed_desc;
1252 }
1253
1254 /* Initialize ring entries */
1255 prev = (uint16_t)(txq->nb_tx_desc - 1);
1256 for (i = 0; i < txq->nb_tx_desc; i++) {
1257 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1258
1259 txd->wb.status = E1000_TXD_STAT_DD;
1260 txe[i].mbuf = NULL;
1261 txe[i].last_id = i;
1262 txe[prev].next_id = i;
1263 prev = i;
1264 }
1265
1266 txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1267 /* 82575 specific, each tx queue will use 2 hw contexts */
1268 if (hw->mac.type == e1000_82575)
1269 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1270
1271 igb_reset_tx_queue_stat(txq);
1272 }
1273
1274 int
1275 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1276 uint16_t queue_idx,
1277 uint16_t nb_desc,
1278 unsigned int socket_id,
1279 const struct rte_eth_txconf *tx_conf)
1280 {
1281 const struct rte_memzone *tz;
1282 struct igb_tx_queue *txq;
1283 struct e1000_hw *hw;
1284 uint32_t size;
1285
1286 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1287
1288 /*
1289 * Validate number of transmit descriptors.
1290 * It must not exceed the hardware maximum, and must be a multiple
1291 * of IGB_TXD_ALIGN.
1292 */
1293 if (nb_desc % IGB_TXD_ALIGN != 0 ||
1294 (nb_desc > E1000_MAX_RING_DESC) ||
1295 (nb_desc < E1000_MIN_RING_DESC)) {
1296 return -EINVAL;
1297 }
1298
1299 /*
1300 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1301 * driver.
1302 */
1303 if (tx_conf->tx_free_thresh != 0)
1304 PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
1305 "used for the 1G driver.");
1306 if (tx_conf->tx_rs_thresh != 0)
1307 PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
1308 "used for the 1G driver.");
1309 if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
1310 PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
1311 "consider setting the TX WTHRESH value to 4, 8, "
1312 "or 16.");
1313
1314 /* Free memory prior to re-allocation if needed */
1315 if (dev->data->tx_queues[queue_idx] != NULL) {
1316 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1317 dev->data->tx_queues[queue_idx] = NULL;
1318 }
1319
1320 /* First allocate the tx queue data structure */
1321 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1322 RTE_CACHE_LINE_SIZE);
1323 if (txq == NULL)
1324 return -ENOMEM;
1325
1326 /*
1327 * Allocate TX ring hardware descriptors. A memzone large enough to
1328 * handle the maximum ring size is allocated in order to allow for
1329 * resizing in later calls to the queue setup function.
1330 */
1331 size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1332 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
1333 E1000_ALIGN, socket_id);
1334 if (tz == NULL) {
1335 igb_tx_queue_release(txq);
1336 return -ENOMEM;
1337 }
1338
1339 txq->nb_tx_desc = nb_desc;
1340 txq->pthresh = tx_conf->tx_thresh.pthresh;
1341 txq->hthresh = tx_conf->tx_thresh.hthresh;
1342 txq->wthresh = tx_conf->tx_thresh.wthresh;
1343 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1344 txq->wthresh = 1;
1345 txq->queue_id = queue_idx;
1346 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1347 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1348 txq->port_id = dev->data->port_id;
1349
1350 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1351 txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
1352
1353 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1354 /* Allocate software ring */
1355 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1356 sizeof(struct igb_tx_entry) * nb_desc,
1357 RTE_CACHE_LINE_SIZE);
1358 if (txq->sw_ring == NULL) {
1359 igb_tx_queue_release(txq);
1360 return -ENOMEM;
1361 }
1362 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1363 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1364
1365 igb_reset_tx_queue(txq, dev);
1366 dev->tx_pkt_burst = eth_igb_xmit_pkts;
1367 dev->data->tx_queues[queue_idx] = txq;
1368
1369 return 0;
1370 }
1371
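/*
 * Illustrative sketch (guarded out): how an application typically reaches
 * the setup function above, through the generic ethdev API. A NULL txconf
 * keeps the driver defaults; the descriptor count must pass the
 * IGB_TXD_ALIGN / E1000_MAX_RING_DESC checks performed above. The helper
 * name and the values used are made up for this example.
 */
#if 0
static int
igb_tx_queue_setup_example(uint8_t port_id, unsigned int socket_id)
{
	/* 512 descriptors: a multiple of IGB_TXD_ALIGN and well below
	 * E1000_MAX_RING_DESC. */
	return rte_eth_tx_queue_setup(port_id, 0, 512, socket_id, NULL);
}
#endif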
1372 static void
1373 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1374 {
1375 unsigned i;
1376
1377 if (rxq->sw_ring != NULL) {
1378 for (i = 0; i < rxq->nb_rx_desc; i++) {
1379 if (rxq->sw_ring[i].mbuf != NULL) {
1380 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1381 rxq->sw_ring[i].mbuf = NULL;
1382 }
1383 }
1384 }
1385 }
1386
1387 static void
1388 igb_rx_queue_release(struct igb_rx_queue *rxq)
1389 {
1390 if (rxq != NULL) {
1391 igb_rx_queue_release_mbufs(rxq);
1392 rte_free(rxq->sw_ring);
1393 rte_free(rxq);
1394 }
1395 }
1396
1397 void
1398 eth_igb_rx_queue_release(void *rxq)
1399 {
1400 igb_rx_queue_release(rxq);
1401 }
1402
1403 static void
1404 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1405 {
1406 static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1407 unsigned i;
1408
1409 /* Zero out HW ring memory */
1410 for (i = 0; i < rxq->nb_rx_desc; i++) {
1411 rxq->rx_ring[i] = zeroed_desc;
1412 }
1413
1414 rxq->rx_tail = 0;
1415 rxq->pkt_first_seg = NULL;
1416 rxq->pkt_last_seg = NULL;
1417 }
1418
1419 int
1420 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1421 uint16_t queue_idx,
1422 uint16_t nb_desc,
1423 unsigned int socket_id,
1424 const struct rte_eth_rxconf *rx_conf,
1425 struct rte_mempool *mp)
1426 {
1427 const struct rte_memzone *rz;
1428 struct igb_rx_queue *rxq;
1429 struct e1000_hw *hw;
1430 unsigned int size;
1431
1432 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1433
1434 /*
1435 * Validate number of receive descriptors.
1436 * It must not exceed the hardware maximum, and must be a multiple
1437 * of IGB_RXD_ALIGN.
1438 */
1439 if (nb_desc % IGB_RXD_ALIGN != 0 ||
1440 (nb_desc > E1000_MAX_RING_DESC) ||
1441 (nb_desc < E1000_MIN_RING_DESC)) {
1442 return -EINVAL;
1443 }
1444
1445 /* Free memory prior to re-allocation if needed */
1446 if (dev->data->rx_queues[queue_idx] != NULL) {
1447 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1448 dev->data->rx_queues[queue_idx] = NULL;
1449 }
1450
1451 /* First allocate the RX queue data structure. */
1452 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1453 RTE_CACHE_LINE_SIZE);
1454 if (rxq == NULL)
1455 return -ENOMEM;
1456 rxq->mb_pool = mp;
1457 rxq->nb_rx_desc = nb_desc;
1458 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1459 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1460 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1461 if (rxq->wthresh > 0 &&
1462 (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
1463 rxq->wthresh = 1;
1464 rxq->drop_en = rx_conf->rx_drop_en;
1465 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1466 rxq->queue_id = queue_idx;
1467 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1468 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1469 rxq->port_id = dev->data->port_id;
1470 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
1471 ETHER_CRC_LEN);
1472
1473 /*
1474 * Allocate RX ring hardware descriptors. A memzone large enough to
1475 * handle the maximum ring size is allocated in order to allow for
1476 * resizing in later calls to the queue setup function.
1477 */
1478 size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1479 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1480 E1000_ALIGN, socket_id);
1481 if (rz == NULL) {
1482 igb_rx_queue_release(rxq);
1483 return -ENOMEM;
1484 }
1485 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1486 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1487 rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
1488 rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1489
1490 /* Allocate software ring. */
1491 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1492 sizeof(struct igb_rx_entry) * nb_desc,
1493 RTE_CACHE_LINE_SIZE);
1494 if (rxq->sw_ring == NULL) {
1495 igb_rx_queue_release(rxq);
1496 return -ENOMEM;
1497 }
1498 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1499 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1500
1501 dev->data->rx_queues[queue_idx] = rxq;
1502 igb_reset_rx_queue(rxq);
1503
1504 return 0;
1505 }
1506
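/*
 * Illustrative sketch (guarded out): RX queue setup through the generic
 * ethdev API, including the mbuf pool that the function above stores in
 * rxq->mb_pool. The helper name, pool name and sizing are made up for this
 * example.
 */
#if 0
static int
igb_rx_queue_setup_example(uint8_t port_id, unsigned int socket_id)
{
	struct rte_mempool *mp;

	mp = rte_pktmbuf_pool_create("igb_rx_pool_example", 4096, 256, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE, socket_id);
	if (mp == NULL)
		return -ENOMEM;

	/* 512 descriptors: a multiple of IGB_RXD_ALIGN; a NULL rxconf keeps
	 * the driver defaults. */
	return rte_eth_rx_queue_setup(port_id, 0, 512, socket_id, NULL, mp);
}
#endif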
1507 uint32_t
1508 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1509 {
1510 #define IGB_RXQ_SCAN_INTERVAL 4
1511 volatile union e1000_adv_rx_desc *rxdp;
1512 struct igb_rx_queue *rxq;
1513 uint32_t desc = 0;
1514
1515 if (rx_queue_id >= dev->data->nb_rx_queues) {
1516 PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
1517 return 0;
1518 }
1519
1520 rxq = dev->data->rx_queues[rx_queue_id];
1521 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1522
1523 while ((desc < rxq->nb_rx_desc) &&
1524 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1525 desc += IGB_RXQ_SCAN_INTERVAL;
1526 rxdp += IGB_RXQ_SCAN_INTERVAL;
1527 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1528 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1529 desc - rxq->nb_rx_desc]);
1530 }
1531
1532 return desc;
1533 }
1534
1535 int
1536 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1537 {
1538 volatile union e1000_adv_rx_desc *rxdp;
1539 struct igb_rx_queue *rxq = rx_queue;
1540 uint32_t desc;
1541
1542 if (unlikely(offset >= rxq->nb_rx_desc))
1543 return 0;
1544 desc = rxq->rx_tail + offset;
1545 if (desc >= rxq->nb_rx_desc)
1546 desc -= rxq->nb_rx_desc;
1547
1548 rxdp = &rxq->rx_ring[desc];
1549 return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1550 }
1551
1552 void
1553 igb_dev_clear_queues(struct rte_eth_dev *dev)
1554 {
1555 uint16_t i;
1556 struct igb_tx_queue *txq;
1557 struct igb_rx_queue *rxq;
1558
1559 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1560 txq = dev->data->tx_queues[i];
1561 if (txq != NULL) {
1562 igb_tx_queue_release_mbufs(txq);
1563 igb_reset_tx_queue(txq, dev);
1564 }
1565 }
1566
1567 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1568 rxq = dev->data->rx_queues[i];
1569 if (rxq != NULL) {
1570 igb_rx_queue_release_mbufs(rxq);
1571 igb_reset_rx_queue(rxq);
1572 }
1573 }
1574 }
1575
1576 void
1577 igb_dev_free_queues(struct rte_eth_dev *dev)
1578 {
1579 uint16_t i;
1580
1581 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1582 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1583 dev->data->rx_queues[i] = NULL;
1584 }
1585 dev->data->nb_rx_queues = 0;
1586
1587 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1588 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1589 dev->data->tx_queues[i] = NULL;
1590 }
1591 dev->data->nb_tx_queues = 0;
1592 }
1593
1594 /**
1595 * Receive Side Scaling (RSS).
1596 * See section 7.1.1.7 in the following document:
1597 * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1598 *
1599 * Principles:
1600 * The source and destination IP addresses of the IP header and the source and
1601 * destination ports of TCP/UDP headers, if any, of received packets are hashed
1602 * against a configurable random key to compute a 32-bit RSS hash result.
1603 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1604 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
1605 * RSS output index which is used as the RX queue index where to store the
1606 * received packets.
1607 * The following output is supplied in the RX write-back descriptor:
1608 * - 32-bit result of the Microsoft RSS hash function,
1609 * - 4-bit RSS type field.
1610 */
1611
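/*
 * Illustrative sketch (guarded out): how the 7 hash LSBs select an RX queue
 * through the redirection table. It mirrors the round-robin RETA fill done
 * in igb_rss_configure() below. The helper name is made up.
 */
#if 0
static uint16_t
igb_rss_queue_for_hash_example(uint32_t rss_hash, uint16_t nb_rx_queues)
{
	/* The driver programs RETA entry i with (i % nb_rx_queues), so the
	 * 128-entry table spreads the hash space evenly over the queues. */
	uint32_t reta_idx = rss_hash & 0x7F;	/* 7 LSBs -> 128 entries */

	return (uint16_t)(reta_idx % nb_rx_queues);
}
#endif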
1612 /*
1613 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1614 * Used as the default key.
1615 */
1616 static uint8_t rss_intel_key[40] = {
1617 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1618 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1619 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1620 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1621 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1622 };
1623
1624 static void
1625 igb_rss_disable(struct rte_eth_dev *dev)
1626 {
1627 struct e1000_hw *hw;
1628 uint32_t mrqc;
1629
1630 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1631 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1632 mrqc &= ~E1000_MRQC_ENABLE_MASK;
1633 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1634 }
1635
1636 static void
1637 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1638 {
1639 uint8_t *hash_key;
1640 uint32_t rss_key;
1641 uint32_t mrqc;
1642 uint64_t rss_hf;
1643 uint16_t i;
1644
1645 hash_key = rss_conf->rss_key;
1646 if (hash_key != NULL) {
1647 /* Fill in RSS hash key */
1648 for (i = 0; i < 10; i++) {
1649 rss_key = hash_key[(i * 4)];
1650 rss_key |= hash_key[(i * 4) + 1] << 8;
1651 rss_key |= hash_key[(i * 4) + 2] << 16;
1652 rss_key |= hash_key[(i * 4) + 3] << 24;
1653 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1654 }
1655 }
1656
1657 /* Set configured hashing protocols in MRQC register */
1658 rss_hf = rss_conf->rss_hf;
1659 mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1660 if (rss_hf & ETH_RSS_IPV4)
1661 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1662 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1663 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1664 if (rss_hf & ETH_RSS_IPV6)
1665 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1666 if (rss_hf & ETH_RSS_IPV6_EX)
1667 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1668 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1669 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1670 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1671 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1672 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1673 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1674 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1675 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1676 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1677 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1678 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1679 }
1680
1681 int
1682 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1683 struct rte_eth_rss_conf *rss_conf)
1684 {
1685 struct e1000_hw *hw;
1686 uint32_t mrqc;
1687 uint64_t rss_hf;
1688
1689 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1690
1691 /*
1692 * Before changing anything, first check that the update RSS operation
1693 * does not attempt to disable RSS, if RSS was enabled at
1694 * initialization time, or does not attempt to enable RSS, if RSS was
1695 * disabled at initialization time.
1696 */
1697 rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1698 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1699 if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1700 if (rss_hf != 0) /* Enable RSS */
1701 return -(EINVAL);
1702 return 0; /* Nothing to do */
1703 }
1704 /* RSS enabled */
1705 if (rss_hf == 0) /* Disable RSS */
1706 return -(EINVAL);
1707 igb_hw_rss_hash_set(hw, rss_conf);
1708 return 0;
1709 }
1710
1711 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
1712 struct rte_eth_rss_conf *rss_conf)
1713 {
1714 struct e1000_hw *hw;
1715 uint8_t *hash_key;
1716 uint32_t rss_key;
1717 uint32_t mrqc;
1718 uint64_t rss_hf;
1719 uint16_t i;
1720
1721 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1722 hash_key = rss_conf->rss_key;
1723 if (hash_key != NULL) {
1724 /* Return RSS hash key */
1725 for (i = 0; i < 10; i++) {
1726 rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
1727 hash_key[(i * 4)] = rss_key & 0x000000FF;
1728 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
1729 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
1730 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
1731 }
1732 }
1733
1734 /* Get RSS functions configured in MRQC register */
1735 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1736 if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
1737 rss_conf->rss_hf = 0;
1738 return 0;
1739 }
1740 rss_hf = 0;
1741 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
1742 rss_hf |= ETH_RSS_IPV4;
1743 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
1744 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1745 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
1746 rss_hf |= ETH_RSS_IPV6;
1747 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
1748 rss_hf |= ETH_RSS_IPV6_EX;
1749 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
1750 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
1751 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
1752 rss_hf |= ETH_RSS_IPV6_TCP_EX;
1753 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
1754 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
1755 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
1756 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
1757 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
1758 rss_hf |= ETH_RSS_IPV6_UDP_EX;
1759 rss_conf->rss_hf = rss_hf;
1760 return 0;
1761 }
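/*
 * Note: when rss_conf->rss_key is non-NULL, the loop above always writes
 * back the full 40-byte key, so the caller must provide a buffer of at
 * least 40 bytes. A minimal read-back sketch through the generic ethdev
 * API ("port_id" is an application-side value):
 *
 *	uint8_t key[40];
 *	struct rte_eth_rss_conf conf = { .rss_key = key };
 *
 *	rte_eth_dev_rss_hash_conf_get(port_id, &conf);
 */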
1762
1763 static void
1764 igb_rss_configure(struct rte_eth_dev *dev)
1765 {
1766 struct rte_eth_rss_conf rss_conf;
1767 struct e1000_hw *hw;
1768 uint32_t shift;
1769 uint16_t i;
1770
1771 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1772
1773 /* Fill in redirection table. */
1774 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
1775 for (i = 0; i < 128; i++) {
1776 union e1000_reta {
1777 uint32_t dword;
1778 uint8_t bytes[4];
1779 } reta;
1780 uint8_t q_idx;
1781
1782 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
1783 i % dev->data->nb_rx_queues : 0);
1784 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
1785 if ((i & 3) == 3)
1786 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
1787 }
1788
1789 /*
1790 * Configure the RSS key and the RSS protocols used to compute
1791 * the RSS hash of input packets.
1792 */
1793 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
1794 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
1795 igb_rss_disable(dev);
1796 return;
1797 }
1798 if (rss_conf.rss_key == NULL)
1799 rss_conf.rss_key = rss_intel_key; /* Default hash key */
1800 igb_hw_rss_hash_set(hw, &rss_conf);
1801 }
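/*
 * For illustration: the redirection table holds 128 entries, packed four
 * per RETA register, and the loop above fills it round-robin. With four
 * RX queues, for example, entry 0 maps to queue 0, entry 1 to queue 1,
 * ..., entry 4 back to queue 0, and so on. On 82575 the queue index is
 * additionally shifted left by 6 to match that device's RETA entry layout.
 */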
1802
1803 /*
1804 * Check whether the MAC type supports VMDq.
1805 * Return 1 if it does; otherwise return 0.
1806 */
1807 static int
1808 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
1809 {
1810 const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1811
1812 switch (hw->mac.type) {
1813 case e1000_82576:
1814 case e1000_82580:
1815 case e1000_i350:
1816 return 1;
1817 case e1000_82540:
1818 case e1000_82541:
1819 case e1000_82542:
1820 case e1000_82543:
1821 case e1000_82544:
1822 case e1000_82545:
1823 case e1000_82546:
1824 case e1000_82547:
1825 case e1000_82571:
1826 case e1000_82572:
1827 case e1000_82573:
1828 case e1000_82574:
1829 case e1000_82583:
1830 case e1000_i210:
1831 case e1000_i211:
1832 default:
1833 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
1834 return 0;
1835 }
1836 }
1837
1838 static int
1839 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
1840 {
1841 struct rte_eth_vmdq_rx_conf *cfg;
1842 struct e1000_hw *hw;
1843 uint32_t mrqc, vt_ctl, vmolr, rctl;
1844 int i;
1845
1846 PMD_INIT_FUNC_TRACE();
1847
1848 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1849 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1850
1851 /* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
1852 if (igb_is_vmdq_supported(dev) == 0)
1853 return -1;
1854
1855 igb_rss_disable(dev);
1856
1857 /* RCTL: enable VLAN filter */
1858 rctl = E1000_READ_REG(hw, E1000_RCTL);
1859 rctl |= E1000_RCTL_VFE;
1860 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1861
1862 /* MRQC: enable vmdq */
1863 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1864 mrqc |= E1000_MRQC_ENABLE_VMDQ;
1865 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1866
1867 /* VTCTL: pool selection according to VLAN tag */
1868 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1869 if (cfg->enable_default_pool)
1870 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
1871 vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
1872 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1873
1874 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1875 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1876 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
1877 E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
1878 E1000_VMOLR_MPME);
1879
1880 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
1881 vmolr |= E1000_VMOLR_AUPE;
1882 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
1883 vmolr |= E1000_VMOLR_ROMPE;
1884 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
1885 vmolr |= E1000_VMOLR_ROPE;
1886 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
1887 vmolr |= E1000_VMOLR_BAM;
1888 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
1889 vmolr |= E1000_VMOLR_MPME;
1890
1891 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1892 }
1893
1894 /*
1895 * VMOLR: set STRVLAN to 1 if IGMAC in VTCTL is set to 1.
1896 * Both 82576 and 82580 support it.
1897 */
1898 if (hw->mac.type != e1000_i350) {
1899 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
1900 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
1901 vmolr |= E1000_VMOLR_STRVLAN;
1902 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
1903 }
1904 }
1905
1906 /* VFTA - enable all vlan filters */
1907 for (i = 0; i < IGB_VFTA_SIZE; i++)
1908 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
1909
1910 /* VFRE: enable 8 pools for RX; both 82576 and i350 support it */
1911 if (hw->mac.type != e1000_82580)
1912 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
1913
1914 /*
1915 * RAH/RAL - allow pools to read specific MAC addresses.
1916 * In this case, all pools should be able to read from MAC address 0.
1917 */
1918 E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
1919 E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
1920
1921 /* VLVF: set up filters for vlan tags as configured */
1922 for (i = 0; i < cfg->nb_pool_maps; i++) {
1923 /* set vlan id in VF register and set the valid bit */
1924 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE | \
1925 (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) | \
1926 ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT ) & \
1927 E1000_VLVF_POOLSEL_MASK)));
1928 }
1929
1930 E1000_WRITE_FLUSH(hw);
1931
1932 return 0;
1933 }
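/*
 * Configuration sketch (illustrative only; the VLAN ID and pool mask are
 * example values): the fields consumed above come from the application's
 * port configuration, e.g.:
 *
 *	struct rte_eth_conf port_conf;
 *
 *	memset(&port_conf, 0, sizeof(port_conf));
 *	port_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
 *	port_conf.rx_adv_conf.vmdq_rx_conf.nb_queue_pools = ETH_8_POOLS;
 *	port_conf.rx_adv_conf.vmdq_rx_conf.rx_mode = ETH_VMDQ_ACCEPT_UNTAG;
 *	port_conf.rx_adv_conf.vmdq_rx_conf.nb_pool_maps = 1;
 *	port_conf.rx_adv_conf.vmdq_rx_conf.pool_map[0].vlan_id = 100;
 *	port_conf.rx_adv_conf.vmdq_rx_conf.pool_map[0].pools = 1 << 0;
 */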
1934
1935
1936 /*********************************************************************
1937 *
1938 * Enable receive unit.
1939 *
1940 **********************************************************************/
1941
1942 static int
1943 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
1944 {
1945 struct igb_rx_entry *rxe = rxq->sw_ring;
1946 uint64_t dma_addr;
1947 unsigned i;
1948
1949 /* Initialize software ring entries. */
1950 for (i = 0; i < rxq->nb_rx_desc; i++) {
1951 volatile union e1000_adv_rx_desc *rxd;
1952 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
1953
1954 if (mbuf == NULL) {
1955 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1956 "queue_id=%hu", rxq->queue_id);
1957 return -ENOMEM;
1958 }
1959 dma_addr =
1960 rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
1961 rxd = &rxq->rx_ring[i];
1962 rxd->read.hdr_addr = 0;
1963 rxd->read.pkt_addr = dma_addr;
1964 rxe[i].mbuf = mbuf;
1965 }
1966
1967 return 0;
1968 }
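/*
 * After this loop every software ring entry owns one mbuf and the matching
 * advanced RX descriptor points at that mbuf's data buffer (one-buffer
 * layout: hdr_addr is left at 0).
 */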
1969
1970 #define E1000_MRQC_DEF_Q_SHIFT (3)
1971 static int
1972 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
1973 {
1974 struct e1000_hw *hw =
1975 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1976 uint32_t mrqc;
1977
1978 if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
1979 /*
1980 * SRIOV active scheme
1981 * FIXME: RSS together with VMDq & SR-IOV is not supported yet
1982 */
1983 mrqc = E1000_MRQC_ENABLE_VMDQ;
1984 /* Def_Q = 011b: ignore the default-queue field, use VT_CTL.DEF_PL instead */
1985 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
1986 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1987 } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
1988 /*
1989 * SRIOV inactive scheme
1990 */
1991 switch (dev->data->dev_conf.rxmode.mq_mode) {
1992 case ETH_MQ_RX_RSS:
1993 igb_rss_configure(dev);
1994 break;
1995 case ETH_MQ_RX_VMDQ_ONLY:
1996 /* Configure general VMDq-only RX parameters */
1997 igb_vmdq_rx_hw_configure(dev);
1998 break;
1999 case ETH_MQ_RX_NONE:
2000 /* If mq_mode is none, disable RSS. */
2001 default:
2002 igb_rss_disable(dev);
2003 break;
2004 }
2005 }
2006
2007 return 0;
2008 }
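/*
 * In short: with SR-IOV active in 8-pool mode the device is forced into
 * VMDq mode with the default-queue field set to 011b (defer to
 * VT_CTL.DEF_PL); without SR-IOV, the port's RX mq_mode selects RSS,
 * VMDq-only, or no multi-queue handling at all.
 */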
2009
2010 int
2011 eth_igb_rx_init(struct rte_eth_dev *dev)
2012 {
2013 struct e1000_hw *hw;
2014 struct igb_rx_queue *rxq;
2015 uint32_t rctl;
2016 uint32_t rxcsum;
2017 uint32_t srrctl;
2018 uint16_t buf_size;
2019 uint16_t rctl_bsize;
2020 uint16_t i;
2021 int ret;
2022
2023 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2024 srrctl = 0;
2025
2026 /*
2027 * Make sure receives are disabled while setting
2028 * up the descriptor ring.
2029 */
2030 rctl = E1000_READ_REG(hw, E1000_RCTL);
2031 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2032
2033 /*
2034 * Configure jumbo frame support, if requested.
2035 */
2036 if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
2037 rctl |= E1000_RCTL_LPE;
2038
2039 /*
2040 * Set the maximum packet length here; it may be updated later
2041 * when dual VLAN is enabled or disabled.
2042 */
2043 E1000_WRITE_REG(hw, E1000_RLPML,
2044 dev->data->dev_conf.rxmode.max_rx_pkt_len +
2045 VLAN_TAG_SIZE);
2046 } else
2047 rctl &= ~E1000_RCTL_LPE;
2048
2049 /* Configure and enable each RX queue. */
2050 rctl_bsize = 0;
2051 dev->rx_pkt_burst = eth_igb_recv_pkts;
2052 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2053 uint64_t bus_addr;
2054 uint32_t rxdctl;
2055
2056 rxq = dev->data->rx_queues[i];
2057
2058 /* Allocate buffers for descriptor rings and set up queue */
2059 ret = igb_alloc_rx_queue_mbufs(rxq);
2060 if (ret)
2061 return ret;
2062
2063 /*
2064 * Reset crc_len in case it was changed after queue setup by a
2065 * call to configure
2066 */
2067 rxq->crc_len =
2068 (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
2069 0 : ETHER_CRC_LEN);
2070
2071 bus_addr = rxq->rx_ring_phys_addr;
2072 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
2073 rxq->nb_rx_desc *
2074 sizeof(union e1000_adv_rx_desc));
2075 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2076 (uint32_t)(bus_addr >> 32));
2077 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2078
2079 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2080
2081 /*
2082 * Configure RX buffer size.
2083 */
2084 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2085 RTE_PKTMBUF_HEADROOM);
2086 if (buf_size >= 1024) {
2087 /*
2088 * Configure the BSIZEPACKET field of the SRRCTL
2089 * register of the queue.
2090 * Value is in 1 KB resolution, from 1 KB to 127 KB.
2091 * If this field is equal to 0b, then RCTL.BSIZE
2092 * determines the RX packet buffer size.
2093 */
2094 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2095 E1000_SRRCTL_BSIZEPKT_MASK);
2096 buf_size = (uint16_t) ((srrctl &
2097 E1000_SRRCTL_BSIZEPKT_MASK) <<
2098 E1000_SRRCTL_BSIZEPKT_SHIFT);
2099
2100 /* Add dual VLAN tag length to support dual VLAN */
2101 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2102 2 * VLAN_TAG_SIZE) > buf_size){
2103 if (!dev->data->scattered_rx)
2104 PMD_INIT_LOG(DEBUG,
2105 "forcing scatter mode");
2106 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2107 dev->data->scattered_rx = 1;
2108 }
2109 } else {
2110 /*
2111 * Use BSIZE field of the device RCTL register.
2112 */
2113 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2114 rctl_bsize = buf_size;
2115 if (!dev->data->scattered_rx)
2116 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2117 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2118 dev->data->scattered_rx = 1;
2119 }
2120
2121 /* Set DROP_EN if packets are to be dropped when no descriptors are available */
2122 if (rxq->drop_en)
2123 srrctl |= E1000_SRRCTL_DROP_EN;
2124
2125 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2126
2127 /* Enable this RX queue. */
2128 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2129 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2130 rxdctl &= 0xFFF00000;
2131 rxdctl |= (rxq->pthresh & 0x1F);
2132 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2133 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2134 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2135 }
2136
2137 if (dev->data->dev_conf.rxmode.enable_scatter) {
2138 if (!dev->data->scattered_rx)
2139 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2140 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2141 dev->data->scattered_rx = 1;
2142 }
2143
2144 /*
2145 * Setup BSIZE field of RCTL register, if needed.
2146 * Buffer sizes >= 1024 are not set up in the RCTL
2147 * register, since the code above configures the SRRCTL register of
2148 * the RX queue in such a case.
2149 * All configurable sizes are:
2150 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2151 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
2152 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
2153 * 2048: rctl |= E1000_RCTL_SZ_2048;
2154 * 1024: rctl |= E1000_RCTL_SZ_1024;
2155 * 512: rctl |= E1000_RCTL_SZ_512;
2156 * 256: rctl |= E1000_RCTL_SZ_256;
2157 */
2158 if (rctl_bsize > 0) {
2159 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2160 rctl |= E1000_RCTL_SZ_512;
2161 else /* 256 <= buf_size < 512 - use 256 */
2162 rctl |= E1000_RCTL_SZ_256;
2163 }
2164
2165 /*
2166 * Configure RSS if device configured with multiple RX queues.
2167 */
2168 igb_dev_mq_rx_configure(dev);
2169
2170 /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2171 rctl |= E1000_READ_REG(hw, E1000_RCTL);
2172
2173 /*
2174 * Setup the Checksum Register.
2175 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2176 */
2177 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2178 rxcsum |= E1000_RXCSUM_PCSD;
2179
2180 /* Enable both L3/L4 rx checksum offload */
2181 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
2182 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2183 else
2184 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2185 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2186
2187 /* Setup the Receive Control Register. */
2188 if (dev->data->dev_conf.rxmode.hw_strip_crc) {
2189 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2190
2191 /* set STRCRC bit in all queues */
2192 if (hw->mac.type == e1000_i350 ||
2193 hw->mac.type == e1000_i210 ||
2194 hw->mac.type == e1000_i211 ||
2195 hw->mac.type == e1000_i354) {
2196 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2197 rxq = dev->data->rx_queues[i];
2198 uint32_t dvmolr = E1000_READ_REG(hw,
2199 E1000_DVMOLR(rxq->reg_idx));
2200 dvmolr |= E1000_DVMOLR_STRCRC;
2201 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2202 }
2203 }
2204 } else {
2205 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2206
2207 /* clear STRCRC bit in all queues */
2208 if (hw->mac.type == e1000_i350 ||
2209 hw->mac.type == e1000_i210 ||
2210 hw->mac.type == e1000_i211 ||
2211 hw->mac.type == e1000_i354) {
2212 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2213 rxq = dev->data->rx_queues[i];
2214 uint32_t dvmolr = E1000_READ_REG(hw,
2215 E1000_DVMOLR(rxq->reg_idx));
2216 dvmolr &= ~E1000_DVMOLR_STRCRC;
2217 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2218 }
2219 }
2220 }
2221
2222 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2223 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2224 E1000_RCTL_RDMTS_HALF |
2225 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2226
2227 /* Make sure VLAN Filters are off. */
2228 if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2229 rctl &= ~E1000_RCTL_VFE;
2230 /* Don't store bad packets. */
2231 rctl &= ~E1000_RCTL_SBP;
2232
2233 /* Enable Receives. */
2234 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2235
2236 /*
2237 * Setup the HW Rx Head and Tail Descriptor Pointers.
2238 * This needs to be done after enable.
2239 */
2240 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2241 rxq = dev->data->rx_queues[i];
2242 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2243 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
2244 }
2245
2246 return 0;
2247 }
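/*
 * Call-order sketch (illustrative; "port_id", "port_conf" and "mb_pool"
 * are application-side objects): this function is not called directly by
 * applications, it runs as part of device start, e.g.:
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);	// eventually reaches eth_igb_rx_init()
 */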
2248
2249 /*********************************************************************
2250 *
2251 * Enable transmit unit.
2252 *
2253 **********************************************************************/
2254 void
2255 eth_igb_tx_init(struct rte_eth_dev *dev)
2256 {
2257 struct e1000_hw *hw;
2258 struct igb_tx_queue *txq;
2259 uint32_t tctl;
2260 uint32_t txdctl;
2261 uint16_t i;
2262
2263 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2264
2265 /* Setup the Base and Length of the Tx Descriptor Rings. */
2266 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2267 uint64_t bus_addr;
2268 txq = dev->data->tx_queues[i];
2269 bus_addr = txq->tx_ring_phys_addr;
2270
2271 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2272 txq->nb_tx_desc *
2273 sizeof(union e1000_adv_tx_desc));
2274 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2275 (uint32_t)(bus_addr >> 32));
2276 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2277
2278 /* Setup the HW Tx Head and Tail descriptor pointers. */
2279 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2280 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2281
2282 /* Setup Transmit threshold registers. */
2283 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2284 txdctl |= txq->pthresh & 0x1F;
2285 txdctl |= ((txq->hthresh & 0x1F) << 8);
2286 txdctl |= ((txq->wthresh & 0x1F) << 16);
2287 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2288 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2289 }
2290
2291 /* Program the Transmit Control Register. */
2292 tctl = E1000_READ_REG(hw, E1000_TCTL);
2293 tctl &= ~E1000_TCTL_CT;
2294 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2295 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2296
2297 e1000_config_collision_dist(hw);
2298
2299 /* This write will effectively turn on the transmit unit. */
2300 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2301 }
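/*
 * eth_igb_tx_init() is the transmit counterpart of eth_igb_rx_init() and
 * runs from the same device-start path. No descriptor prefill is needed
 * on TX, so only the ring base/length, threshold values and TCTL are
 * programmed here.
 */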
2302
2303 /*********************************************************************
2304 *
2305 * Enable VF receive unit.
2306 *
2307 **********************************************************************/
2308 int
2309 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2310 {
2311 struct e1000_hw *hw;
2312 struct igb_rx_queue *rxq;
2313 uint32_t srrctl;
2314 uint16_t buf_size;
2315 uint16_t rctl_bsize;
2316 uint16_t i;
2317 int ret;
2318
2319 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2320
2321 /* setup MTU */
2322 e1000_rlpml_set_vf(hw,
2323 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
2324 VLAN_TAG_SIZE));
2325
2326 /* Configure and enable each RX queue. */
2327 rctl_bsize = 0;
2328 dev->rx_pkt_burst = eth_igb_recv_pkts;
2329 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2330 uint64_t bus_addr;
2331 uint32_t rxdctl;
2332
2333 rxq = dev->data->rx_queues[i];
2334
2335 /* Allocate buffers for descriptor rings and set up queue */
2336 ret = igb_alloc_rx_queue_mbufs(rxq);
2337 if (ret)
2338 return ret;
2339
2340 bus_addr = rxq->rx_ring_phys_addr;
2341 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2342 rxq->nb_rx_desc *
2343 sizeof(union e1000_adv_rx_desc));
2344 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2345 (uint32_t)(bus_addr >> 32));
2346 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2347
2348 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2349
2350 /*
2351 * Configure RX buffer size.
2352 */
2353 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2354 RTE_PKTMBUF_HEADROOM);
2355 if (buf_size >= 1024) {
2356 /*
2357 * Configure the BSIZEPACKET field of the SRRCTL
2358 * register of the queue.
2359 * Value is in 1 KB resolution, from 1 KB to 127 KB.
2360 * If this field is equal to 0b, then RCTL.BSIZE
2361 * determines the RX packet buffer size.
2362 */
2363 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2364 E1000_SRRCTL_BSIZEPKT_MASK);
2365 buf_size = (uint16_t) ((srrctl &
2366 E1000_SRRCTL_BSIZEPKT_MASK) <<
2367 E1000_SRRCTL_BSIZEPKT_SHIFT);
2368
2369 /* Add dual VLAN tag length to support dual VLAN */
2370 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2371 2 * VLAN_TAG_SIZE) > buf_size){
2372 if (!dev->data->scattered_rx)
2373 PMD_INIT_LOG(DEBUG,
2374 "forcing scatter mode");
2375 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2376 dev->data->scattered_rx = 1;
2377 }
2378 } else {
2379 /*
2380 * Use BSIZE field of the device RCTL register.
2381 */
2382 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2383 rctl_bsize = buf_size;
2384 if (!dev->data->scattered_rx)
2385 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2386 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2387 dev->data->scattered_rx = 1;
2388 }
2389
2390 /* Set DROP_EN if packets are to be dropped when no descriptors are available */
2391 if (rxq->drop_en)
2392 srrctl |= E1000_SRRCTL_DROP_EN;
2393
2394 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2395
2396 /* Enable this RX queue. */
2397 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2398 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2399 rxdctl &= 0xFFF00000;
2400 rxdctl |= (rxq->pthresh & 0x1F);
2401 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2402 if (hw->mac.type == e1000_vfadapt) {
2403 /*
2404 * Workaround for 82576 VF erratum:
2405 * force WTHRESH to 1 to avoid descriptor write-back
2406 * sometimes not being triggered.
2407 */
2408 rxdctl |= 0x10000;
2409 PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
2410 }
2411 else
2412 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2413 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2414 }
2415
2416 if (dev->data->dev_conf.rxmode.enable_scatter) {
2417 if (!dev->data->scattered_rx)
2418 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2419 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2420 dev->data->scattered_rx = 1;
2421 }
2422
2423 /*
2424 * Setup the HW Rx Head and Tail Descriptor Pointers.
2425 * This needs to be done after enable.
2426 */
2427 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2428 rxq = dev->data->rx_queues[i];
2429 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2430 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2431 }
2432
2433 return 0;
2434 }
2435
2436 /*********************************************************************
2437 *
2438 * Enable VF transmit unit.
2439 *
2440 **********************************************************************/
2441 void
2442 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2443 {
2444 struct e1000_hw *hw;
2445 struct igb_tx_queue *txq;
2446 uint32_t txdctl;
2447 uint16_t i;
2448
2449 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2450
2451 /* Setup the Base and Length of the Tx Descriptor Rings. */
2452 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2453 uint64_t bus_addr;
2454
2455 txq = dev->data->tx_queues[i];
2456 bus_addr = txq->tx_ring_phys_addr;
2457 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2458 txq->nb_tx_desc *
2459 sizeof(union e1000_adv_tx_desc));
2460 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2461 (uint32_t)(bus_addr >> 32));
2462 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2463
2464 /* Setup the HW Tx Head and Tail descriptor pointers. */
2465 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2466 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2467
2468 /* Setup Transmit threshold registers. */
2469 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2470 txdctl |= txq->pthresh & 0x1F;
2471 txdctl |= ((txq->hthresh & 0x1F) << 8);
2472 if (hw->mac.type == e1000_82576) {
2473 /*
2474 * Workaround for 82576 VF erratum:
2475 * force WTHRESH to 1 to avoid descriptor write-back
2476 * sometimes not being triggered.
2477 */
2478 txdctl |= 0x10000;
2479 PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
2480 }
2481 else
2482 txdctl |= ((txq->wthresh & 0x1F) << 16);
2483 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2484 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2485 }
2486
2487 }
2488
2489 void
2490 igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2491 struct rte_eth_rxq_info *qinfo)
2492 {
2493 struct igb_rx_queue *rxq;
2494
2495 rxq = dev->data->rx_queues[queue_id];
2496
2497 qinfo->mp = rxq->mb_pool;
2498 qinfo->scattered_rx = dev->data->scattered_rx;
2499 qinfo->nb_desc = rxq->nb_rx_desc;
2500
2501 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2502 qinfo->conf.rx_drop_en = rxq->drop_en;
2503 }
2504
2505 void
2506 igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2507 struct rte_eth_txq_info *qinfo)
2508 {
2509 struct igb_tx_queue *txq;
2510
2511 txq = dev->data->tx_queues[queue_id];
2512
2513 qinfo->nb_desc = txq->nb_tx_desc;
2514
2515 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2516 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2517 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2518 }
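/*
 * Usage sketch (illustrative; "port_id" is an application-side value):
 * these two handlers back the generic queue-info API, e.g.:
 *
 *	struct rte_eth_rxq_info rx_info;
 *	struct rte_eth_txq_info tx_info;
 *
 *	rte_eth_rx_queue_info_get(port_id, 0, &rx_info);
 *	rte_eth_tx_queue_info_get(port_id, 0, &tx_info);
 */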