/* Copyright 2008-2016 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>

#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/* Prefetch a cache line into all cache levels. */
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif
static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->completed_index_flags) &
		~CQ_DESC_COMP_NDX_MASK;
}

static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->bytes_written_flags) &
		~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_desc_packet_error(uint16_t bwflags)
{
	return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
}

static inline uint8_t
enic_cq_rx_desc_eop(uint16_t ciflags)
{
	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP) ==
		CQ_ENET_RQ_DESC_FLAGS_EOP;
}

static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
	return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}

static inline uint8_t
enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}

static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
	return le32_to_cpu(cqrd->rss_hash);
}

static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
	return le16_to_cpu(cqrd->vlan);
}

static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;

	return le16_to_cpu(cqrd->bytes_written_flags) &
		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}
/* Find the offset to L5. This is needed by enic TSO implementation.
 * Return 0 if not a TCP packet or can't figure out the length.
 */
static inline uint8_t tso_header_len(struct rte_mbuf *mbuf)
{
	struct ether_hdr *eh;
	struct vlan_hdr *vh;
	struct ipv4_hdr *ip4;
	struct ipv6_hdr *ip6;
	struct tcp_hdr *th;
	uint16_t ether_type;
	uint8_t hdr_len;

	/* offset past Ethernet header */
	eh = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
	ether_type = eh->ether_type;
	hdr_len = sizeof(struct ether_hdr);
	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
		vh = rte_pktmbuf_mtod_offset(mbuf, struct vlan_hdr *, hdr_len);
		ether_type = vh->eth_proto;
		hdr_len += sizeof(struct vlan_hdr);
	}

	/* offset past IP header */
	switch (rte_be_to_cpu_16(ether_type)) {
	case ETHER_TYPE_IPv4:
		ip4 = rte_pktmbuf_mtod_offset(mbuf, struct ipv4_hdr *, hdr_len);
		if (ip4->next_proto_id != IPPROTO_TCP)
			return 0;
		hdr_len += (ip4->version_ihl & 0xf) * 4;
		break;
	case ETHER_TYPE_IPv6:
		ip6 = rte_pktmbuf_mtod_offset(mbuf, struct ipv6_hdr *, hdr_len);
		if (ip6->proto != IPPROTO_TCP)
			return 0;
		hdr_len += sizeof(struct ipv6_hdr);
		break;
	default:
		return 0;
	}

	if ((hdr_len + sizeof(struct tcp_hdr)) > mbuf->pkt_len)
		return 0;

	/* offset past TCP header */
	th = rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, hdr_len);
	hdr_len += (th->data_off >> 4) * 4;

	if (hdr_len > mbuf->pkt_len)
		return 0;

	return hdr_len;
}
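/* Note: tso_header_len() returns the full L2+L3+L4 header length in bytes;
 * enic_xmit_pkts() below uses it as the TSO header length passed to the
 * adapter when tx_pkt->tso_segsz is non-zero.
 */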
static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags;

	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
		return 1;
	return 0;
}
/* Lookup table to translate RX CQ flags to mbuf flags. */
static inline uint32_t
enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint8_t cqrd_flags = cqrd->flags;
	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
		[0x00] = RTE_PTYPE_UNKNOWN,
		[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		/* All others reserved */
	};

	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
	return cq_type_table[cqrd_flags];
}
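/* The table index above is simply the descriptor's flags byte with only the
 * IPv4-fragment/IPv4/IPv6/TCP/UDP bits kept, so each populated entry is one
 * flag combination; with the flag bit positions defined in cq_enet_desc.h,
 * index 0x24, for example, is the IPv4 + TCP case.
 */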
static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t ciflags, bwflags, pkt_flags = 0, vlan_tci;

	ciflags = enic_cq_rx_desc_ciflags(cqrd);
	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	vlan_tci = enic_cq_rx_desc_vlan(cqrd);

	/* flags are meaningless if !EOP */
	if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
		goto mbuf_flags_done;

	/* VLAN STRIPPED flag. The L2 packet type updated here also */
	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
		pkt_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
		mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
	} else {
		if (vlan_tci != 0)
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
		else
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
	}
	mbuf->vlan_tci = vlan_tci;

	/* RSS flag */
	if (enic_cq_rx_desc_rss_type(cqrd)) {
		pkt_flags |= PKT_RX_RSS_HASH;
		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
	}

	/* checksum flags */
	if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
		if (enic_cq_rx_desc_csum_not_calc(cqrd)) {
			pkt_flags |= (PKT_RX_IP_CKSUM_UNKNOWN &
				      PKT_RX_L4_CKSUM_UNKNOWN);
		} else {
			uint32_t l4_flags;

			l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;

			if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
				pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
			else
				pkt_flags |= PKT_RX_IP_CKSUM_BAD;

			if (l4_flags & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
				if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
					pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
				else
					pkt_flags |= PKT_RX_L4_CKSUM_BAD;
			}
		}
	}

mbuf_flags_done:
	mbuf->ol_flags = pkt_flags;
}
/* dummy receive function to replace actual function in
 * order to do safe reconfiguration operations.
 */
uint16_t
enic_dummy_recv_pkts(__rte_unused void *rx_queue,
		     __rte_unused struct rte_mbuf **rx_pkts,
		     __rte_unused uint16_t nb_pkts)
{
	return 0;
}
uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	       uint16_t nb_pkts)
{
	struct vnic_rq *sop_rq = rx_queue;
	struct vnic_rq *data_rq;
	struct vnic_rq *rq;
	struct enic *enic = vnic_dev_priv(sop_rq->vdev);
	uint16_t cq_idx;
	uint16_t rq_idx;
	uint16_t rq_num;
	struct rte_mbuf *nmb, *rxmb;
	uint16_t nb_rx = 0;
	struct vnic_cq *cq;
	volatile struct cq_desc *cqd_ptr;
	uint8_t color;
	uint16_t seg_length;
	struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
	struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;

	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
	cq_idx = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;

	data_rq = &enic->rq[sop_rq->data_queue_idx];

	while (nb_rx < nb_pkts) {
		volatile struct rq_enet_desc *rqd_ptr;
		dma_addr_t dma_addr;
		struct cq_desc cqd;
		uint8_t packet_error;
		uint16_t ciflags;

		/* Check for pkts available */
		color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
			& CQ_DESC_COLOR_MASK;
		if (color == cq->last_color)
			break;

		/* Get the cq descriptor and extract rq info from it */
		cqd = *cqd_ptr;
		rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
		rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;

		rq = &enic->rq[rq_num];
		rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;

		/* allocate a new mbuf */
		nmb = rte_mbuf_raw_alloc(rq->mp);
		if (nmb == NULL) {
			rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
			break;
		}

		/* A packet error means descriptor and data are untrusted */
		packet_error = enic_cq_rx_check_err(&cqd);

		/* Get the mbuf to return and replace with one just allocated */
		rxmb = rq->mbuf_ring[rq_idx];
		rq->mbuf_ring[rq_idx] = nmb;

		/* Increment cqd, rqd, mbuf_table index */
		cq_idx++;
		if (unlikely(cq_idx == cq->ring.desc_count)) {
			cq_idx = 0;
			cq->last_color = cq->last_color ? 0 : 1;
		}

		/* Prefetch next mbuf & desc while processing current one */
		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
		rte_enic_prefetch(cqd_ptr);

		ciflags = enic_cq_rx_desc_ciflags(
			(struct cq_enet_rq_desc *)&cqd);

		/* Push descriptor for newly allocated mbuf */
		nmb->data_off = RTE_PKTMBUF_HEADROOM;
		dma_addr = (dma_addr_t)(nmb->buf_physaddr +
					RTE_PKTMBUF_HEADROOM);
		rq_enet_desc_enc(rqd_ptr, dma_addr,
				 (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
				  : RQ_ENET_TYPE_NOT_SOP),
				 nmb->buf_len - RTE_PKTMBUF_HEADROOM);

		/* Fill in the rest of the mbuf */
		seg_length = enic_cq_rx_desc_n_bytes(&cqd);

		if (rq->is_sop) {
			first_seg = rxmb;
			first_seg->nb_segs = 1;
			first_seg->pkt_len = seg_length;
		} else {
			first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
							+ seg_length);
			first_seg->nb_segs++;
			last_seg->next = rxmb;
		}

		rxmb->next = NULL;
		rxmb->port = enic->port_id;
		rxmb->data_len = seg_length;

		rq->rx_nb_hold++;

		if (!(enic_cq_rx_desc_eop(ciflags))) {
			last_seg = rxmb;
			continue;
		}

		/* cq rx flags are only valid if eop bit is set */
		first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
		enic_cq_rx_to_pkt_flags(&cqd, first_seg);

		if (unlikely(packet_error)) {
			rte_pktmbuf_free(first_seg);
			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
			continue;
		}

		/* prefetch mbuf data for caller */
		rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
				    RTE_PKTMBUF_HEADROOM));

		/* store the mbuf address into the next entry of the array */
		rx_pkts[nb_rx++] = first_seg;
	}

	sop_rq->pkt_first_seg = first_seg;
	sop_rq->pkt_last_seg = last_seg;

	cq->to_clean = cq_idx;

	if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
	    sop_rq->rx_free_thresh) {
		if (data_rq->in_use) {
			data_rq->posted_index =
				enic_ring_add(data_rq->ring.desc_count,
					      data_rq->posted_index,
					      data_rq->rx_nb_hold);
			data_rq->rx_nb_hold = 0;
		}
		sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
						     sop_rq->posted_index,
						     sop_rq->rx_nb_hold);
		sop_rq->rx_nb_hold = 0;

		rte_mb();
		if (data_rq->in_use)
			iowrite32_relaxed(data_rq->posted_index,
					  &data_rq->ctrl->posted_index);
		rte_compiler_barrier();
		iowrite32_relaxed(sop_rq->posted_index,
				  &sop_rq->ctrl->posted_index);
	}

	return nb_rx;
}
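/* Reclaim TX mbufs that the adapter has finished with: walk the WQ from
 * tail_idx up to completed_index and return the freed mbufs to their
 * mempool(s) in bulk.
 */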
static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
	struct vnic_wq_buf *buf;
	struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
	unsigned int nb_to_free, nb_free = 0, i;
	struct rte_mempool *pool;
	unsigned int tail_idx;
	unsigned int desc_count = wq->ring.desc_count;

	nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
		     + 1;
	tail_idx = wq->tail_idx;
	buf = &wq->bufs[tail_idx];
	pool = ((struct rte_mbuf *)buf->mb)->pool;
	for (i = 0; i < nb_to_free; i++) {
		buf = &wq->bufs[tail_idx];
		m = rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
		buf->mb = NULL;

		if (unlikely(m == NULL)) {
			tail_idx = enic_ring_incr(desc_count, tail_idx);
			continue;
		}

		if (likely(m->pool == pool)) {
			RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
			free[nb_free++] = m;
		} else {
			rte_mempool_put_bulk(pool, (void *)free, nb_free);
			free[0] = m;
			nb_free = 1;
			pool = m->pool;
		}

		tail_idx = enic_ring_incr(desc_count, tail_idx);
	}

	rte_mempool_put_bulk(pool, (void **)free, nb_free);

	wq->tail_idx = tail_idx;
	wq->ring.desc_avail += nb_to_free;
}
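/* The adapter writes the index of the most recently completed TX descriptor
 * into the cqmsg_rz memzone; enic_cleanup_wq() reads it from there and frees
 * whatever has completed since the last call.
 */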
unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
	u16 completed_index;

	completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;

	if (wq->last_completed_index != completed_index) {
		enic_free_wq_bufs(wq, completed_index);
		wq->last_completed_index = completed_index;
	}
	return 0;
}
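/* TX burst: each mbuf segment is encoded into one WQ descriptor (with
 * optional VLAN insertion, checksum offload, or TSO), and the doorbell is
 * rung once at the end by writing the new head index to posted_index.
 */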
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	uint16_t index;
	unsigned int pkt_len, data_len;
	unsigned int nb_segs;
	struct rte_mbuf *tx_pkt;
	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
	struct enic *enic = vnic_dev_priv(wq->vdev);
	unsigned short vlan_id;
	uint64_t ol_flags;
	uint64_t ol_flags_mask;
	unsigned int wq_desc_avail;
	int head_idx;
	struct vnic_wq_buf *buf;
	unsigned int desc_count;
	struct wq_enet_desc *descs, *desc_p, desc_tmp;
	uint16_t mss;
	uint8_t vlan_tag_insert;
	uint8_t eop;
	uint64_t bus_addr;
	uint8_t offload_mode;
	uint16_t header_len;

	enic_cleanup_wq(enic, wq);
	wq_desc_avail = vnic_wq_desc_avail(wq);
	head_idx = wq->head_idx;
	desc_count = wq->ring.desc_count;
	ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;

	nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);

	for (index = 0; index < nb_pkts; index++) {
		tx_pkt = *tx_pkts++;
		pkt_len = tx_pkt->pkt_len;
		data_len = tx_pkt->data_len;
		ol_flags = tx_pkt->ol_flags;
		nb_segs = tx_pkt->nb_segs;

		if (pkt_len > ENIC_TX_MAX_PKT_SIZE) {
			rte_pktmbuf_free(tx_pkt);
			rte_atomic64_inc(&enic->soft_stats.tx_oversized);
			continue;
		}

		if (nb_segs > wq_desc_avail) {
			if (index > 0)
				goto post;
			goto done;
		}

		mss = 0;
		vlan_id = 0;
		vlan_tag_insert = 0;
		bus_addr = (dma_addr_t)
			   (tx_pkt->buf_physaddr + tx_pkt->data_off);

		descs = (struct wq_enet_desc *)wq->ring.descs;
		desc_p = descs + head_idx;

		eop = (data_len == pkt_len);
		offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
		header_len = 0;

		if (tx_pkt->tso_segsz) {
			header_len = tso_header_len(tx_pkt);
			if (header_len) {
				offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
				mss = tx_pkt->tso_segsz;
			}
		}

		if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
			if (ol_flags & PKT_TX_IP_CKSUM)
				mss |= ENIC_CALC_IP_CKSUM;

			/* NIC uses just 1 bit for UDP and TCP */
			switch (ol_flags & PKT_TX_L4_MASK) {
			case PKT_TX_TCP_CKSUM:
			case PKT_TX_UDP_CKSUM:
				mss |= ENIC_CALC_TCP_UDP_CKSUM;
				break;
			}
		}

		if (ol_flags & PKT_TX_VLAN_PKT) {
			vlan_tag_insert = 1;
			vlan_id = tx_pkt->vlan_tci;
		}

		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
				 offload_mode, eop, eop, 0, vlan_tag_insert,
				 vlan_id, 0);

		*desc_p = desc_tmp;
		buf = &wq->bufs[head_idx];
		buf->mb = (void *)tx_pkt;
		head_idx = enic_ring_incr(desc_count, head_idx);
		wq_desc_avail--;

		if (!eop) {
			for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
			     tx_pkt->next) {
				data_len = tx_pkt->data_len;

				if (tx_pkt->next == NULL)
					eop = 1;
				desc_p = descs + head_idx;
				bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr
					   + tx_pkt->data_off);
				wq_enet_desc_enc((struct wq_enet_desc *)
						 &desc_tmp, bus_addr, data_len,
						 mss, 0, offload_mode, eop, eop,
						 0, vlan_tag_insert, vlan_id,
						 0);

				*desc_p = desc_tmp;
				buf = &wq->bufs[head_idx];
				buf->mb = (void *)tx_pkt;
				head_idx = enic_ring_incr(desc_count,
							  head_idx);
				wq_desc_avail--;
			}
		}
	}
post:
	rte_wmb();
	iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
done:
	wq->ring.desc_avail = wq_desc_avail;
	wq->head_idx = head_idx;

	return index;
}