/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2014-2015 Chelsio Communications.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Chelsio Communications nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
					   struct sge_eth_txq *txq);
/*
 * Max number of Rx buffers we replenish at a time.
 */
#define MAX_RX_REFILL 64U

#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256

/*
 * Rx buffer sizes for "usembufs" Free List buffers (one ingress packet
 * per mbuf buffer).  We currently only support two sizes for 1500- and
 * 9000-byte MTUs.  We could easily support more but there doesn't seem to be
 * much need for that ...
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000
static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
					  unsigned int mtu)
{
	struct sge *s = &adapter->sge;

	return CXGBE_ALIGN(s->pktshift + ETHER_HDR_LEN + VLAN_HLEN + mtu,
			   s->fl_align);
}

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
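
/*
 * Illustrative sizing (assuming a 2-byte SGE packet shift and a 64-byte
 * free-list alignment): FL_MTU_SMALL_BUFSIZE() rounds
 * 2 + 14 (Ethernet header) + 4 (VLAN) + 1500 = 1520 bytes up to a
 * 1536-byte buffer.
 */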
/*
 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array.  We also use bit 4, when the buffer has been unmapped
 * for DMA, but this is of course never sent to the hardware and is only used
 * to prevent double unmappings.  All of the above requires that the Free List
 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte aligned or a power of 2 greater in alignment.  Since the SGE's
 * minimal Free List Buffer alignment is 32 bytes, this works out for us ...
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File.  We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};
/**
 * txq_avail - return the number of available slots in a Tx queue
 * @q: the Tx queue
 *
 * Returns the number of descriptors in a Tx queue available to write new
 * packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}
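
/*
 * Example: a 1024-entry queue with 100 descriptors in use reports
 * 1024 - 1 - 100 = 923 free slots; one slot is always kept unused so a
 * completely full ring is never mistaken for an empty one.
 */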
static int map_mbuf(struct rte_mbuf *mbuf, dma_addr_t *addr)
{
	struct rte_mbuf *m = mbuf;

	for (; m; m = m->next, addr++) {
		*addr = m->buf_physaddr + rte_pktmbuf_headroom(m);
	}

	return 0;
}
/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct sge_txq *q, unsigned int n)
{
	struct tx_sw_desc *d;
	unsigned int cidx = 0;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->mbuf) {                       /* an SGL is present */
			rte_pktmbuf_free(d->mbuf);
			d->mbuf = NULL;
		}
		if (d->coalesce.idx) {
			int i;

			for (i = 0; i < d->coalesce.idx; i++) {
				rte_pktmbuf_free(d->coalesce.mbuf[i]);
				d->coalesce.mbuf[i] = NULL;
			}
			d->coalesce.idx = 0;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
		RTE_MBUF_PREFETCH_TO_FREE(&q->sdesc->mbuf->pool);
	}
}
static void reclaim_tx_desc(struct sge_txq *q, unsigned int n)
{
	struct tx_sw_desc *d;
	unsigned int cidx = q->cidx;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->mbuf) {                       /* an SGL is present */
			rte_pktmbuf_free(d->mbuf);
			d->mbuf = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}
/**
 * fl_cap - return the capacity of a free-buffer list
 *
 * Returns the capacity of a free-buffer list.  The capacity is less than
 * the size because one descriptor needs to be left unpopulated, otherwise
 * HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}
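
/*
 * Example: an FL with 1024 buffer slots can hold at most 1016 buffers;
 * one 8-buffer descriptor is always left unpopulated so the hardware
 * never sees the list as empty.
 */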
/**
 * fl_starving - return whether a Free List is starving.
 * @adapter: pointer to the adapter
 * @fl: the Free List
 *
 * Tests specified Free List to see whether the number of buffers
 * available to the hardware has fallen below our "starvation"
 * threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
static inline unsigned int get_buf_size(struct adapter *adapter,
					const struct rx_sw_desc *d)
{
	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
	unsigned int buf_size = 0;

	switch (rx_buf_size_idx) {
	case RX_SMALL_MTU_BUF:
		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
		break;
	case RX_LARGE_MTU_BUF:
		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
		break;
	}

	return buf_size;
}
/**
 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @q: the SGE free list to free buffers from
 * @n: how many buffers to free
 *
 * Release the next @n buffers on an SGE free-buffer Rx queue.  The
 * buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct sge_fl *q, int n)
{
	unsigned int cidx = q->cidx;
	struct rx_sw_desc *d;

	d = &q->sdesc[cidx];
	while (--n >= 0) {
		if (d->buf) {
			rte_pktmbuf_free(d->buf);
			d->buf = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}
/**
 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 * @q: the SGE free list
 *
 * Unmap the current buffer on an SGE free-buffer Rx queue.  The
 * buffer must be made inaccessible to HW before calling this function.
 *
 * This is similar to @free_rx_bufs above but does not free the buffer.
 * Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct sge_fl *q)
{
	if (++q->cidx == q->size)
		q->cidx = 0;
	q->avail--;
}
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= 64) {
		u32 val = adap->params.arch.sge_fl_db;

		if (is_t4(adap->params.chip))
			val |= V_PIDX(q->pend_cred / 8);
		else
			val |= V_PIDX_T5(q->pend_cred / 8);

		/*
		 * Make sure all memory writes to the Free List queue are
		 * committed before we tell the hardware about them.
		 */
		wmb();

		/*
		 * If we don't have access to the new User Doorbell (T5+), use
		 * the old doorbell mechanism; otherwise use the new BAR2
		 * mechanism.
		 */
		if (unlikely(!q->bar2_addr)) {
			t4_write_reg_relaxed(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
					     val | V_QID(q->cntxt_id));
		} else {
			writel_relaxed(val | V_QID(q->bar2_qid),
				       (void *)((uintptr_t)q->bar2_addr +
				       SGE_UDB_KDOORBELL));

			/*
			 * This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
		q->pend_cred &= 7;
	}
}
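
/*
 * Example: with 64 pending credits the doorbell write above advances the FL
 * PIDX by 64 / 8 = 8 hardware descriptors; the PIDX field counts 8-buffer
 * descriptors, not individual buffers.
 */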
static inline void set_rx_sw_desc(struct rx_sw_desc *sd, void *buf,
				  dma_addr_t mapping)
{
	sd->buf = buf;
	sd->dma_addr = mapping;      /* includes size low bits */
}
/**
 * refill_fl_usembufs - refill an SGE Rx buffer ring with mbufs
 * @adap: the adapter
 * @q: the ring to refill
 * @n: the number of new buffers to allocate
 *
 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 * allocated with the supplied gfp flags.  The caller must assure that
 * @n does not exceed the queue's capacity.  If afterwards the queue is
 * found critically low mark it as starving in the bitmap of starving FLs.
 *
 * Returns the number of buffers allocated.
 */
static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
				       int n)
{
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, fl);
	unsigned int cred = q->avail;
	__be64 *d = &q->desc[q->pidx];
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	unsigned int buf_size_idx = RX_SMALL_MTU_BUF;
	struct rte_mbuf *buf_bulk[n];
	int ret, i;
	struct rte_pktmbuf_pool_private *mbp_priv;
	u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.jumbo_frame;

	/* Use jumbo mtu buffers iff mbuf data room size can fit jumbo data. */
	mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
	if (jumbo_en &&
	    ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000))
		buf_size_idx = RX_LARGE_MTU_BUF;

	ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk, n);
	if (unlikely(ret != 0)) {
		dev_debug(adap, "%s: failed to allocate fl entries in bulk ..\n",
			  __func__);
		q->alloc_failed++;
		rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
		goto out;
	}

	for (i = 0; i < n; i++) {
		struct rte_mbuf *mbuf = buf_bulk[i];
		dma_addr_t mapping;

		if (!mbuf) {
			dev_debug(adap, "%s: mbuf alloc failed\n", __func__);
			q->alloc_failed++;
			rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
			goto out;
		}

		rte_mbuf_refcnt_set(mbuf, 1);
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->next = NULL;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->rspq.port_id;

		mapping = (dma_addr_t)(mbuf->buf_physaddr + mbuf->data_off);
		mapping |= buf_size_idx;
		*d++ = cpu_to_be64(mapping);
		set_rx_sw_desc(sd, mbuf, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
	}

out:	cred = q->avail - cred;
	q->pend_cred += cred;
	ring_fl_db(adap, q);

	if (unlikely(fl_starving(adap, q))) {
		/*
		 * Make sure data has been written to free list
		 */
		wmb();
	}

	return cred;
}
/**
 * refill_fl - refill an SGE Rx buffer ring with mbufs
 * @adap: the adapter
 * @q: the ring to refill
 * @n: the number of new buffers to allocate
 *
 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 * allocated with the supplied gfp flags.  The caller must assure that
 * @n does not exceed the queue's capacity.  Returns the number of buffers
 * allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n)
{
	return refill_fl_usembufs(adap, q, n);
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail));
}
/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
	int hw_cidx = ntohs(q->stat->cidx);

	hw_cidx -= q->cidx;
	if (hw_cidx < 0)
		return hw_cidx + q->size;
	return hw_cidx;
}
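
/*
 * Example: if the SW consumer index is 1000, the HW status-page cidx reads 8
 * and the ring holds 1024 descriptors, then 8 - 1000 = -992 wraps to
 * -992 + 1024 = 32 reclaimable descriptors.
 */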
/**
 * reclaim_completed_tx - reclaims completed Tx descriptors
 * @q: the Tx queue to reclaim completed descriptors from
 *
 * Reclaims Tx descriptors that the SGE has indicated it has processed.
 */
void reclaim_completed_tx(struct sge_txq *q)
{
	unsigned int avail = reclaimable(q);

	do {
		/* reclaim as much as possible */
		reclaim_tx_desc(q, avail);
		q->in_use -= avail;
		avail = reclaimable(q);
	} while (avail);
}
/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/*
	 * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
	 * repeated sequences of { Length[i], Length[i+1], Address[i],
	 * Address[i+1] } (this ensures that all addresses are on 64-bit
	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
	 * Address[N+1] is omitted.
	 *
	 * The following calculation incorporates all of the above.  It's
	 * somewhat hard to follow but, briefly: the "+2" accounts for the
	 * first two flits which include the DSGL header, Length0 and
	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
	 * flits for every pair of the remaining N) +1 if (n-1) is odd; and
	 * finally the "+((n-1)&1)" adds the one remaining flit needed if
	 * (n-1) is odd.
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
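
/*
 * Example: n = 2 entries -> (3*1)/2 + (1&1) + 2 = 4 flits (header + Length0 +
 * Address0 take two flits, the trailing {Length1, 0} pair plus Address1 take
 * two more); n = 3 entries -> 5 flits.
 */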
/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Returns the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	return DIV_ROUND_UP(n, 8);
}
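
/*
 * Each 64-byte Tx descriptor holds 8 flits (8 bytes each), so e.g. a 9-flit
 * work request occupies DIV_ROUND_UP(9, 8) = 2 descriptors.
 */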
/**
 * is_eth_imm - can an Ethernet packet be sent as immediate data?
 * @m: the packet
 *
 * Returns whether an Ethernet packet is small enough to fit as
 * immediate data.  Return value corresponds to the headroom required.
 */
static inline int is_eth_imm(const struct rte_mbuf *m)
{
	unsigned int hdrlen = (m->ol_flags & PKT_TX_TCP_SEG) ?
			      sizeof(struct cpl_tx_pkt_lso_core) : 0;

	hdrlen += sizeof(struct cpl_tx_pkt);
	if (m->pkt_len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;

	return 0;
}
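
/*
 * For a non-TSO mbuf the required headroom is just sizeof(struct cpl_tx_pkt),
 * so the packet is sent inline whenever pkt_len <= MAX_IMM_TX_PKT_LEN (256)
 * minus that header length.
 */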
/**
 * calc_tx_flits - calculate the number of flits for a packet Tx WR
 * @m: the packet
 *
 * Returns the number of flits needed for a Tx WR for the given Ethernet
 * packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct rte_mbuf *m)
{
	unsigned int flits;
	int hdrlen;

	/*
	 * If the mbuf is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the mbuf data in the Work Request.
	 */
	hdrlen = is_eth_imm(m);
	if (hdrlen)
		return DIV_ROUND_UP(m->pkt_len + hdrlen, sizeof(__be64));

	/*
	 * Otherwise, we're going to have to construct a Scatter gather list
	 * of the mbuf body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(m->nb_segs);
	if (m->ol_flags & PKT_TX_TCP_SEG)
		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	else
		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	return flits;
}
/**
 * write_sgl - populate a scatter/gather list for a packet
 * @mbuf: the packet
 * @q: the Tx queue we are writing into
 * @sgl: starting location for writing the SGL
 * @end: points right after the end of the SGL
 * @start: start offset into mbuf main-body data to include in the SGL
 * @addr: address of mapped region
 *
 * Generates a scatter/gather list for the buffers that make up a packet.
 * The caller must provide adequate space for the SGL that will be written.
 * The SGL includes all of the packet's page fragments and the data in its
 * main body except for the first @start bytes.  @sgl must be 16-byte
 * aligned and within a Tx descriptor with available space.  @end points
 * right after the end of the SGL but does not account for any potential
 * wrap around, i.e., @end > @sgl.
 */
static void write_sgl(struct rte_mbuf *mbuf, struct sge_txq *q,
		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		      const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	struct rte_mbuf *m = mbuf;
	unsigned int nfrags = m->nb_segs;
	struct ulptx_sge_pair buf[nfrags / 2];

	len = m->data_len - start;
	sgl->len0 = htonl(len);
	sgl->addr0 = rte_cpu_to_be_64(addr[0]);

	sgl->cmd_nsge = htonl(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
			      V_ULPTX_NSGE(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

	for (i = 0; nfrags >= 2; nfrags -= 2, to++) {
		m = m->next;
		to->len[0] = rte_cpu_to_be_32(m->data_len);
		to->addr[0] = rte_cpu_to_be_64(addr[++i]);
		m = m->next;
		to->len[1] = rte_cpu_to_be_32(m->data_len);
		to->addr[1] = rte_cpu_to_be_64(addr[++i]);
	}
	if (nfrags) {
		m = m->next;
		to->len[0] = rte_cpu_to_be_32(m->data_len);
		to->len[1] = rte_cpu_to_be_32(0);
		to->addr[0] = rte_cpu_to_be_64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)q->stat)) {
		unsigned int part0 = RTE_PTR_DIFF((u8 *)q->stat,
						  (u8 *)sgl->sge);
		unsigned int part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = RTE_PTR_DIFF((u8 *)end, (u8 *)q->stat);
		rte_memcpy(q->desc, RTE_PTR_ADD((u8 *)buf, part0), part1);
		end = RTE_PTR_ADD((void *)q->desc, part1);
	}
	if ((uintptr_t)end & 8)      /* 0-pad to multiple of 16 */
		*(u64 *)end = 0;
}
#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))

#define Q_IDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->size)
#define R_IDXDIFF(q, idx) IDXDIFF((q)->cidx, (q)->idx, (q)->size)
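
/*
 * Example: with q->size = 1024, pidx = 10 and dbidx = 1020,
 * Q_IDXDIFF(q, dbidx) = 1024 - 1020 + 10 = 14 descriptors written since the
 * doorbell was last rung.
 */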
/**
 * ring_tx_db - ring a Tx queue's doorbell
 * @adap: the adapter
 * @q: the Tx queue
 * @n: number of new descriptors to give to HW
 *
 * Ring the doorbell for a Tx queue.
 */
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
	int n = Q_IDXDIFF(q, dbidx);

	/*
	 * Make sure that all writes to the TX Descriptors are committed
	 * before we tell the hardware about them.
	 */
	rte_wmb();

	/*
	 * If we don't have access to the new User Doorbell (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(!q->bar2_addr)) {
		/*
		 * For T4 we need to participate in the Doorbell Recovery
		 * mechanism.
		 */
		t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
			     V_QID(q->cntxt_id) | val);
		q->db_pidx = q->pidx;
	} else {
		u32 val = V_PIDX_T5(n);

		/*
		 * T4 and later chips share the same PIDX field offset within
		 * the doorbell, but T5 and later shrank the field in order to
		 * gain a bit for Doorbell Priority.  The field was absurdly
		 * large in the first place (14 bits) so we just use the T5
		 * and later limits and warn if a Queue ID is too large.
		 */
		WARN_ON(val & F_DBPRIO);

		writel(val | V_QID(q->bar2_qid),
		       (void *)((uintptr_t)q->bar2_addr + SGE_UDB_KDOORBELL));

		/*
		 * This Write Memory Barrier will force the write to the User
		 * Doorbell area to be flushed.  This is needed to prevent
		 * writes on different CPUs for the same queue from hitting
		 * the adapter out of order.  This is required when some Work
		 * Requests take the Write Combine Gather Buffer path (user
		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
		 * take the traditional path where we simply increment the
		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
		 * hardware DMA read the actual Work Request.
		 */
		rte_wmb();
	}
	q->dbidx = q->pidx;
}
/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(enum chip_type chip, const struct rte_mbuf *m)
{
	int csum_type;

	if (m->ol_flags & PKT_TX_IP_CKSUM) {
		switch (m->ol_flags & PKT_TX_L4_MASK) {
		case PKT_TX_TCP_CKSUM:
			csum_type = TX_CSUM_TCPIP;
			break;
		case PKT_TX_UDP_CKSUM:
			csum_type = TX_CSUM_UDPIP;
			break;
		default:
			goto nocsum;
		}
	} else {
		goto nocsum;
	}

	if (likely(csum_type >= TX_CSUM_TCPIP)) {
		int hdr_len = V_TXPKT_IPHDR_LEN(m->l3_len);
		int eth_hdr_len = m->l2_len;

		if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
			hdr_len |= V_TXPKT_ETHHDR_LEN(eth_hdr_len);
		else
			hdr_len |= V_T6_TXPKT_ETHHDR_LEN(eth_hdr_len);
		return V_TXPKT_CSUM_TYPE(csum_type) | hdr_len;
	}

nocsum:
	/*
	 * unknown protocol, disable HW csum
	 * and hope a bad packet is detected
	 */
	return F_TXPKT_L4CSUM_DIS;
}
static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}
#define MAX_COALESCE_LEN 64000

static inline int wraps_around(struct sge_txq *q, int ndesc)
{
	return (q->pidx + ndesc) > q->size ? 1 : 0;
}
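
/*
 * Example: with pidx = 1020 and size = 1024, a request for 8 descriptors
 * (1020 + 8 > 1024) reports a wrap, so the caller ships the pending coalesce
 * WR instead of letting it straddle the end of the ring.
 */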
static void tx_timer_cb(void *data)
{
	struct adapter *adap = (struct adapter *)data;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
	int i;
	unsigned int coal_idx;

	/* monitor any pending tx */
	for (i = 0; i < adap->sge.max_ethqsets; i++, txq++) {
		if (t4_os_trylock(&txq->txq_lock)) {
			coal_idx = txq->q.coalesce.idx;
			if (coal_idx) {
				if (coal_idx == txq->q.last_coal_idx &&
				    txq->q.pidx == txq->q.last_pidx) {
					ship_tx_pkt_coalesce_wr(adap, txq);
				} else {
					txq->q.last_coal_idx = coal_idx;
					txq->q.last_pidx = txq->q.pidx;
				}
			}
			t4_os_unlock(&txq->txq_lock);
		}
	}
	rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
}
/**
 * ship_tx_pkt_coalesce_wr - finalizes and ships a coalesce WR
 * @adap: adapter structure
 * @txq: tx queue
 *
 * writes the different fields of the pkts WR and sends it.
 */
static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
					   struct sge_eth_txq *txq)
{
	u32 wr_mid;
	struct sge_txq *q = &txq->q;
	struct fw_eth_tx_pkts_wr *wr;
	unsigned int ndesc;

	/* fill the pkts WR header */
	wr = (void *)&q->desc[q->pidx];
	wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));

	wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
	ndesc = flits_to_desc(q->coalesce.flits);
	wr->equiq_to_len16 = htonl(wr_mid);
	wr->plen = cpu_to_be16(q->coalesce.len);
	wr->npkt = q->coalesce.idx;
	wr->r3 = 0;
	wr->type = q->coalesce.type;

	/* zero out coalesce structure members */
	q->coalesce.idx = 0;
	q->coalesce.flits = 0;
	q->coalesce.len = 0;

	txq_advance(q, ndesc);
	txq->stats.coal_wr++;
	txq->stats.coal_pkts += wr->npkt;

	if (Q_IDXDIFF(q, equeidx) >= q->size / 2) {
		q->equeidx = q->pidx;
		wr_mid |= F_FW_WR_EQUEQ;
		wr->equiq_to_len16 = htonl(wr_mid);
	}
	ring_tx_db(adap, q);
}
/**
 * should_tx_packet_coalesce - decides whether to coalesce an mbuf or not
 * @txq: tx queue where the mbuf is sent
 * @mbuf: mbuf to be sent
 * @nflits: return value for number of flits needed
 * @adap: adapter structure
 *
 * This function decides if a packet should be coalesced or not.
 */
static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
					    struct rte_mbuf *mbuf,
					    unsigned int *nflits,
					    struct adapter *adap)
{
	struct sge_txq *q = &txq->q;
	unsigned int flits, ndesc;
	unsigned char type = 0;
	int credits;

	/* use coal WR type 1 when no frags are present */
	type = (mbuf->nb_segs == 1) ? 1 : 0;

	if (unlikely(type != q->coalesce.type && q->coalesce.idx))
		ship_tx_pkt_coalesce_wr(adap, txq);

	/* calculate the number of flits required for coalescing this packet
	 * without the 2 flits of the WR header. These are added further down
	 * if we are just starting in new PKTS WR. sgl_len doesn't account for
	 * the possible 16 bytes alignment ULP TX commands so we do it here.
	 */
	flits = (sgl_len(mbuf->nb_segs) + 1) & ~1U;
	if (type == 0)
		flits += (sizeof(struct ulp_txpkt) +
			  sizeof(struct ulptx_idata)) / sizeof(__be64);
	flits += sizeof(struct cpl_tx_pkt_core) / sizeof(__be64);
	*nflits = flits;

	/* If coalescing is on, the mbuf is added to a pkts WR */
	if (q->coalesce.idx) {
		ndesc = DIV_ROUND_UP(q->coalesce.flits + flits, 8);
		credits = txq_avail(q) - ndesc;

		/* If we are wrapping or this is last mbuf then, send the
		 * already coalesced mbufs and let the non-coalesce pass
		 * handle it.
		 */
		if (unlikely(credits < 0 || wraps_around(q, ndesc))) {
			ship_tx_pkt_coalesce_wr(adap, txq);
			return 0;
		}

		/* If the max coalesce len or the max WR len is reached
		 * ship the WR and keep coalescing on.
		 */
		if (unlikely((q->coalesce.len + mbuf->pkt_len >
			      MAX_COALESCE_LEN) ||
			     (q->coalesce.flits + flits >
			      q->coalesce.max))) {
			ship_tx_pkt_coalesce_wr(adap, txq);
			goto new;
		}
		return 1;
	}

new:
	/* start a new pkts WR, the WR header is not filled below */
	flits += sizeof(struct fw_eth_tx_pkts_wr) / sizeof(__be64);
	ndesc = flits_to_desc(q->coalesce.flits + flits);
	credits = txq_avail(q) - ndesc;

	if (unlikely(credits < 0 || wraps_around(q, ndesc)))
		return 0;
	q->coalesce.flits += 2;
	q->coalesce.type = type;
	q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] +
			  2 * sizeof(__be64);
	return 1;
}
/**
 * tx_do_packet_coalesce - add an mbuf to a coalesce WR
 * @txq: sge_eth_txq used to send the mbuf
 * @mbuf: mbuf to be sent
 * @flits: flits needed for this mbuf
 * @adap: adapter structure
 * @pi: port_info structure
 * @addr: mapped address of the mbuf
 *
 * Adds an mbuf to be sent as part of a coalesce WR by filling a
 * ulp_tx_pkt command, ulp_tx_sc_imm command, cpl message and
 * ulp_tx_sc_dsgl command.
 */
static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
					struct rte_mbuf *mbuf,
					int flits, struct adapter *adap,
					const struct port_info *pi,
					dma_addr_t *addr)
{
	u64 cntrl, *end;
	struct sge_txq *q = &txq->q;
	struct ulp_txpkt *mc;
	struct ulptx_idata *sc_imm;
	struct cpl_tx_pkt_core *cpl;
	struct tx_sw_desc *sd;
	unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len;

	if (q->coalesce.type == 0) {
		mc = (struct ulp_txpkt *)q->coalesce.ptr;
		mc->cmd_dest = htonl(V_ULPTX_CMD(4) | V_ULP_TXPKT_DEST(0) |
				     V_ULP_TXPKT_FID(adap->sge.fw_evtq.cntxt_id) |
				     F_ULP_TXPKT_RO);
		mc->len = htonl(DIV_ROUND_UP(flits, 2));
		sc_imm = (struct ulptx_idata *)(mc + 1);
		sc_imm->cmd_more = htonl(V_ULPTX_CMD(ULP_TX_SC_IMM) |
					 F_ULP_TX_SC_MORE);
		sc_imm->len = htonl(sizeof(*cpl));
		end = (u64 *)mc + flits;
		cpl = (struct cpl_tx_pkt_core *)(sc_imm + 1);
	} else {
		end = (u64 *)q->coalesce.ptr + flits;
		cpl = (struct cpl_tx_pkt_core *)q->coalesce.ptr;
	}

	/* update coalesce structure for this txq */
	q->coalesce.flits += flits;
	q->coalesce.ptr += flits * sizeof(__be64);
	q->coalesce.len += mbuf->pkt_len;

	/* fill the cpl message, same as in t4_eth_xmit, this should be kept
	 * similar to t4_eth_xmit
	 */
	if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
		cntrl = hwcsum(adap->params.chip, mbuf) |
			F_TXPKT_IPCSUM_DIS;
		txq->stats.tx_cso++;
	} else {
		cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
	}

	if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
		txq->stats.vlan_ins++;
		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci);
	}

	cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
			   V_TXPKT_INTF(pi->tx_chan) |
			   V_TXPKT_PF(adap->pf));
	cpl->pack = htons(0);
	cpl->len = htons(len);
	cpl->ctrl1 = cpu_to_be64(cntrl);
	write_sgl(mbuf, q, (struct ulptx_sgl *)(cpl + 1), end, 0, addr);
	txq->stats.pkts++;
	txq->stats.tx_bytes += len;

	sd = &q->sdesc[q->pidx + (idx >> 1)];
	if (sd->coalesce.idx) {
		int i;

		for (i = 0; i < sd->coalesce.idx; i++) {
			rte_pktmbuf_free(sd->coalesce.mbuf[i]);
			sd->coalesce.mbuf[i] = NULL;
		}
	}

	/* store pointers to the mbuf and the sgl used in free_tx_desc.
	 * each tx desc can hold two pointers corresponding to the value
	 * of ETH_COALESCE_PKT_PER_DESC
	 */
	sd->coalesce.mbuf[idx & 1] = mbuf;
	sd->coalesce.sgl[idx & 1] = (struct ulptx_sgl *)(cpl + 1);
	sd->coalesce.idx = (idx & 1) + 1;

	/* send the coalesced work request if max reached */
	if (++q->coalesce.idx == ETH_COALESCE_PKT_NUM)
		ship_tx_pkt_coalesce_wr(adap, txq);
	return 0;
}
/**
 * t4_eth_xmit - add a packet to an Ethernet Tx queue
 * @txq: the egress queue
 * @mbuf: the packet
 *
 * Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
 */
int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf)
{
	const struct port_info *pi;
	struct cpl_tx_pkt_lso_core *lso;
	struct adapter *adap;
	struct rte_mbuf *m = mbuf;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	struct tx_sw_desc *d;
	dma_addr_t addr[m->nb_segs];
	unsigned int flits, ndesc, cflits;
	int l3hdr_len, l4hdr_len, eth_xtra_len;
	int len, last_desc;
	int credits;
	u32 wr_mid;
	u64 cntrl, *end;
	bool v6;
	u32 max_pkt_len = txq->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;

	/* Reject xmit if queue is stopped */
	if (unlikely(txq->flags & EQ_STOPPED))
		return -(EBUSY);

	/*
	 * The chip min packet length is 10 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(m->pkt_len < ETHER_HDR_LEN)) {
out_free:
		rte_pktmbuf_free(m);
		return 0;
	}

	if ((!(m->ol_flags & PKT_TX_TCP_SEG)) &&
	    (unlikely(m->pkt_len > max_pkt_len)))
		goto out_free;

	pi = (struct port_info *)txq->eth_dev->data->dev_private;
	adap = pi->adapter;

	cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
	/* align the end of coalesce WR to a 512 byte boundary */
	txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;

	if (!((m->ol_flags & PKT_TX_TCP_SEG) || (m->pkt_len > ETHER_MAX_LEN))) {
		if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
			if (unlikely(map_mbuf(mbuf, addr) < 0)) {
				dev_warn(adap, "%s: mapping err for coalesce\n",
					 __func__);
				txq->stats.mapping_err++;
				goto out_free;
			}
			rte_prefetch0((volatile void *)addr);
			return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
						     pi, addr);
		} else {
			return -EBUSY;
		}
	}

	if (txq->q.coalesce.idx)
		ship_tx_pkt_coalesce_wr(adap, txq);

	flits = calc_tx_flits(m);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&txq->q) - ndesc;

	if (unlikely(credits < 0)) {
		dev_debug(adap, "%s: Tx ring %u full; credits = %d\n",
			  __func__, txq->q.cntxt_id, credits);
		return -EBUSY;
	}

	if (unlikely(map_mbuf(m, addr) < 0)) {
		txq->stats.mapping_err++;
		goto out_free;
	}

	wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
	if (Q_IDXDIFF(&txq->q, equeidx) >= 64) {
		txq->q.equeidx = txq->q.pidx;
		wr_mid |= F_FW_WR_EQUEQ;
	}

	wr = (void *)&txq->q.desc[txq->q.pidx];
	wr->equiq_to_len16 = htonl(wr_mid);
	wr->r3 = rte_cpu_to_be_64(0);
	end = (u64 *)wr + flits;

	len = 0;
	len += sizeof(*cpl);

	/* Coalescing skipped and we send through normal path */
	if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
		wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
				       V_FW_WR_IMMDLEN(len));
		cpl = (void *)(wr + 1);
		if (m->ol_flags & PKT_TX_IP_CKSUM) {
			cntrl = hwcsum(adap->params.chip, m) |
				F_TXPKT_IPCSUM_DIS;
			txq->stats.tx_cso++;
		}
	} else {
		lso = (void *)(wr + 1);
		v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
		l3hdr_len = m->l3_len;
		l4hdr_len = m->l4_len;
		eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
		len += sizeof(*lso);
		wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
				       V_FW_WR_IMMDLEN(len));
		lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
				      F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
				      V_LSO_IPV6(v6) |
				      V_LSO_ETHHDR_LEN(eth_xtra_len / 4) |
				      V_LSO_IPHDR_LEN(l3hdr_len / 4) |
				      V_LSO_TCPHDR_LEN(l4hdr_len / 4));
		lso->ipid_ofst = htons(0);
		lso->mss = htons(m->tso_segsz);
		lso->seqno_offset = htonl(0);
		if (is_t4(adap->params.chip))
			lso->len = htonl(m->pkt_len);
		else
			lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len));
		cpl = (void *)(lso + 1);
		cntrl = V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
			V_TXPKT_IPHDR_LEN(l3hdr_len) |
			V_TXPKT_ETHHDR_LEN(eth_xtra_len);
		txq->stats.tx_cso += m->tso_segsz;
	}

	if (m->ol_flags & PKT_TX_VLAN_PKT) {
		txq->stats.vlan_ins++;
		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci);
	}

	cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
			   V_TXPKT_INTF(pi->tx_chan) |
			   V_TXPKT_PF(adap->pf));
	cpl->pack = htons(0);
	cpl->len = htons(m->pkt_len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	txq->stats.pkts++;
	txq->stats.tx_bytes += m->pkt_len;
	last_desc = txq->q.pidx + ndesc - 1;
	if (last_desc >= (int)txq->q.size)
		last_desc -= txq->q.size;

	d = &txq->q.sdesc[last_desc];
	if (d->coalesce.idx) {
		int i;

		for (i = 0; i < d->coalesce.idx; i++) {
			rte_pktmbuf_free(d->coalesce.mbuf[i]);
			d->coalesce.mbuf[i] = NULL;
		}
		d->coalesce.idx = 0;
	}
	write_sgl(m, &txq->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
		  addr);
	txq->q.sdesc[last_desc].mbuf = m;
	txq->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
	txq_advance(&txq->q, ndesc);
	ring_tx_db(adap, &txq->q);
	return 0;
}
/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @dev: the PCI device's core device
 * @nelem: the number of descriptors
 * @elem_size: the size of each descriptor
 * @sw_size: the size of the SW state associated with each ring element
 * @phys: the physical address of the allocated ring
 * @metadata: address of the array holding the SW state for the ring
 * @stat_size: extra space in HW ring for status information
 * @node: preferred node for memory allocations
 *
 * Allocates resources for an SGE descriptor ring, such as Tx queues,
 * free buffer lists, or response queues.  Each SGE ring requires
 * space for its HW descriptors plus, optionally, space for the SW state
 * associated with each HW entry (the metadata).  The function returns
 * three values: the virtual address for the HW ring (the return value
 * of the function), the bus address of the HW ring, and the address
 * of the SW ring.
 */
static void *alloc_ring(size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size, __rte_unused uint16_t queue_id,
			int socket_id, const char *z_name,
			const char *z_name_sw)
{
	size_t len = CXGBE_MAX_RING_DESC_SIZE * elem_size + stat_size;
	const struct rte_memzone *tz;
	void *s = NULL;

	dev_debug(adapter, "%s: nelem = %zu; elem_size = %zu; sw_size = %zu; "
		  "stat_size = %zu; queue_id = %u; socket_id = %d; z_name = %s;"
		  " z_name_sw = %s\n", __func__, nelem, elem_size, sw_size,
		  stat_size, queue_id, socket_id, z_name, z_name_sw);

	tz = rte_memzone_lookup(z_name);
	if (tz) {
		dev_debug(adapter, "%s: tz exists...returning existing..\n",
			  __func__);
		goto alloc_sw_ring;
	}

	/*
	 * Allocate TX/RX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	tz = rte_memzone_reserve_aligned(z_name, len, socket_id, 0, 4096);
	if (!tz)
		return NULL;

alloc_sw_ring:
	memset(tz->addr, 0, len);
	if (sw_size) {
		s = rte_zmalloc_socket(z_name_sw, nelem * sw_size,
				       RTE_CACHE_LINE_SIZE, socket_id);
		if (!s) {
			dev_err(adapter, "%s: failed to get sw_ring memory\n",
				__func__);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;

	*phys = (uint64_t)tz->phys_addr;
	return tz->addr;
}
/**
 * t4_pktgl_to_mbuf_usembufs - build an mbuf from a packet gather list
 * @gl: the gather list
 *
 * Builds an mbuf from the given packet gather list.  Returns the mbuf or
 * %NULL if mbuf allocation failed.
 */
static struct rte_mbuf *t4_pktgl_to_mbuf_usembufs(const struct pkt_gl *gl)
{
	/*
	 * If there's only one mbuf fragment, just return that.
	 */
	if (likely(gl->nfrags == 1))
		return gl->mbufs[0];

	return NULL;
}

/**
 * t4_pktgl_to_mbuf - build an mbuf from a packet gather list
 * @gl: the gather list
 *
 * Builds an mbuf from the given packet gather list.  Returns the mbuf or
 * %NULL if mbuf allocation failed.
 */
static struct rte_mbuf *t4_pktgl_to_mbuf(const struct pkt_gl *gl)
{
	return t4_pktgl_to_mbuf_usembufs(gl);
}
/**
 * t4_ethrx_handler - process an ingress ethernet packet
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the RX_PKT message
 * @si: the gather list of packet fragments
 *
 * Process an ingress ethernet packet and deliver it to the stack.
 */
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
		     const struct pkt_gl *si)
{
	struct rte_mbuf *mbuf;
	const struct cpl_rx_pkt *pkt;
	const struct rss_header *rss_hdr;
	bool csum_ok;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);

	rss_hdr = (const void *)rsp;
	pkt = (const void *)&rsp[1];
	csum_ok = pkt->csum_calc && !pkt->err_vec;

	mbuf = t4_pktgl_to_mbuf(si);
	if (unlikely(!mbuf)) {
		rxq->stats.rx_drops++;
		return 0;
	}

	mbuf->port = pkt->iff;
	if (pkt->l2info & htonl(F_RXF_IP)) {
		mbuf->packet_type = RTE_PTYPE_L3_IPV4;
		if (unlikely(!csum_ok))
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;

		if ((pkt->l2info & htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
	} else if (pkt->l2info & htonl(F_RXF_IP6)) {
		mbuf->packet_type = RTE_PTYPE_L3_IPV6;
	}

	mbuf->port = pkt->iff;

	if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
		mbuf->ol_flags |= PKT_RX_RSS_HASH;
		mbuf->hash.rss = ntohl(rss_hdr->hash_val);
	}

	if (pkt->vlan_ex) {
		mbuf->ol_flags |= PKT_RX_VLAN_PKT;
		mbuf->vlan_tci = ntohs(pkt->vlan);
	}
	rxq->stats.pkts++;
	rxq->stats.rx_bytes += mbuf->pkt_len;

	return 0;
}
/**
 * is_new_response - check if a response is newly written
 * @r: the response descriptor
 * @q: the response queue
 *
 * Returns true if a response descriptor contains a yet unprocessed
 * response.
 */
static inline bool is_new_response(const struct rsp_ctrl *r,
				   const struct sge_rspq *q)
{
	return (r->u.type_gen >> S_RSPD_GEN) == q->gen;
}

#define CXGB4_MSG_AN ((void *)1)

/**
 * rspq_next - advance to the next entry in a response queue
 * @q: the response queue
 *
 * Updates the state of a response queue to advance it to the next entry.
 */
static inline void rspq_next(struct sge_rspq *q)
{
	q->cur_desc = (const __be64 *)((const char *)q->cur_desc + q->iqe_len);
	if (unlikely(++q->cidx == q->size)) {
		q->cidx = 0;
		q->gen ^= 1;
		q->cur_desc = q->desc;
	}
}
/**
 * process_responses - process responses from an SGE response queue
 * @q: the ingress queue to process
 * @budget: how many responses can be processed in this round
 * @rx_pkts: mbuf to put the pkts
 *
 * Process responses from an SGE response queue up to the supplied budget.
 * Responses include received packets as well as control messages from FW
 * or HW.
 *
 * Additionally choose the interrupt holdoff time for the next interrupt
 * on this queue.  If the system is under memory shortage use a fairly
 * long delay to help recovery.
 */
static int process_responses(struct sge_rspq *q, int budget,
			     struct rte_mbuf **rx_pkts)
{
	int ret = 0, rsp_type;
	int budget_left = budget;
	const struct rsp_ctrl *rc;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);

	while (likely(budget_left)) {
		rc = (const struct rsp_ctrl *)
		     ((const char *)q->cur_desc + (q->iqe_len - sizeof(*rc)));

		if (!is_new_response(rc, q))
			break;

		/*
		 * Ensure response has been read
		 */
		rmb();
		rsp_type = G_RSPD_TYPE(rc->u.type_gen);

		if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) {
			const struct rx_sw_desc *rsd =
						&rxq->fl.sdesc[rxq->fl.cidx];
			const struct rss_header *rss_hdr =
						(const void *)q->cur_desc;
			const struct cpl_rx_pkt *cpl =
						(const void *)&q->cur_desc[1];
			bool csum_ok = cpl->csum_calc && !cpl->err_vec;
			struct rte_mbuf *pkt, *npkt;
			u32 len, bufsz;

			len = ntohl(rc->pldbuflen_qid);
			BUG_ON(!(len & F_RSPD_NEWBUF));
			pkt = rsd->buf;
			npkt = pkt;
			len = G_RSPD_LEN(len);
			pkt->pkt_len = len;

			/* Chain mbufs into len if necessary */
			while (len) {
				struct rte_mbuf *new_pkt = rsd->buf;

				bufsz = min(get_buf_size(q->adapter, rsd), len);
				new_pkt->data_len = bufsz;
				unmap_rx_buf(&rxq->fl);
				len -= bufsz;
				npkt->next = new_pkt;
				npkt = new_pkt;
				pkt->nb_segs++;
				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
			}
			npkt->next = NULL;
			pkt->nb_segs--;

			if (cpl->l2info & htonl(F_RXF_IP)) {
				pkt->packet_type = RTE_PTYPE_L3_IPV4;
				if (unlikely(!csum_ok))
					pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD;

				if ((cpl->l2info &
				     htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
					pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD;
			} else if (cpl->l2info & htonl(F_RXF_IP6)) {
				pkt->packet_type = RTE_PTYPE_L3_IPV6;
			}

			if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
				pkt->ol_flags |= PKT_RX_RSS_HASH;
				pkt->hash.rss = ntohl(rss_hdr->hash_val);
			}

			if (cpl->vlan_ex) {
				pkt->ol_flags |= PKT_RX_VLAN_PKT;
				pkt->vlan_tci = ntohs(cpl->vlan);
			}
			rxq->stats.pkts++;
			rxq->stats.rx_bytes += pkt->pkt_len;
			rx_pkts[budget - budget_left] = pkt;
		} else if (likely(rsp_type == X_RSPD_TYPE_CPL)) {
			ret = q->handler(q, q->cur_desc, NULL);
		} else {
			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
		}

		if (unlikely(ret)) {
			/* couldn't process descriptor, back off for recovery */
			q->next_intr_params = V_QINTR_TIMER_IDX(NOMEM_TMR_IDX);
			break;
		}

		rspq_next(q);
		budget_left--;

		if (R_IDXDIFF(q, gts_idx) >= 64) {
			unsigned int cidx_inc = R_IDXDIFF(q, gts_idx);
			unsigned int params;
			u32 val;

			if (fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
				__refill_fl(q->adapter, &rxq->fl);
			params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
			q->next_intr_params = params;
			val = V_CIDXINC(cidx_inc) | V_SEINTARM(params);

			if (unlikely(!q->bar2_addr)) {
				t4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS),
					     val |
					     V_INGRESSQID((u32)q->cntxt_id));
			} else {
				writel(val | V_INGRESSQID(q->bar2_qid),
				       (void *)((uintptr_t)q->bar2_addr +
				       SGE_UDB_GTS));
				/*
				 * This Write memory Barrier will force the
				 * write to the User Doorbell area to be
				 * flushed.
				 */
				wmb();
			}
			q->gts_idx = q->cidx;
		}
	}

	/*
	 * If this is a Response Queue with an associated Free List and
	 * there's room for another chunk of new Free List buffer pointers,
	 * refill the Free List.
	 */
	if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
		__refill_fl(q->adapter, &rxq->fl);

	return budget - budget_left;
}
int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
	       unsigned int budget, unsigned int *work_done)
{
	int err = 0;

	*work_done = process_responses(q, budget, rx_pkts);
	return err;
}
/**
 * bar2_address - return the BAR2 address for an SGE Queue's Registers
 * @adapter: the adapter
 * @qid: the SGE Queue ID
 * @qtype: the SGE Queue Type (Egress or Ingress)
 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 * Returns the BAR2 address for the SGE Queue Registers associated with
 * @qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
 * Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
 */
static void __iomem *bar2_address(struct adapter *adapter, unsigned int qid,
				  enum t4_bar2_qtype qtype,
				  unsigned int *pbar2_qid)
{
	u64 bar2_qoffset;
	int ret;

	ret = t4_bar2_sge_qregs(adapter, qid, qtype, &bar2_qoffset, pbar2_qid);
	if (ret)
		return NULL;

	return adapter->bar2 + bar2_qoffset;
}
int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq)
{
	struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
	unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;

	return t4_iq_start_stop(adap, adap->mbox, true, adap->pf, 0,
				rq->cntxt_id, fl_id, 0xffff);
}

int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq)
{
	struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
	unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;

	return t4_iq_start_stop(adap, adap->mbox, false, adap->pf, 0,
				rq->cntxt_id, fl_id, 0xffff);
}
/*
 * @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
 */
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
		     struct rte_eth_dev *eth_dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t hnd, int cong,
		     struct rte_mempool *mp, int queue_id, int socket_id)
{
	int ret, flsz = 0;
	struct fw_iq_cmd c;
	struct sge *s = &adap->sge;
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	char z_name[RTE_MEMZONE_NAMESIZE];
	char z_name_sw[RTE_MEMZONE_NAMESIZE];
	unsigned int nb_refill;

	/* Size needs to be multiple of 16, including status entry. */
	iq->size = cxgbe_roundup(iq->size, 16);

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		 eth_dev->data->drv_name,
		 fwevtq ? "fwq_ring" : "rx_ring",
		 eth_dev->data->port_id, queue_id);
	snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);

	iq->desc = alloc_ring(iq->size, iq->iqe_len, 0, &iq->phys_addr, NULL, 0,
			      queue_id, socket_id, z_name, z_name_sw);
	if (!iq->desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
			    V_FW_IQ_CMD_PFN(adap->pf) | V_FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
				 (sizeof(c) / 16));
	c.type_to_iqandstindex =
		htonl(V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
		      V_FW_IQ_CMD_IQASYNCH(fwevtq) |
		      V_FW_IQ_CMD_VIID(pi->viid) |
		      V_FW_IQ_CMD_IQANDST(intr_idx < 0) |
		      V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT) |
		      V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
					       -intr_idx - 1));
	c.iqdroprss_to_iqesize =
		htons(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
		      F_FW_IQ_CMD_IQGTSMODE |
		      V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
		      V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
	c.iqsize = htons(iq->size);
	c.iqaddr = cpu_to_be64(iq->phys_addr);
	if (cong >= 0)
		c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN);

	if (fl) {
		struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,
						       fl);
		enum chip_type chip = (enum chip_type)CHELSIO_CHIP_VERSION(
							adap->params.chip);

		/*
		 * Allocate the ring for the hardware free list (with space
		 * for its status page) along with the associated software
		 * descriptor ring.  The free list size needs to be a multiple
		 * of the Egress Queue Unit and at least 2 Egress Units larger
		 * than the SGE's Egress Congestion Threshold
		 * (fl_starve_thres - 1).
		 */
		if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
			fl->size = s->fl_starve_thres - 1 + 2 * 8;
		fl->size = cxgbe_roundup(fl->size, 8);

		snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
			 eth_dev->data->drv_name,
			 fwevtq ? "fwq_ring" : "fl_ring",
			 eth_dev->data->port_id, queue_id);
		snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);

		fl->desc = alloc_ring(fl->size, sizeof(__be64),
				      sizeof(struct rx_sw_desc),
				      &fl->addr, &fl->sdesc, s->stat_len,
				      queue_id, socket_id, z_name, z_name_sw);
		if (!fl->desc)
			goto fl_nomem;

		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
		c.iqns_to_fl0congen |=
			htonl(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
			      (unlikely(rxq->usembufs) ?
			       0 : F_FW_IQ_CMD_FL0PACKEN) |
			      F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
			      F_FW_IQ_CMD_FL0PADEN);
		if (cong >= 0)
			c.iqns_to_fl0congen |=
				htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
				      F_FW_IQ_CMD_FL0CONGCIF |
				      F_FW_IQ_CMD_FL0CONGEN);

		/* In T6, for egress queue type FL there is internal overhead
		 * of 16B for header going into FLM module.
		 * Hence maximum allowed burst size will be 448 bytes.
		 */
		c.fl0dcaen_to_fl0cidxfthresh =
			htons(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_128B) |
			      V_FW_IQ_CMD_FL0FBMAX((chip <= CHELSIO_T5) ?
			      X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
		c.fl0size = htons(flsz);
		c.fl0addr = cpu_to_be64(fl->addr);
	}

	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
	if (ret)
		goto err;

	iq->cur_desc = iq->desc;
	iq->next_intr_params = iq->intr_params;
	iq->cntxt_id = ntohs(c.iqid);
	iq->abs_id = ntohs(c.physiqid);
	iq->bar2_addr = bar2_address(adap, iq->cntxt_id, T4_BAR2_QTYPE_INGRESS,
				     &iq->bar2_qid);
	iq->size--;                           /* subtract status entry */
	iq->eth_dev = eth_dev;
	iq->handler = hnd;
	iq->port_id = pi->port_id;
	iq->mb_pool = mp;

	/* set offset to -1 to distinguish ingress queues without FL */
	iq->offset = fl ? 0 : -1;

	if (fl) {
		fl->cntxt_id = ntohs(c.fl0id);
		fl->avail = 0;
		fl->pend_cred = 0;
		fl->pidx = 0;
		fl->cidx = 0;
		fl->alloc_failed = 0;

		/*
		 * Note, we must initialize the BAR2 Free List User Doorbell
		 * information before refilling the Free List!
		 */
		fl->bar2_addr = bar2_address(adap, fl->cntxt_id,
					     T4_BAR2_QTYPE_EGRESS,
					     &fl->bar2_qid);

		nb_refill = refill_fl(adap, fl, fl_cap(fl));
		if (nb_refill != fl_cap(fl)) {
			ret = -ENOMEM;
			dev_err(adap, "%s: mbuf alloc failed with error: %d\n",
				__func__, ret);
			goto refill_fl_err;
		}
	}

	/*
	 * For T5 and later we attempt to set up the Congestion Manager values
	 * of the new RX Ethernet Queue.  This should really be handled by
	 * firmware because it's more complex than any host driver wants to
	 * get involved with and it's different per chip and this is almost
	 * certainly wrong.  Firmware would be wrong as well, but it would be
	 * a lot easier to fix in one place ...  For now we do something very
	 * simple (and hopefully less wrong).
	 */
	if (!is_t4(adap->params.chip) && cong >= 0) {
		u32 param, val;
		int i;

		param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
			 V_FW_PARAMS_PARAM_YZ(iq->cntxt_id));
		if (cong == 0) {
			val = V_CONMCTXT_CNGTPMODE(X_CONMCTXT_CNGTPMODE_QUEUE);
		} else {
			val = V_CONMCTXT_CNGTPMODE(
					X_CONMCTXT_CNGTPMODE_CHANNEL);
			for (i = 0; i < 4; i++) {
				if (cong & (1 << i))
					val |= V_CONMCTXT_CNGCHMAP(1 <<
								   (i << 2));
			}
		}
		ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
				    &param, &val);
		if (ret)
			dev_warn(adap->pdev_dev, "Failed to set Congestion Manager Context for Ingress Queue %d: %d\n",
				 iq->cntxt_id, -ret);
	}

	return 0;

refill_fl_err:
	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
		   iq->cntxt_id, fl->cntxt_id, 0xffff);
fl_nomem:
	ret = -ENOMEM;
err:
	iq->cntxt_id = 0;
	iq->abs_id = 0;
	iq->desc = NULL;

	if (fl && fl->desc) {
		rte_free(fl->sdesc);
		fl->cntxt_id = 0;
		fl->sdesc = NULL;
		fl->desc = NULL;
	}

	return ret;
}
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
	q->cntxt_id = id;
	q->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS,
				    &q->bar2_qid);
	q->coalesce.idx = 0;
	q->coalesce.len = 0;
	q->coalesce.flits = 0;
	q->last_coal_idx = 0;
	q->stat = (void *)&q->desc[q->size];
}
int t4_sge_eth_txq_start(struct sge_eth_txq *txq)
{
	/*
	 * TODO: For flow-control, queue may be stopped waiting to reclaim
	 * credits.
	 * Ensure queue is in EQ_STOPPED state before starting it.
	 */
	if (!(txq->flags & EQ_STOPPED))
		return -(EBUSY);

	txq->flags &= ~EQ_STOPPED;

	return 0;
}

int t4_sge_eth_txq_stop(struct sge_eth_txq *txq)
{
	txq->flags |= EQ_STOPPED;

	return 0;
}
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct rte_eth_dev *eth_dev, uint16_t queue_id,
			 unsigned int iqid, int socket_id)
{
	int ret, nentries;
	struct fw_eq_eth_cmd c;
	struct sge *s = &adap->sge;
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	char z_name[RTE_MEMZONE_NAMESIZE];
	char z_name_sw[RTE_MEMZONE_NAMESIZE];

	/* Add status entries */
	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		 eth_dev->data->drv_name, "tx_ring",
		 eth_dev->data->port_id, queue_id);
	snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);

	txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
				 sizeof(struct tx_sw_desc), &txq->q.phys_addr,
				 &txq->q.sdesc, s->stat_len, queue_id,
				 socket_id, z_name, z_name_sw);
	if (!txq->q.desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
			    V_FW_EQ_ETH_CMD_PFN(adap->pf) |
			    V_FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC |
				 F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16));
	c.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE |
				     V_FW_EQ_ETH_CMD_VIID(pi->viid));
	c.fetchszm_to_iqid =
		htonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
		      V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
		      F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid));
	c.dcaen_to_eqsize =
		htonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
		      V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
		      V_FW_EQ_ETH_CMD_EQSIZE(nentries));
	c.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
	if (ret) {
		rte_free(txq->q.sdesc);
		txq->q.sdesc = NULL;
		txq->q.desc = NULL;
		return ret;
	}

	init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)));

	txq->stats.pkts = 0;
	txq->stats.tx_cso = 0;
	txq->stats.coal_wr = 0;
	txq->stats.vlan_ins = 0;
	txq->stats.tx_bytes = 0;
	txq->stats.coal_pkts = 0;
	txq->stats.mapping_err = 0;
	txq->flags |= EQ_STOPPED;
	txq->eth_dev = eth_dev;
	t4_os_lock_init(&txq->txq_lock);
	return 0;
}
static void free_txq(struct sge_txq *q)
{
	q->cntxt_id = 0;
	q->sdesc = NULL;
	q->desc = NULL;
}

static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
			 struct sge_fl *fl)
{
	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;

	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
		   rq->cntxt_id, fl_id, 0xffff);

	rq->cntxt_id = 0;
	rq->desc = NULL;

	if (fl) {
		free_rx_bufs(fl, fl->avail);
		rte_free(fl->sdesc);
		fl->sdesc = NULL;
		fl->cntxt_id = 0;
		fl->desc = NULL;
	}
}
/*
 * Clear all queues of the port
 *
 * Note:  This function must only be called after rx and tx path
 * of the port have been disabled.
 */
void t4_sge_eth_clear_queues(struct port_info *pi)
{
	int i;
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *rxq = &adap->sge.ethrxq[pi->first_qset];
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];

	for (i = 0; i < pi->n_rx_qsets; i++, rxq++) {
		if (rxq->rspq.desc)
			t4_sge_eth_rxq_stop(adap, &rxq->rspq);
	}
	for (i = 0; i < pi->n_tx_qsets; i++, txq++) {
		if (txq->q.desc) {
			struct sge_txq *q = &txq->q;

			t4_sge_eth_txq_stop(txq);
			reclaim_completed_tx(q);
			free_tx_desc(q, q->size);
			q->equeidx = q->pidx;
		}
	}
}
void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq)
{
	if (rxq->rspq.desc) {
		t4_sge_eth_rxq_stop(adap, &rxq->rspq);
		free_rspq_fl(adap, &rxq->rspq, rxq->fl.size ? &rxq->fl : NULL);
	}
}

void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq)
{
	if (txq->q.desc) {
		t4_sge_eth_txq_stop(txq);
		reclaim_completed_tx(&txq->q);
		t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, txq->q.cntxt_id);
		free_tx_desc(&txq->q, txq->q.size);
		rte_free(txq->q.sdesc);
		free_txq(&txq->q);
	}
}

void t4_sge_tx_monitor_start(struct adapter *adap)
{
	rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
}

void t4_sge_tx_monitor_stop(struct adapter *adap)
{
	rte_eal_alarm_cancel(tx_timer_cb, (void *)adap);
}
/**
 * t4_free_sge_resources - free SGE resources
 * @adap: the adapter
 *
 * Frees resources used by the SGE queue sets.
 */
void t4_free_sge_resources(struct adapter *adap)
{
	int i;
	struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0];
	struct sge_eth_txq *txq = &adap->sge.ethtxq[0];

	/* clean up Ethernet Tx/Rx queues */
	for (i = 0; i < adap->sge.max_ethqsets; i++, rxq++, txq++) {
		/* Free only the queues allocated */
		if (rxq->rspq.desc) {
			t4_sge_eth_rxq_release(adap, rxq);
			rxq->rspq.eth_dev = NULL;
		}
		if (txq->q.desc) {
			t4_sge_eth_txq_release(adap, txq);
			txq->eth_dev = NULL;
		}
	}

	if (adap->sge.fw_evtq.desc)
		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
}
/**
 * t4_sge_init - initialize SGE
 * @adap: the adapter
 *
 * Performs SGE initialization needed every time after a chip reset.
 * We do not initialize any of the queues here, instead the driver
 * top-level must request those individually.
 *
 * Called in two different modes:
 *
 *  1. Perform actual hardware initialization and record hard-coded
 *     parameters which were used.  This gets used when we're the
 *     Master PF and the Firmware Configuration File support didn't
 *     work for some reason.
 *
 *  2. We're not the Master PF or initialization was performed with
 *     a Firmware Configuration File.  In this case we need to grab
 *     any of the SGE operating parameters that we need to have in
 *     order to do our job and make sure we can live with them ...
 */
static int t4_sge_init_soft(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
	u32 ingress_rx_threshold;

	/*
	 * Verify that CPL messages are going to the Ingress Queue for
	 * process_responses() and that only packet data is going to the
	 * Free Lists.
	 */
	if ((t4_read_reg(adap, A_SGE_CONTROL) & F_RXPKTCPLMODE) !=
	    V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
		dev_err(adap, "bad SGE CPL MODE\n");
		return -EINVAL;
	}

	/*
	 * Validate the Host Buffer Register Array indices that we want to
	 * use ...
	 *
	 * XXX Note that we should really read through the Host Buffer Size
	 * XXX register array and find the indices of the Buffer Sizes which
	 * XXX meet our needs!
	 */
#define READ_FL_BUF(x) \
	t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE0 + (x) * sizeof(u32))

	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);

	/*
	 * We only bother using the Large Page logic if the Large Page Buffer
	 * is larger than our Page Size Buffer.
	 */
	if (fl_large_pg <= fl_small_pg)
		fl_large_pg = 0;

	/*
	 * The Page Size Buffer must be exactly equal to our Page Size and the
	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
	 */
	if (fl_small_pg != CXGBE_PAGE_SIZE ||
	    (fl_large_pg & (fl_large_pg - 1)) != 0) {
		dev_err(adap, "bad SGE FL page buffer sizes [%d, %d]\n",
			fl_small_pg, fl_large_pg);
		return -EINVAL;
	}

	if (fl_large_pg)
		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;

	if (adap->use_unpacked_mode) {
		int err = 0;

		if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap)) {
			dev_err(adap, "bad SGE FL small MTU %d\n",
				fl_small_mtu);
			err = -EINVAL;
		}
		if (fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
			dev_err(adap, "bad SGE FL large MTU %d\n",
				fl_large_mtu);
			err = -EINVAL;
		}
		if (err)
			return err;
	}

	/*
	 * Retrieve our RX interrupt holdoff timer values and counter
	 * threshold values from the SGE parameters.
	 */
	timer_value_0_and_1 = t4_read_reg(adap, A_SGE_TIMER_VALUE_0_AND_1);
	timer_value_2_and_3 = t4_read_reg(adap, A_SGE_TIMER_VALUE_2_AND_3);
	timer_value_4_and_5 = t4_read_reg(adap, A_SGE_TIMER_VALUE_4_AND_5);
	s->timer_val[0] = core_ticks_to_us(adap,
					   G_TIMERVALUE0(timer_value_0_and_1));
	s->timer_val[1] = core_ticks_to_us(adap,
					   G_TIMERVALUE1(timer_value_0_and_1));
	s->timer_val[2] = core_ticks_to_us(adap,
					   G_TIMERVALUE2(timer_value_2_and_3));
	s->timer_val[3] = core_ticks_to_us(adap,
					   G_TIMERVALUE3(timer_value_2_and_3));
	s->timer_val[4] = core_ticks_to_us(adap,
					   G_TIMERVALUE4(timer_value_4_and_5));
	s->timer_val[5] = core_ticks_to_us(adap,
					   G_TIMERVALUE5(timer_value_4_and_5));

	ingress_rx_threshold = t4_read_reg(adap, A_SGE_INGRESS_RX_THRESHOLD);
	s->counter_val[0] = G_THRESHOLD_0(ingress_rx_threshold);
	s->counter_val[1] = G_THRESHOLD_1(ingress_rx_threshold);
	s->counter_val[2] = G_THRESHOLD_2(ingress_rx_threshold);
	s->counter_val[3] = G_THRESHOLD_3(ingress_rx_threshold);

	return 0;
}
int t4_sge_init(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	u32 sge_control, sge_control2, sge_conm_ctrl;
	unsigned int ingpadboundary, ingpackboundary;
	int ret, egress_threshold;

	/*
	 * Ingress Padding Boundary and Egress Status Page Size are set up by
	 * t4_fixup_host_params().
	 */
	sge_control = t4_read_reg(adap, A_SGE_CONTROL);
	s->pktshift = G_PKTSHIFT(sge_control);
	s->stat_len = (sge_control & F_EGRSTATUSPAGESIZE) ? 128 : 64;

	/*
	 * T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately.  The actual Ingress Packet Data alignment boundary
	 * within Packed Buffer Mode is the maximum of these two
	 * specifications.
	 */
	ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) +
			       X_INGPADBOUNDARY_SHIFT);
	s->fl_align = ingpadboundary;

	if (!is_t4(adap->params.chip) && !adap->use_unpacked_mode) {
		/*
		 * T5 has a weird interpretation of one of the PCIe Packing
		 * Boundary values.  No idea why ...
		 */
		sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
		ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
		if (ingpackboundary == X_INGPACKBOUNDARY_16B)
			ingpackboundary = 16;
		else
			ingpackboundary = 1 << (ingpackboundary +
						X_INGPACKBOUNDARY_SHIFT);

		s->fl_align = max(ingpadboundary, ingpackboundary);
	}

	ret = t4_sge_init_soft(adap);
	if (ret < 0) {
		dev_err(adap, "%s: t4_sge_init_soft failed, error %d\n",
			__func__, -ret);
		return ret;
	}

	/*
	 * A FL with <= fl_starve_thres buffers is starving and a periodic
	 * timer will attempt to refill it.  This needs to be larger than the
	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
	 * stuck waiting for new packets while the SGE is waiting for us to
	 * give it more Free List entries.  (Note that the SGE's Egress
	 * Congestion Threshold is in units of 2 Free List pointers.)  For T4,
	 * there was only a single field to control this.  For T5 there's the
	 * original field which now only applies to Unpacked Mode Free List
	 * buffers and a new field which only applies to Packed Mode Free List
	 * buffers.
	 */
	sge_conm_ctrl = t4_read_reg(adap, A_SGE_CONM_CTRL);
	if (is_t4(adap->params.chip) || adap->use_unpacked_mode)
		egress_threshold = G_EGRTHRESHOLD(sge_conm_ctrl);
	else
		egress_threshold = G_EGRTHRESHOLDPACKING(sge_conm_ctrl);
	s->fl_starve_thres = 2 * egress_threshold + 1;

	return 0;
}