/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/busy_poll.h>
#ifdef CONFIG_CHELSIO_T4_FCOE
#include <scsi/fc/fc_fcoe.h>
#endif /* CONFIG_CHELSIO_T4_FCOE */
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_ptp.h"
#include "cxgb4_uld.h"
/*
 * Rx buffer size.  We use largish buffers if possible but settle for single
 * pages under memory shortage.
 */
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif

/* RX_PULL_LEN should be <= RX_COPY_THRES */
#define RX_COPY_THRES    256
#define RX_PULL_LEN      128

/*
 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
 */
#define RX_PKT_SKB_LEN   512

/*
 * Max number of Tx descriptors we clean up at a time.  Should be modest as
 * freeing skbs isn't cheap and it happens while holding locks.  We just need
 * to free packets faster than they arrive, we eventually catch up and keep
 * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
 */
#define MAX_TX_RECLAIM 16

/*
 * Max number of Rx buffers we replenish at a time.  Again keep this modest,
 * allocating buffers isn't cheap either.
 */
#define MAX_RX_REFILL 16U

/*
 * Period of the Rx queue check timer.  This timer is infrequent as it has
 * something to do only when the system experiences severe memory shortage.
 */
#define RX_QCHECK_PERIOD (HZ / 2)

/*
 * Period of the Tx queue check timer.
 */
#define TX_QCHECK_PERIOD (HZ / 2)

/*
 * Max number of Tx descriptors to be reclaimed by the Tx timer.
 */
#define MAX_TIMER_TX_RECLAIM 100

/*
 * Timer index used when backing off due to memory shortage.
 */
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * Suspension threshold for non-Ethernet Tx queues.  We require enough room
 * for a full sized WR.
 */
#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256

/*
 * Max size of a WR sent through a control Tx queue.
 */
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
struct rx_sw_desc {                /* SW state per Rx descriptor */
	struct page *page;
	dma_addr_t dma_addr;
};

/*
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
 * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
 * We could easily support more but there doesn't seem to be much need for
 * that ...
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000
static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
					  unsigned int mtu)
{
	struct sge *s = &adapter->sge;

	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
}

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
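/* A worked example of the sizing above (illustrative only -- the actual
 * pktshift and fl_align values come from the SGE configuration at init
 * time): with a typical pktshift of 2 bytes and fl_align of 64 bytes,
 *
 *	fl_mtu_bufsize(adap, FL_MTU_SMALL) = ALIGN(2 + 14 + 4 + 1500, 64) = 1536
 *	fl_mtu_bufsize(adap, FL_MTU_LARGE) = ALIGN(2 + 14 + 4 + 9000, 64) = 9024
 *
 * i.e. each Free List buffer leaves room for the SGE packet shift, the
 * Ethernet and VLAN headers and the MTU, rounded up to the FL alignment.
 */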
/*
 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array.  We also use bit 4 to mark when the buffer has been
 * unmapped for DMA; this is of course never sent to the hardware and is only
 * used to prevent double unmappings.  All of the above requires that the Free
 * List Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte aligned or aligned to a larger power of 2.  Since the SGE's minimal
 * Free List Buffer alignment is 32 bytes, this works out for us ...
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File.  We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (FL_PG_ORDER) page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};

static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
#define MIN_NAPI_WORK  1
static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
}

static inline bool is_buf_mapped(const struct rx_sw_desc *d)
{
	return !(d->dma_addr & RX_UNMAPPED_BUF);
}
/**
 *	txq_avail - return the number of available slots in a Tx queue
 *	@q: the Tx queue
 *
 *	Returns the number of descriptors in a Tx queue available to write new
 *	packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}
/**
 *	fl_cap - return the capacity of a free-buffer list
 *	@fl: the FL
 *
 *	Returns the capacity of a free-buffer list.  The capacity is less than
 *	the size because one descriptor needs to be left unpopulated, otherwise
 *	HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}
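/* For example (illustrative numbers): a Free List allocated with 1024 buffer
 * entries has a usable capacity of fl_cap() == 1016 buffers; the final 8
 * entries -- one full 64-byte Egress Queue descriptor's worth of buffer
 * pointers -- are always left unpopulated so the hardware never sees
 * PIDX == CIDX on a full ring and mistakes it for an empty one.
 */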
/**
 *	fl_starving - return whether a Free List is starving.
 *	@adapter: pointer to the adapter
 *	@fl: the Free List
 *
 *	Tests specified Free List to see whether the number of buffers
 *	available to the hardware has fallen below our "starvation"
 *	threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
		  dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);

out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL(cxgb4_map_skb);
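/* Illustrative call pattern (this mirrors how the Ethernet xmit paths further
 * down use it; the surrounding names are just for the example): the caller
 * provides one address slot for the linear data plus one per page fragment,
 * and on failure nothing is left mapped, so the packet can simply be dropped.
 *
 *	dma_addr_t addr[MAX_SKB_FRAGS + 1];
 *
 *	if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0))
 *		goto out_free;
 */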
#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
		      const dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++)
		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}
/**
 *	deferred_unmap_destructor - unmap a packet when it is freed
 *	@skb: the packet
 *
 *	This is the packet destructor used for Tx packets that need to remain
 *	mapped until they are freed rather than until their Tx descriptors are
 *	freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
#endif /* CONFIG_NEED_DMA_MAP_STATE */
static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
{
	const struct ulptx_sge_pair *p;
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	if (likely(skb_headlen(skb)))
		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
				 DMA_TO_DEVICE);
	else {
		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
			       DMA_TO_DEVICE);
		nfrags--;
	}

	/*
	 * the complexity below is because of the possibility of a wrap-around
	 * in the middle of an SGL
	 */
	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p++;
		} else if ((u8 *)p == (u8 *)q->stat) {
			p = (const struct ulptx_sge_pair *)q->desc;
			goto unmap;
		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[2];
		} else {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[1];
		}
	}
	if (nfrags) {
		__be64 addr;

		if ((u8 *)p == (u8 *)q->stat)
			p = (const struct ulptx_sge_pair *)q->desc;
		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
						       *(const __be64 *)q->desc;
		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
			       DMA_TO_DEVICE);
	}
}
/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
		  unsigned int n, bool unmap)
{
	struct tx_sw_desc *d;
	unsigned int cidx = q->cidx;
	struct device *dev = adap->pdev_dev;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {                       /* an SGL is present */
			if (unmap)
				unmap_sgl(dev, d->skb, d->sgl, q);
			dev_consume_skb_any(d->skb);
			d->skb = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}
/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));

	hw_cidx -= q->cidx;
	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}
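/* The subtraction above handles the ring wrap-around.  For example, on a
 * 1024-descriptor queue with the software consumer index at 1020 and the
 * hardware status page reporting a consumer index of 3, the difference is
 * -1017 and adding the queue size yields the 7 descriptors that have
 * actually been completed.  (Numbers are illustrative only.)
 */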
/**
 *	cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue locked.
 */
inline void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
				       bool unmap)
{
	int avail = reclaimable(q);

	if (avail) {
		/*
		 * Limit the amount of clean up work we do at a time to keep
		 * the Tx lock hold time O(1).
		 */
		if (avail > MAX_TX_RECLAIM)
			avail = MAX_TX_RECLAIM;

		free_tx_desc(adap, q, avail, unmap);
		q->in_use -= avail;
	}
}
EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
static inline int get_buf_size(struct adapter *adapter,
			       const struct rx_sw_desc *d)
{
	struct sge *s = &adapter->sge;
	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
	int buf_size;

	switch (rx_buf_size_idx) {
	case RX_SMALL_PG_BUF:
		buf_size = PAGE_SIZE;
		break;

	case RX_LARGE_PG_BUF:
		buf_size = PAGE_SIZE << s->fl_pg_order;
		break;

	case RX_SMALL_MTU_BUF:
		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
		break;

	case RX_LARGE_MTU_BUF:
		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
		break;

	default:
		BUG();
	}

	return buf_size;
}
/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list to free buffers from
 *	@n: how many buffers to free
 *
 *	Release the next @n buffers on an SGE free-buffer Rx queue.   The
 *	buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
{
	while (n--) {
		struct rx_sw_desc *d = &q->sdesc[q->cidx];

		if (is_buf_mapped(d))
			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
				       get_buf_size(adap, d),
				       PCI_DMA_FROMDEVICE);
		put_page(d->page);
		d->page = NULL;
		if (++q->cidx == q->size)
			q->cidx = 0;
		q->avail--;
	}
}
/**
 *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list
 *
 *	Unmap the current buffer on an SGE free-buffer Rx queue.   The
 *	buffer must be made inaccessible to HW before calling this function.
 *
 *	This is similar to @free_rx_bufs above but does not free the buffer.
 *	Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
{
	struct rx_sw_desc *d = &q->sdesc[q->cidx];

	if (is_buf_mapped(d))
		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
			       get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
	d->page = NULL;
	if (++q->cidx == q->size)
		q->cidx = 0;
	q->avail--;
}
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= 8) {
		u32 val = adap->params.arch.sge_fl_db;

		if (is_t4(adap->params.chip))
			val |= PIDX_V(q->pend_cred / 8);
		else
			val |= PIDX_T5_V(q->pend_cred / 8);

		/* Make sure all memory writes to the Free List queue are
		 * committed before we tell the hardware about them.
		 */
		wmb();

		/* If we don't have access to the new User Doorbell (T5+), use
		 * the old doorbell mechanism; otherwise use the new BAR2
		 * mechanism.
		 */
		if (unlikely(q->bar2_addr == NULL)) {
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     val | QID_V(q->cntxt_id));
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);

			/* This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
		q->pend_cred &= 7;
	}
}
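/* Free List doorbells are issued in units of 8 buffers (one 64-byte Egress
 * Queue descriptor holds 8 buffer addresses).  As an illustration, with 37
 * pending credits the code above rings the doorbell with a PIDX increment of
 * 4 (32 buffers) and leaves the remaining 5 credits pending for a later
 * refill to push out.
 */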
static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
				  dma_addr_t mapping)
{
	sd->page = pg;
	sd->dma_addr = mapping;      /* includes size low bits */
}
/**
 *	refill_fl - refill an SGE Rx buffer ring
 *	@adap: the adapter
 *	@q: the ring to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for the allocations
 *
 *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must assure that
 *	@n does not exceed the queue's capacity.  If afterwards the queue is
 *	found critically low mark it as starving in the bitmap of starving FLs.
 *
 *	Returns the number of buffers allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
			      gfp_t gfp)
{
	struct sge *s = &adap->sge;
	struct page *pg;
	dma_addr_t mapping;
	unsigned int cred = q->avail;
	__be64 *d = &q->desc[q->pidx];
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	int node;

#ifdef CONFIG_DEBUG_FS
	if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
		goto out;
#endif

	gfp |= __GFP_NOWARN;
	node = dev_to_node(adap->pdev_dev);

	if (s->fl_pg_order == 0)
		goto alloc_small_pages;

	/*
	 * Prefer large buffers
	 */
	while (n) {
		pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
		if (unlikely(!pg)) {
			q->large_alloc_failed++;
			break;       /* fall back to single pages */
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0,
				       PAGE_SIZE << s->fl_pg_order,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			__free_pages(pg, s->fl_pg_order);
			q->mapping_err++;
			goto out;   /* do not try small pages for this error */
		}
		mapping |= RX_LARGE_PG_BUF;
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
		n--;
	}

alloc_small_pages:
	while (n--) {
		pg = alloc_pages_node(node, gfp, 0);
		if (unlikely(!pg)) {
			q->alloc_failed++;
			break;
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			put_page(pg);
			q->mapping_err++;
			goto out;
		}
		*d++ = cpu_to_be64(mapping);
		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
	}

out:	cred = q->avail - cred;
	q->pend_cred += cred;
	ring_fl_db(adap, q);

	if (unlikely(fl_starving(adap, q))) {
		smp_wmb();
		q->low++;
		set_bit(q->cntxt_id - adap->sge.egr_start,
			adap->sge.starving_fl);
	}

	return cred;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
		  GFP_ATOMIC);
}
/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@dev: the PCI device's core device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *	@stat_size: extra space in HW ring for status information
 *	@node: preferred node for memory allocations
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the bus address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size, int node)
{
	size_t len = nelem * elem_size + stat_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kcalloc_node(sw_size, nelem, GFP_KERNEL, node);

		if (!s) {
			dma_free_coherent(dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	return p;
}
/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
	 * repeated sequences of { Length[i], Length[i+1], Address[i],
	 * Address[i+1] } (this ensures that all addresses are on 64-bit
	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
	 * Address[N+1] is omitted.
	 *
	 * The following calculation incorporates all of the above.  It's
	 * somewhat hard to follow but, briefly: the "+2" accounts for the
	 * first two flits which include the DSGL header, Length0 and
	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
	 * flits for every pair of the remaining N-1 entries); and finally the
	 * "+((n-1)&1)" adds the one remaining flit needed if (n-1) is odd.
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
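/* A quick sanity check of the formula above: for an SGL with n = 3 buffers
 * we get, after the n--, (3 * 2) / 2 + (2 & 1) + 2 = 5 flits -- two flits
 * for the ULPTX header plus Length0/Address0 and three more flits for the
 * trailing { Length1, Length2, Address1, Address2 } group.
 */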
/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Returns the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}
/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *	@chip_ver: chip version
 *
 *	Returns whether an Ethernet packet is small enough to fit as
 *	immediate data. Return value corresponds to headroom required.
 */
static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
{
	int hdrlen = 0;

	if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
	    chip_ver > CHELSIO_T5) {
		hdrlen = sizeof(struct cpl_tx_tnl_lso);
		hdrlen += sizeof(struct cpl_tx_pkt_core);
	} else {
		hdrlen = skb_shinfo(skb)->gso_size ?
			 sizeof(struct cpl_tx_pkt_lso_core) : 0;
		hdrlen += sizeof(struct cpl_tx_pkt);
	}
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}
/**
 *	calc_tx_flits - calculate the number of flits for a packet Tx WR
 *	@skb: the packet
 *	@chip_ver: chip version
 *
 *	Returns the number of flits needed for a Tx WR for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
					 unsigned int chip_ver)
{
	unsigned int flits;
	int hdrlen = is_eth_imm(skb, chip_ver);

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (hdrlen)
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size) {
		if (skb->encapsulation && chip_ver > CHELSIO_T5)
			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
				 sizeof(struct cpl_tx_tnl_lso);
		else
			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
				 sizeof(struct cpl_tx_pkt_lso_core);

		hdrlen += sizeof(struct cpl_tx_pkt_core);
		flits += (hdrlen / sizeof(__be64));
	} else {
		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	}
	return flits;
}
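/* As a rough, illustrative example: a non-GSO packet with linear data plus
 * two page fragments needs sgl_len(3) = 5 flits for the gather list and
 * (sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_pkt_core)) / 8
 * = 4 flits of WR/CPL headers (each structure is 16 bytes), i.e. 9 flits in
 * total, which flits_to_desc() rounds up to two 64-byte Tx descriptors.
 */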
/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *	@chip_ver: chip version
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
					 unsigned int chip_ver)
{
	return flits_to_desc(calc_tx_flits(skb, chip_ver));
}
/**
 *	cxgb4_write_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@q: the Tx queue we are writing into
 *	@sgl: starting location for writing the SGL
 *	@end: points right after the end of the SGL
 *	@start: start offset into skb main-body data to include in the SGL
 *	@addr: the list of bus addresses for the SGL elements
 *
 *	Generates a gather list for the buffers that make up a packet.
 *	The caller must provide adequate space for the SGL that will be written.
 *	The SGL includes all of the packet's page fragments and the data in its
 *	main body except for the first @start bytes.  @sgl must be 16-byte
 *	aligned and within a Tx descriptor with available space.  @end points
 *	right after the end of the SGL but does not account for any potential
 *	wrap around, i.e., @end > @sgl.
 */
void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
		     struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		     const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int nfrags = si->nr_frags;
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

	len = skb_headlen(skb) - start;
	if (likely(len)) {
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		nfrags++;
	} else {
		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
		sgl->addr0 = cpu_to_be64(addr[1]);
	}

	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
			      ULPTX_NSGE_V(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
		to->addr[0] = cpu_to_be64(addr[i]);
		to->addr[1] = cpu_to_be64(addr[++i]);
	}
	if (nfrags) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(0);
		to->addr[0] = cpu_to_be64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)q->stat)) {
		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)q->stat;
		memcpy(q->desc, (u8 *)buf + part0, part1);
		end = (void *)q->desc + part1;
	}
	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
		*end = 0;
}
EXPORT_SYMBOL(cxgb4_write_sgl);
/* This function copies 64 byte coalesced work request to
 * memory mapped BAR2 space. For coalesced WR SGE fetches
 * data from the FIFO instead of from Host.
 */
static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
{
	int count = 8;

	while (count) {
		writeq(*src, dst);
		src++;
		dst++;
		count--;
	}
}
/**
 *	cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *	@n: number of new descriptors to give to HW
 *
 *	Ring the doorbell for a Tx queue.
 */
inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
	/* Make sure that all writes to the TX Descriptors are committed
	 * before we tell the hardware about them.
	 */
	wmb();

	/* If we don't have access to the new User Doorbell (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(q->bar2_addr == NULL)) {
		u32 val = PIDX_V(n);
		unsigned long flags;

		/* For T4 we need to participate in the Doorbell Recovery
		 * mechanism.
		 */
		spin_lock_irqsave(&q->db_lock, flags);
		if (!q->db_disabled)
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     QID_V(q->cntxt_id) | val);
		else
			q->db_pidx_inc += n;
		q->db_pidx = q->pidx;
		spin_unlock_irqrestore(&q->db_lock, flags);
	} else {
		u32 val = PIDX_T5_V(n);

		/* T4 and later chips share the same PIDX field offset within
		 * the doorbell, but T5 and later shrank the field in order to
		 * gain a bit for Doorbell Priority.  The field was absurdly
		 * large in the first place (14 bits) so we just use the T5
		 * and later limits and warn if a Queue ID is too large.
		 */
		WARN_ON(val & DBPRIO_F);

		/* If we're only writing a single TX Descriptor and we can use
		 * Inferred QID registers, we can use the Write Combining
		 * Gather Buffer; otherwise we use the simple doorbell.
		 */
		if (n == 1 && q->bar2_qid == 0) {
			int index = (q->pidx
				     ? (q->pidx - 1)
				     : (q->size - 1));
			u64 *wr = (u64 *)&q->desc[index];

			cxgb_pio_copy((u64 __iomem *)
				      (q->bar2_addr + SGE_UDB_WCDOORBELL),
				      wr);
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);
		}

		/* This Write Memory Barrier will force the write to the User
		 * Doorbell area to be flushed.  This is needed to prevent
		 * writes on different CPUs for the same queue from hitting
		 * the adapter out of order.  This is required when some Work
		 * Requests take the Write Combine Gather Buffer path (user
		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
		 * take the traditional path where we simply increment the
		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
		 * hardware DMA read the actual Work Request.
		 */
		wmb();
	}
}
EXPORT_SYMBOL(cxgb4_ring_tx_db);
/**
 *	cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
 *	@skb: the packet
 *	@q: the Tx queue where the packet will be inlined
 *	@pos: starting position in the Tx queue where to inline the packet
 *
 *	Inline a packet's contents directly into Tx descriptors, starting at
 *	the given position within the Tx DMA ring.
 *	Most of the complexity of this operation is dealing with wrap arounds
 *	in the middle of the packet we want to inline.
 */
void cxgb4_inline_tx_skb(const struct sk_buff *skb,
			 const struct sge_txq *q, void *pos)
{
	int left = (void *)q->stat - pos;
	u64 *p;

	if (likely(skb->len <= left)) {
		if (likely(!skb->data_len))
			skb_copy_from_linear_data(skb, pos, skb->len);
		else
			skb_copy_bits(skb, 0, pos, skb->len);
		pos += skb->len;
	} else {
		skb_copy_bits(skb, 0, pos, left);
		skb_copy_bits(skb, left, q->desc, skb->len - left);
		pos = (void *)q->desc + (skb->len - left);
	}
	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8)
		*p = 0;
}
EXPORT_SYMBOL(cxgb4_inline_tx_skb);
static void *inline_tx_skb_header(const struct sk_buff *skb,
				  const struct sge_txq *q, void *pos,
				  int length)
{
	u64 *p;
	int left = (void *)q->stat - pos;

	if (likely(length <= left)) {
		memcpy(pos, skb->data, length);
		pos += length;
	} else {
		memcpy(pos, skb->data, left);
		memcpy(q->desc, skb->data + left, length - left);
		pos = (void *)q->desc + (length - left);
	}
	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8) {
		*p = 0;
		return p + 1;
	}
	return p;
}
/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
	int csum_type;
	bool inner_hdr_csum = false;
	u16 proto, ver;

	if (skb->encapsulation &&
	    (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5))
		inner_hdr_csum = true;

	if (inner_hdr_csum) {
		ver = inner_ip_hdr(skb)->version;
		proto = (ver == 4) ? inner_ip_hdr(skb)->protocol :
			inner_ipv6_hdr(skb)->nexthdr;
	} else {
		ver = ip_hdr(skb)->version;
		proto = (ver == 4) ? ip_hdr(skb)->protocol :
			ipv6_hdr(skb)->nexthdr;
	}

	if (ver == 4) {
		if (proto == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP;
		else if (proto == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP;
		else {
nocsum:			/*
			 * unknown protocol, disable HW csum
			 * and hope a bad packet is detected
			 */
			return TXPKT_L4CSUM_DIS_F;
		}
	} else {
		/*
		 * this doesn't work with extension headers
		 */
		if (proto == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP6;
		else if (proto == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP6;
		else
			goto nocsum;
	}

	if (likely(csum_type >= TX_CSUM_TCPIP)) {
		int eth_hdr_len, l4_len;
		u64 hdr_len;

		if (inner_hdr_csum) {
			/* This allows checksum offload for all encapsulated
			 * packets like GRE etc..
			 */
			l4_len = skb_inner_network_header_len(skb);
			eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN;
		} else {
			l4_len = skb_network_header_len(skb);
			eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
		}
		hdr_len = TXPKT_IPHDR_LEN_V(l4_len);

		if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
			hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		else
			hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
	} else {
		int start = skb_transport_offset(skb);

		return TXPKT_CSUM_TYPE_V(csum_type) |
			TXPKT_CSUM_START_V(start) |
			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
	}
}
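/* For a plain TCP/IPv4 frame without encapsulation (an illustrative case),
 * hwcsum() ends up in the csum_type >= TX_CSUM_TCPIP branch with
 * eth_hdr_len = skb_network_offset(skb) - ETH_HLEN (0 for an untagged frame)
 * and l4_len equal to the IP header length, so the returned control word
 * carries TX_CSUM_TCPIP plus the Ethernet/IP header lengths the hardware
 * needs in order to locate the TCP checksum field.
 */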
static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}
#ifdef CONFIG_CHELSIO_T4_FCOE
static inline int
cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
		  const struct port_info *pi, u64 *cntrl)
{
	const struct cxgb_fcoe *fcoe = &pi->fcoe;

	if (!(fcoe->flags & CXGB_FCOE_ENABLED))
		return 0;

	if (skb->protocol != htons(ETH_P_FCOE))
		return 0;

	skb_reset_mac_header(skb);
	skb->mac_len = sizeof(struct ethhdr);

	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));

	if (!cxgb_fcoe_sof_eof_supported(adap, skb))
		return -ENOTSUPP;

	/* FC CRC offload */
	*cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
		     TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
		     TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
		     TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
		     TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
	return 0;
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
/* Returns tunnel type if hardware supports offloading of the same.
 * It is called only for T5 and onwards.
 */
enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
{
	u8 l4_hdr = 0;
	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
	struct port_info *pi = netdev_priv(skb->dev);
	struct adapter *adapter = pi->adapter;

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB))
		return tnl_type;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return tnl_type;
	}

	switch (l4_hdr) {
	case IPPROTO_UDP:
		if (adapter->vxlan_port == udp_hdr(skb)->dest)
			tnl_type = TX_TNL_TYPE_VXLAN;
		else if (adapter->geneve_port == udp_hdr(skb)->dest)
			tnl_type = TX_TNL_TYPE_GENEVE;
		break;
	default:
		return tnl_type;
	}

	return tnl_type;
}
static inline void t6_fill_tnl_lso(struct sk_buff *skb,
				   struct cpl_tx_tnl_lso *tnl_lso,
				   enum cpl_tx_tnl_lso_type tnl_type)
{
	u32 val;
	int in_eth_xtra_len;
	int l3hdr_len = skb_network_header_len(skb);
	int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
	const struct skb_shared_info *ssi = skb_shinfo(skb);
	bool v6 = (ip_hdr(skb)->version == 6);

	val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
	      CPL_TX_TNL_LSO_FIRST_F |
	      CPL_TX_TNL_LSO_LAST_F |
	      (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
	      CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
	      CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
	      (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
	      CPL_TX_TNL_LSO_IPLENSETOUT_F |
	      (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
	tnl_lso->op_to_IpIdSplitOut = htonl(val);

	tnl_lso->IpIdOffsetOut = 0;

	/* Get the tunnel header length */
	val = skb_inner_mac_header(skb) - skb_mac_header(skb);
	in_eth_xtra_len = skb_inner_network_header(skb) -
			  skb_inner_mac_header(skb) - ETH_HLEN;

	switch (tnl_type) {
	case TX_TNL_TYPE_VXLAN:
	case TX_TNL_TYPE_GENEVE:
		tnl_lso->UdpLenSetOut_to_TnlHdrLen =
			htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
			      CPL_TX_TNL_LSO_UDPLENSETOUT_F);
		break;
	default:
		tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
		break;
	}

	tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
		 htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
		       CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));

	val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
	      CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
	      CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
	      CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
	tnl_lso->Flow_to_TcpHdrLen = htonl(val);

	tnl_lso->IpIdOffset = htons(0);

	tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
	tnl_lso->TCPSeqOffset = htonl(0);
	tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
}
/**
 *	cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
 */
static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u32 wr_mid, ctrl0, op;
	u64 cntrl, *end, *sgl;
	int qidx, credits;
	unsigned int flits, ndesc;
	struct adapter *adap;
	struct sge_eth_txq *q;
	const struct port_info *pi;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	const struct skb_shared_info *ssi;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
	bool immediate = false;
	int len, max_pkt_len;
	bool ptp_enabled = is_ptp_enabled(skb, dev);
	unsigned int chip_ver;
	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;

#ifdef CONFIG_CHELSIO_T4_FCOE
	int err;
#endif /* CONFIG_CHELSIO_T4_FCOE */

	/*
	 * The chip min packet length is 10 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(skb->len < ETH_HLEN)) {
out_free:	dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Discard the packet if the length is greater than mtu */
	max_pkt_len = ETH_HLEN + dev->mtu;
	if (skb_vlan_tagged(skb))
		max_pkt_len += VLAN_HLEN;
	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
		goto out_free;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	ssi = skb_shinfo(skb);
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	if (xfrm_offload(skb) && !ssi->gso_size)
		return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
#endif /* CHELSIO_IPSEC_INLINE */

	qidx = skb_get_queue_mapping(skb);
	if (ptp_enabled) {
		spin_lock(&adap->ptp_lock);
		if (!(adap->ptp_tx_skb)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			adap->ptp_tx_skb = skb_get(skb);
		} else {
			spin_unlock(&adap->ptp_lock);
			goto out_free;
		}
		q = &adap->sge.ptptxq;
	} else {
		q = &adap->sge.ethtxq[qidx + pi->first_qset];
	}
	skb_tx_timestamp(skb);

	cxgb4_reclaim_completed_tx(adap, &q->q, true);
	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;

#ifdef CONFIG_CHELSIO_T4_FCOE
	err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
	if (unlikely(err == -ENOTSUPP)) {
		if (ptp_enabled)
			spin_unlock(&adap->ptp_lock);
		goto out_free;
	}
#endif /* CONFIG_CHELSIO_T4_FCOE */

	chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
	flits = calc_tx_flits(skb, chip_ver);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, qidx);
		if (ptp_enabled)
			spin_unlock(&adap->ptp_lock);
		return NETDEV_TX_BUSY;
	}

	if (is_eth_imm(skb, chip_ver))
		immediate = true;

	if (skb->encapsulation && chip_ver > CHELSIO_T5)
		tnl_type = cxgb_encap_offload_supported(skb);

	if (!immediate &&
	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
		q->mapping_err++;
		if (ptp_enabled)
			spin_unlock(&adap->ptp_lock);
		goto out_free;
	}

	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		eth_txq_stop(q);
		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}

	wr = (void *)&q->q.desc[q->q.pidx];
	wr->equiq_to_len16 = htonl(wr_mid);
	wr->r3 = cpu_to_be64(0);
	end = (u64 *)wr + flits;

	len = immediate ? skb->len : 0;
	len += sizeof(*cpl);
	if (ssi->gso_size) {
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
		int l3hdr_len = skb_network_header_len(skb);
		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
		struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);

		if (tnl_type)
			len += sizeof(*tnl_lso);
		else
			len += sizeof(*lso);

		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
				       FW_WR_IMMDLEN_V(len));
		if (tnl_type) {
			struct iphdr *iph = ip_hdr(skb);

			t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
			cpl = (void *)(tnl_lso + 1);
			/* Driver is expected to compute partial checksum that
			 * does not include the IP Total Length.
			 */
			if (iph->version == 4) {
				iph->check = 0;
				iph->tot_len = 0;
				iph->check = (u16)(~ip_fast_csum((u8 *)iph,
								 iph->ihl));
			}
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				cntrl = hwcsum(adap->params.chip, skb);
		} else {
			lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
					LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
					LSO_IPV6_V(v6) |
					LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
					LSO_IPHDR_LEN_V(l3hdr_len / 4) |
					LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
			lso->ipid_ofst = htons(0);
			lso->mss = htons(ssi->gso_size);
			lso->seqno_offset = htonl(0);
			if (is_t4(adap->params.chip))
				lso->len = htonl(skb->len);
			else
				lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
			cpl = (void *)(lso + 1);

			if (CHELSIO_CHIP_VERSION(adap->params.chip)
			    <= CHELSIO_T5)
				cntrl =	TXPKT_ETHHDR_LEN_V(eth_xtra_len);
			else
				cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);

			cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
						   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
				 TXPKT_IPHDR_LEN_V(l3hdr_len);
		}
		sgl = (u64 *)(cpl + 1); /* sgl start here */
		if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
			/* If current position is already at the end of the
			 * txq, reset the current to point to start of the queue
			 * and update the end ptr as well.
			 */
			if (sgl == (u64 *)q->q.stat) {
				int left = (u8 *)end - (u8 *)q->q.stat;

				end = (void *)q->q.desc + left;
				sgl = (void *)q->q.desc;
			}
		}
		q->tso++;
		q->tx_cso += ssi->gso_segs;
	} else {
		if (ptp_enabled)
			op = FW_PTP_TX_PKT_WR;
		else
			op = FW_ETH_TX_PKT_WR;
		wr->op_immdlen = htonl(FW_WR_OP_V(op) |
				       FW_WR_IMMDLEN_V(len));
		cpl = (void *)(wr + 1);
		sgl = (u64 *)(cpl + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(adap->params.chip, skb) |
				TXPKT_IPCSUM_DIS_F;
			q->tx_cso++;
		}
	}

	if (skb_vlan_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
#ifdef CONFIG_CHELSIO_T4_FCOE
		if (skb->protocol == htons(ETH_P_FCOE))
			cntrl |= TXPKT_VLAN_V(
				 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
#endif /* CONFIG_CHELSIO_T4_FCOE */
	}

	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
		TXPKT_PF_V(adap->pf);
	if (ptp_enabled)
		ctrl0 |= TXPKT_TSTAMP_F;
#ifdef CONFIG_CHELSIO_T4_DCB
	if (is_t4(adap->params.chip))
		ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
	else
		ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
#endif
	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	if (immediate) {
		cxgb4_inline_tx_skb(skb, &q->q, sgl);
		dev_consume_skb_any(skb);
	} else {
		int last_desc;

		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0, addr);

		last_desc = q->q.pidx + ndesc - 1;
		if (last_desc >= q->q.size)
			last_desc -= q->q.size;
		q->q.sdesc[last_desc].skb = skb;
		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
	}

	txq_advance(&q->q, ndesc);

	cxgb4_ring_tx_db(adap, &q->q, ndesc);
	if (ptp_enabled)
		spin_unlock(&adap->ptp_lock);
	return NETDEV_TX_OK;
}
enum {
	/* Egress Queue sizes, producer and consumer indices are all in units
	 * of Egress Context Units bytes.  Note that as far as the hardware is
	 * concerned, the free list is an Egress Queue (the host produces free
	 * buffers which the hardware consumes) and free list entries are
	 * 64-bit PCI DMA addresses.
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),

	T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			       sizeof(struct cpl_tx_pkt_lso_core) +
			       sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
};
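/* For reference (illustrative arithmetic): SGE_EQ_IDXSIZE is 64 bytes on
 * these adapters, so FL_PER_EQ_UNIT and TXD_PER_EQ_UNIT both work out to 8,
 * and T4VF_ETHTXQ_MAX_HDR is the largest VM Work Request header expressed in
 * 8-byte flits.  The WARN_ON() in cxgb4_vf_eth_xmit() below relies on that
 * header always fitting inside a single TX descriptor.
 */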
/**
 *	t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *
 *	Returns whether an Ethernet packet is small enough to fit completely as
 *	immediate data.
 */
static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
{
	/* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
	 * which does not accommodate immediate data.  We could dike out all
	 * of the support code for immediate data but that would tie our hands
	 * too much if we ever want to enhance the firmware.  It would also
	 * create more differences between the PF and VF Drivers.
	 */
	return false;
}

/**
 *	t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
 *	@skb: the packet
 *
 *	Returns the number of flits needed for a TX Work Request for the
 *	given Ethernet packet, including the needed WR and CPL headers.
 */
static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (t4vf_is_eth_imm(skb))
		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
				    sizeof(__be64));

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size)
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	else
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	return flits;
}
/**
 *	cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
 */
static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
	const struct skb_shared_info *ssi;
	struct fw_eth_tx_pkt_vm_wr *wr;
	int qidx, credits, max_pkt_len;
	struct cpl_tx_pkt_core *cpl;
	const struct port_info *pi;
	unsigned int flits, ndesc;
	struct sge_eth_txq *txq;
	struct adapter *adapter;
	u64 cntrl, *end;
	u32 wr_mid;
	const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) +
				       sizeof(wr->ethmacsrc) +
				       sizeof(wr->ethtype) +
				       sizeof(wr->vlantci);

	/* The chip minimum packet length is 10 octets but the firmware
	 * command that we are using requires that we copy the Ethernet header
	 * (including the VLAN tag) into the header so we reject anything
	 * smaller than that ...
	 */
	if (unlikely(skb->len < fw_hdr_copy_len))
		goto out_free;

	/* Discard the packet if the length is greater than mtu */
	max_pkt_len = ETH_HLEN + dev->mtu;
	if (skb_vlan_tag_present(skb))
		max_pkt_len += VLAN_HLEN;
	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
		goto out_free;

	/* Figure out which TX Queue we're going to use. */
	pi = netdev_priv(dev);
	adapter = pi->adapter;
	qidx = skb_get_queue_mapping(skb);
	WARN_ON(qidx >= pi->nqsets);
	txq = &adapter->sge.ethtxq[pi->first_qset + qidx];

	/* Take this opportunity to reclaim any TX Descriptors whose DMA
	 * transfers have completed.
	 */
	cxgb4_reclaim_completed_tx(adapter, &txq->q, true);

	/* Calculate the number of flits and TX Descriptors we're going to
	 * need along with how many TX Descriptors will be left over after
	 * we inject our Work Request.
	 */
	flits = t4vf_calc_tx_flits(skb);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&txq->q) - ndesc;

	if (unlikely(credits < 0)) {
		/* Not enough room for this packet's Work Request.  Stop the
		 * TX Queue and return a "busy" condition.  The queue will get
		 * started later on when the firmware informs us that space
		 * has opened up.
		 */
		eth_txq_stop(txq);
		dev_err(adapter->pdev_dev,
			"%s: TX ring %u full while queue awake!\n",
			dev->name, qidx);
		return NETDEV_TX_BUSY;
	}

	if (!t4vf_is_eth_imm(skb) &&
	    unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) {
		/* We need to map the skb into PCI DMA space (because it can't
		 * be in-lined directly into the Work Request) and the mapping
		 * operation failed.  Record the error and drop the packet.
		 */
		txq->mapping_err++;
		goto out_free;
	}

	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		/* After we're done injecting the Work Request for this
		 * packet, we'll be below our "stop threshold" so stop the TX
		 * Queue now and schedule a request for an SGE Egress Queue
		 * Update message.  The queue will get started later on when
		 * the firmware processes this Work Request and sends us an
		 * Egress Queue Status Update message indicating that space
		 * has opened up.
		 */
		eth_txq_stop(txq);
		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}

	/* Start filling in our Work Request.  Note that we do _not_ handle
	 * the WR Header wrapping around the TX Descriptor Ring.  If our
	 * maximum header size ever exceeds one TX Descriptor, we'll need to
	 * do something else here.
	 */
	WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
	wr = (void *)&txq->q.desc[txq->q.pidx];
	wr->equiq_to_len16 = cpu_to_be32(wr_mid);
	wr->r3[0] = cpu_to_be32(0);
	wr->r3[1] = cpu_to_be32(0);
	skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
	end = (u64 *)wr + flits;

	/* If this is a Large Send Offload packet we'll put in an LSO CPL
	 * message with an encapsulated TX Packet CPL message.  Otherwise we
	 * just use a TX Packet CPL message.
	 */
	ssi = skb_shinfo(skb);
	if (ssi->gso_size) {
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
		int l3hdr_len = skb_network_header_len(skb);
		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;

		wr->op_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
				    FW_WR_IMMDLEN_V(sizeof(*lso) +
						    sizeof(*cpl)));
		 /* Fill in the LSO CPL message. */
		lso->lso_ctrl =
			cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
				    LSO_FIRST_SLICE_F |
				    LSO_LAST_SLICE_F |
				    LSO_IPV6_V(v6 ? 1 : 0) |
				    LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
				    LSO_IPHDR_LEN_V(l3hdr_len / 4) |
				    LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
		lso->ipid_ofst = cpu_to_be16(0);
		lso->mss = cpu_to_be16(ssi->gso_size);
		lso->seqno_offset = cpu_to_be32(0);
		if (is_t4(adapter->params.chip))
			lso->len = cpu_to_be32(skb->len);
		else
			lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));

		/* Set up TX Packet CPL pointer, control word and perform
		 * accounting.
		 */
		cpl = (void *)(lso + 1);

		if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
			cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
		else
			cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);

		cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
					   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
			 TXPKT_IPHDR_LEN_V(l3hdr_len);
		txq->tso++;
		txq->tx_cso += ssi->gso_segs;
	} else {
		int len;

		len = (t4vf_is_eth_imm(skb)
		       ? skb->len + sizeof(*cpl)
		       : sizeof(*cpl));
		wr->op_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
				    FW_WR_IMMDLEN_V(len));

		/* Set up TX Packet CPL pointer, control word and perform
		 * accounting.
		 */
		cpl = (void *)(wr + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(adapter->params.chip, skb) |
				TXPKT_IPCSUM_DIS_F;
			txq->tx_cso++;
		} else {
			cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
		}
	}

	/* If there's a VLAN tag present, add that to the list of things to
	 * do in this Work Request.
	 */
	if (skb_vlan_tag_present(skb)) {
		txq->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
	}

	/* Fill in the TX Packet CPL message header. */
	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
				 TXPKT_INTF_V(pi->port_id) |
				 TXPKT_PF_V(0));
	cpl->pack = cpu_to_be16(0);
	cpl->len = cpu_to_be16(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	/* Fill in the body of the TX Packet CPL message with either in-lined
	 * data or a Scatter/Gather List.
	 */
	if (t4vf_is_eth_imm(skb)) {
		/* In-line the packet's data and free the skb since we don't
		 * need it any longer.
		 */
		cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
		dev_consume_skb_any(skb);
	} else {
		/* Write the skb's Scatter/Gather list into the TX Packet CPL
		 * message and retain a pointer to the skb so we can free it
		 * later when its DMA completes.  (We store the skb pointer
		 * in the Software Descriptor corresponding to the last TX
		 * Descriptor used by the Work Request.)
		 *
		 * The retained skb will be freed when the corresponding TX
		 * Descriptors are reclaimed after their DMAs complete.
		 * However, this could take quite a while since, in general,
		 * the hardware is set up to be lazy about sending DMA
		 * completion notifications to us and we mostly perform TX
		 * reclaims in the transmit routine.
		 *
		 * This is good for performance but means that we rely on new
		 * TX packets arriving to run the destructors of completed
		 * packets, which open up space in their sockets' send queues.
		 * Sometimes we do not get such new packets causing TX to
		 * stall.  A single UDP transmitter is a good example of this
		 * situation.  We have a clean up timer that periodically
		 * reclaims completed packets but it doesn't run often enough
		 * (nor do we want it to) to prevent lengthy stalls.  A
		 * solution to this problem is to run the destructor early,
		 * after the packet is queued but before it's DMAd.  A con is
		 * that we lie to socket memory accounting, but the amount of
		 * extra memory is reasonable (limited by the number of TX
		 * descriptors), the packets do actually get freed quickly by
		 * new packets almost always, and for protocols like TCP that
		 * wait for acks to really free up the data the extra memory
		 * is even less.  On the positive side we run the destructors
		 * on the sending CPU rather than on a potentially different
		 * completing CPU, usually a good thing.
		 *
		 * Run the destructor before telling the DMA engine about the
		 * packet to make sure it doesn't complete and get freed
		 * prematurely.
		 */
		struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
		struct sge_txq *tq = &txq->q;
		int last_desc;

		/* If the Work Request header was an exact multiple of our TX
		 * Descriptor length, then it's possible that the starting SGL
		 * pointer lines up exactly with the end of our TX Descriptor
		 * ring.  If that's the case, wrap around to the beginning
		 * here ...
		 */
		if (unlikely((void *)sgl == (void *)tq->stat)) {
			sgl = (void *)tq->desc;
			end = (void *)((void *)tq->desc +
				       ((void *)end - (void *)tq->stat));
		}

		cxgb4_write_sgl(skb, tq, sgl, end, 0, addr);
		skb_orphan(skb);

		last_desc = tq->pidx + ndesc - 1;
		if (last_desc >= tq->size)
			last_desc -= tq->size;
		tq->sdesc[last_desc].skb = skb;
		tq->sdesc[last_desc].sgl = sgl;
	}

	/* Advance our internal TX Queue state, tell the hardware about
	 * the new TX descriptors and return success.
	 */
	txq_advance(&txq->q, ndesc);

	cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
	return NETDEV_TX_OK;

out_free:
	/* An error of some sort happened.  Free the TX skb and tell the
	 * OS that we've "dealt" with the packet ...
	 */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
		return cxgb4_vf_eth_xmit(skb, dev);

	return cxgb4_eth_xmit(skb, dev);
}
/**
 *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 *	@q: the SGE control Tx queue
 *
 *	This is a variant of cxgb4_reclaim_completed_tx() that is used
 *	for Tx queues that send only immediate data (presently just
 *	the control queues) and thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
	int reclaim = hw_cidx - q->cidx;

	if (reclaim < 0)
		reclaim += q->size;

	q->in_use -= reclaim;
	q->cidx = hw_cidx;
}
/**
 *	is_imm - check whether a packet can be sent as immediate data
 *	@skb: the packet
 *
 *	Returns true if a packet can be sent as a WR with immediate data.
 */
static inline int is_imm(const struct sk_buff *skb)
{
	return skb->len <= MAX_CTRL_WR_LEN;
}
/**
 *	ctrlq_check_stop - check if a control queue is full and should stop
 *	@q: the queue
 *	@wr: most recent WR written to the queue
 *
 *	Check if a control queue has become full and should be stopped.
 *	We clean up control queue descriptors very lazily, only when we are out.
 *	If the queue is still full after reclaiming any completed descriptors
 *	we suspend it and have the last WR wake it up.
 */
static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
{
	reclaim_completed_tx_imm(&q->q);
	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
		wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
		q->q.stops++;
		q->full = 1;
	}
}
/**
 *	ctrl_xmit - send a packet through an SGE control Tx queue
 *	@q: the control queue
 *	@skb: the packet
 *
 *	Send a packet through an SGE control Tx queue.  Packets sent through
 *	a control queue must fit entirely as immediate data.
 */
static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
{
	unsigned int ndesc;
	struct fw_wr_hdr *wr;

	if (unlikely(!is_imm(skb))) {
		WARN_ON(1);
		dev_kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
	spin_lock(&q->sendq.lock);

	if (unlikely(q->full)) {
		skb->priority = ndesc;                  /* save for restart */
		__skb_queue_tail(&q->sendq, skb);
		spin_unlock(&q->sendq.lock);
		return NET_XMIT_CN;
	}

	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
	cxgb4_inline_tx_skb(skb, &q->q, wr);

	txq_advance(&q->q, ndesc);
	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
		ctrlq_check_stop(q, wr);

	cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
	spin_unlock(&q->sendq.lock);

	kfree_skb(skb);
	return NET_XMIT_SUCCESS;
}
/**
 *	restart_ctrlq - restart a suspended control queue
 *	@data: the control queue to restart
 *
 *	Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(unsigned long data)
{
	struct sk_buff *skb;
	unsigned int written = 0;
	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;

	spin_lock(&q->sendq.lock);
	reclaim_completed_tx_imm(&q->q);
	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);  /* q should be empty */

	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
		struct fw_wr_hdr *wr;
		unsigned int ndesc = skb->priority;     /* previously saved */

		written += ndesc;
		/* Write descriptors and free skbs outside the lock to limit
		 * wait times.  q->full is still set so new skbs will be queued.
		 */
		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
		txq_advance(&q->q, ndesc);
		spin_unlock(&q->sendq.lock);

		cxgb4_inline_tx_skb(skb, &q->q, wr);
		kfree_skb(skb);

		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
			unsigned long old = q->q.stops;

			ctrlq_check_stop(q, wr);
			if (q->q.stops != old) {          /* suspended anew */
				spin_lock(&q->sendq.lock);
				goto ringdb;
			}
		}
		if (written > 16) {
			cxgb4_ring_tx_db(q->adap, &q->q, written);
			written = 0;
		}
		spin_lock(&q->sendq.lock);
	}
	q->full = 0;
ringdb:
	if (written)
		cxgb4_ring_tx_db(q->adap, &q->q, written);
	spin_unlock(&q->sendq.lock);
}
/**
 *	t4_mgmt_tx - send a management message
 *	@adap: the adapter
 *	@skb: the packet containing the management message
 *
 *	Send a management message through control queue 0.
 */
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
	local_bh_enable();
	return ret;
}
/**
 *	is_ofld_imm - check whether a packet can be sent as immediate data
 *	@skb: the packet
 *
 *	Returns true if a packet can be sent as an offload WR with immediate
 *	data.  We currently use the same limit as for Ethernet packets.
 */
static inline int is_ofld_imm(const struct sk_buff *skb)
{
	struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
	unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));

	if (opcode == FW_CRYPTO_LOOKASIDE_WR)
		return skb->len <= SGE_MAX_WR_LEN;

	return skb->len <= MAX_IMM_TX_PKT_LEN;
}
/**
 *	calc_tx_flits_ofld - calculate # of flits for an offload packet
 *	@skb: the packet
 *
 *	Returns the number of flits needed for the given offload packet.
 *	These packets are already fully constructed and no additional headers
 *	will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);

	flits = skb_transport_offset(skb) / 8U;   /* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}
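/* Rough worked example (illustrative numbers only, not from the spec): a
 * non-immediate offload WR with 40 bytes of headers and two page fragments
 * needs 40 / 8 = 5 header flits plus sgl_len(2) flits for the scatter/gather
 * list; flits_to_desc() then rounds the total up to whole Tx descriptors.
 */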
/**
 *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
 *	@adap: the adapter
 *	@q: the queue to stop
 *
 *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
 *	inability to map packets.  A periodic timer attempts to restart
 *	queues so marked.
 */
static void txq_stop_maperr(struct sge_uld_txq *q)
{
	q->mapping_err++;
	q->q.stops++;
	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
		q->adap->sge.txq_maperr);
}
/**
 *	ofldtxq_stop - stop an offload Tx queue that has become full
 *	@q: the queue to stop
 *	@wr: the Work Request causing the queue to become full
 *
 *	Stops an offload Tx queue that has become full and modifies the packet
 *	being written to request a wakeup.
 */
static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
{
	wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
	q->q.stops++;
	q->full = 1;
}
/**
 *	service_ofldq - service/restart a suspended offload queue
 *	@q: the offload queue
 *
 *	Services an offload Tx queue by moving packets from its Pending Send
 *	Queue to the Hardware TX ring.  The function starts and ends with the
 *	Send Queue locked, but drops the lock while putting the skb at the
 *	head of the Send Queue onto the Hardware TX Ring.  Dropping the lock
 *	allows more skbs to be added to the Send Queue by other threads.
 *	The packet being processed at the head of the Pending Send Queue is
 *	left on the queue in case we experience DMA Mapping errors, etc.
 *	and need to give up and restart later.
 *
 *	service_ofldq() can be thought of as a task which opportunistically
 *	uses other threads' execution contexts.  We use the Offload Queue
 *	boolean "service_ofldq_running" to make sure that only one instance
 *	is ever running at a time ...
 */
static void service_ofldq(struct sge_uld_txq *q)
{
	u64 *pos, *before, *end;
	int credits;
	struct sk_buff *skb;
	struct sge_txq *txq;
	unsigned int left;
	unsigned int written = 0;
	unsigned int flits, ndesc;

	/* If another thread is currently in service_ofldq() processing the
	 * Pending Send Queue then there's nothing to do. Otherwise, flag
	 * that we're doing the work and continue.  Examining/modifying
	 * the Offload Queue boolean "service_ofldq_running" must be done
	 * while holding the Pending Send Queue Lock.
	 */
	if (q->service_ofldq_running)
		return;
	q->service_ofldq_running = true;

	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
		/* We drop the lock while we're working with the skb at the
		 * head of the Pending Send Queue.  This allows more skbs to
		 * be added to the Pending Send Queue while we're working on
		 * this one.  We don't need to lock to guard the TX Ring
		 * updates because only one thread of execution is ever
		 * allowed into service_ofldq() at a time.
		 */
		spin_unlock(&q->sendq.lock);

		cxgb4_reclaim_completed_tx(q->adap, &q->q, false);

		flits = skb->priority;                /* previously saved */
		ndesc = flits_to_desc(flits);
		credits = txq_avail(&q->q) - ndesc;
		BUG_ON(credits < 0);
		if (unlikely(credits < TXQ_STOP_THRES))
			ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data);

		pos = (u64 *)&q->q.desc[q->q.pidx];
		if (is_ofld_imm(skb))
			cxgb4_inline_tx_skb(skb, &q->q, pos);
		else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
				       (dma_addr_t *)skb->head)) {
			txq_stop_maperr(q);
			spin_lock(&q->sendq.lock);
			break;
		} else {
			int last_desc, hdr_len = skb_transport_offset(skb);

			/* The WR headers may not fit within one descriptor.
			 * So we need to deal with wrap-around here.
			 */
			before = (u64 *)pos;
			end = (u64 *)pos + flits;
			txq = &q->q;
			pos = (void *)inline_tx_skb_header(skb, &q->q,
							   (void *)pos,
							   hdr_len);
			if (before > (u64 *)pos) {
				left = (u8 *)end - (u8 *)txq->stat;
				end = (void *)txq->desc + left;
			}

			/* If current position is already at the end of the
			 * ofld queue, reset the current to point to
			 * start of the queue and update the end ptr as well.
			 */
			if (pos == (u64 *)txq->stat) {
				left = (u8 *)end - (u8 *)txq->stat;
				end = (void *)txq->desc + left;
				pos = (void *)txq->desc;
			}

			cxgb4_write_sgl(skb, &q->q, (void *)pos,
					end, hdr_len,
					(dma_addr_t *)skb->head);
#ifdef CONFIG_NEED_DMA_MAP_STATE
			skb->dev = q->adap->port[0];
			skb->destructor = deferred_unmap_destructor;
#endif
			last_desc = q->q.pidx + ndesc - 1;
			if (last_desc >= q->q.size)
				last_desc -= q->q.size;
			q->q.sdesc[last_desc].skb = skb;
		}

		txq_advance(&q->q, ndesc);
		written += ndesc;
		if (unlikely(written > 32)) {
			cxgb4_ring_tx_db(q->adap, &q->q, written);
			written = 0;
		}

		/* Reacquire the Pending Send Queue Lock so we can unlink the
		 * skb we've just successfully transferred to the TX Ring and
		 * loop for the next skb which may be at the head of the
		 * Pending Send Queue.
		 */
		spin_lock(&q->sendq.lock);
		__skb_unlink(skb, &q->sendq);
		if (is_ofld_imm(skb))
			kfree_skb(skb);
	}
	if (likely(written))
		cxgb4_ring_tx_db(q->adap, &q->q, written);

	/*Indicate that no thread is processing the Pending Send Queue
	 * currently.
	 */
	q->service_ofldq_running = false;
}
/**
 *	ofld_xmit - send a packet through an offload queue
 *	@q: the Tx offload queue
 *	@skb: the packet
 *
 *	Send an offload packet through an SGE offload queue.
 */
static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
{
	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
	spin_lock(&q->sendq.lock);

	/* Queue the new skb onto the Offload Queue's Pending Send Queue.  If
	 * that results in this new skb being the only one on the queue, start
	 * servicing it.  If there are other skbs already on the list, then
	 * either the queue is currently being processed or it's been stopped
	 * for some reason and it'll be restarted at a later time.  Restart
	 * paths are triggered by events like experiencing a DMA Mapping Error
	 * or filling the Hardware TX Ring.
	 */
	__skb_queue_tail(&q->sendq, skb);
	if (q->sendq.qlen == 1)
		service_ofldq(q);

	spin_unlock(&q->sendq.lock);
	return NET_XMIT_SUCCESS;
}
/**
 *	restart_ofldq - restart a suspended offload queue
 *	@data: the offload queue to restart
 *
 *	Resumes transmission on a suspended Tx offload queue.
 */
static void restart_ofldq(unsigned long data)
{
	struct sge_uld_txq *q = (struct sge_uld_txq *)data;

	spin_lock(&q->sendq.lock);
	q->full = 0;            /* the queue actually is completely empty now */
	service_ofldq(q);
	spin_unlock(&q->sendq.lock);
}
/**
 *	skb_txq - return the Tx queue an offload packet should use
 *	@skb: the packet
 *
 *	Returns the Tx queue an offload packet should use as indicated by bits
 *	1-15 in the packet's queue_mapping.
 */
static inline unsigned int skb_txq(const struct sk_buff *skb)
{
	return skb->queue_mapping >> 1;
}

/**
 *	is_ctrl_pkt - return whether an offload packet is a control packet
 *	@skb: the packet
 *
 *	Returns whether an offload packet should use an OFLD or a CTRL
 *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
 */
static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
{
	return skb->queue_mapping & 1;
}
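/* For illustration only: a sender that wants to steer an offload WR encodes
 * the queue selection in queue_mapping before handing the skb to
 * t4_ofld_send().  ULDs normally use the helper provided by the ULD headers
 * for this; the hypothetical sketch below is not part of the driver API and
 * merely mirrors the bit layout that skb_txq() and is_ctrl_pkt() decode.
 */
static inline void example_set_ofld_txq(struct sk_buff *skb,
					unsigned int txq_idx, bool is_ctrl)
{
	/* bit 0: OFLD (0) vs CTRL (1) queue, bits 1-15: queue index */
	skb->queue_mapping = (txq_idx << 1) | (is_ctrl ? 1 : 0);
}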
static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
			   unsigned int tx_uld_type)
{
	struct sge_uld_txq_info *txq_info;
	struct sge_uld_txq *txq;
	unsigned int idx = skb_txq(skb);

	if (unlikely(is_ctrl_pkt(skb))) {
		/* Single ctrl queue is a requirement for LE workaround path */
		if (adap->tids.nsftids)
			idx = 0;
		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
	}

	txq_info = adap->sge.uld_txq_info[tx_uld_type];
	if (unlikely(!txq_info)) {
		WARN_ON(true);
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	txq = &txq_info->uldtxq[idx];
	return ofld_xmit(txq, skb);
}
/**
 *	t4_ofld_send - send an offload packet
 *	@adap: the adapter
 *	@skb: the packet
 *
 *	Sends an offload packet.  We use the packet queue_mapping to select the
 *	appropriate Tx queue as follows: bit 0 indicates whether the packet
 *	should be sent as regular or control, bits 1-15 select the queue.
 */
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = uld_send(adap, skb, CXGB4_TX_OFLD);
	local_bh_enable();
	return ret;
}
/**
 *	cxgb4_ofld_send - send an offload packet
 *	@dev: the net device
 *	@skb: the packet
 *
 *	Sends an offload packet.  This is an exported version of t4_ofld_send(),
 *	intended for ULDs.
 */
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
{
	return t4_ofld_send(netdev2adap(dev), skb);
}
EXPORT_SYMBOL(cxgb4_ofld_send);
static void *inline_tx_header(const void *src,
			      const struct sge_txq *q,
			      void *pos, int length)
{
	int left = (void *)q->stat - pos;
	u64 *p;

	if (likely(length <= left)) {
		memcpy(pos, src, length);
		pos += length;
	} else {
		memcpy(pos, src, left);
		memcpy(q->desc, src + left, length - left);
		pos = (void *)q->desc + (length - left);
	}
	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8) {
		*p = 0;
		return p + 1;
	}
	return p;
}
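/* Worked example for the wrap handling above (illustrative numbers): if pos
 * is 24 bytes below q->stat and length is 40, the first memcpy() places 24
 * bytes up to the status page and the second places the remaining 16 bytes
 * at the start of the descriptor ring; PTR_ALIGN() then advances the returned
 * position so writes continue on a 16-byte multiple, padding with a zero u64
 * when needed.
 */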
/**
 *	ofld_xmit_direct - copy a WR into offload queue
 *	@q: the Tx offload queue
 *	@src: location of WR
 *	@len: WR length
 *
 *	Copy an immediate WR into an uncontended SGE offload queue.
 */
static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src,
			    unsigned int len)
{
	unsigned int ndesc;
	int credits;
	u64 *pos;

	/* Use the lower limit as the cut-off */
	if (len > MAX_IMM_OFLD_TX_DATA_WR_LEN) {
		WARN_ON(1);
		return NET_XMIT_DROP;
	}

	/* Don't return NET_XMIT_CN here as the current
	 * implementation doesn't queue the request
	 * using an skb when the following conditions not met
	 */
	if (!spin_trylock(&q->sendq.lock))
		return NET_XMIT_DROP;

	if (q->full || !skb_queue_empty(&q->sendq) ||
	    q->service_ofldq_running) {
		spin_unlock(&q->sendq.lock);
		return NET_XMIT_DROP;
	}
	ndesc = flits_to_desc(DIV_ROUND_UP(len, 8));
	credits = txq_avail(&q->q) - ndesc;
	pos = (u64 *)&q->q.desc[q->q.pidx];

	/* ofldtxq_stop modifies WR header in-situ */
	inline_tx_header(src, &q->q, pos, len);
	if (unlikely(credits < TXQ_STOP_THRES))
		ofldtxq_stop(q, (struct fw_wr_hdr *)pos);
	txq_advance(&q->q, ndesc);
	cxgb4_ring_tx_db(q->adap, &q->q, ndesc);

	spin_unlock(&q->sendq.lock);
	return NET_XMIT_SUCCESS;
}
int cxgb4_immdata_send(struct net_device *dev, unsigned int idx,
		       const void *src, unsigned int len)
{
	struct sge_uld_txq_info *txq_info;
	struct sge_uld_txq *txq;
	struct adapter *adap;
	int ret;

	adap = netdev2adap(dev);

	local_bh_disable();
	txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
	if (unlikely(!txq_info)) {
		WARN_ON(true);
		local_bh_enable();
		return NET_XMIT_DROP;
	}
	txq = &txq_info->uldtxq[idx];

	ret = ofld_xmit_direct(txq, src, len);
	local_bh_enable();
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_immdata_send);
/**
 *	t4_crypto_send - send crypto packet
 *	@adap: the adapter
 *	@skb: the packet
 *
 *	Sends crypto packet.  We use the packet queue_mapping to select the
 *	appropriate Tx queue as follows: bit 0 indicates whether the packet
 *	should be sent as regular or control, bits 1-15 select the queue.
 */
static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
	local_bh_enable();
	return ret;
}
/**
 *	cxgb4_crypto_send - send crypto packet
 *	@dev: the net device
 *	@skb: the packet
 *
 *	Sends crypto packet.  This is an exported version of t4_crypto_send(),
 *	intended for ULDs.
 */
int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb)
{
	return t4_crypto_send(netdev2adap(dev), skb);
}
EXPORT_SYMBOL(cxgb4_crypto_send);
static inline void copy_frags(struct sk_buff *skb,
			      const struct pkt_gl *gl, unsigned int offset)
{
	int i;

	/* usually there's just one frag */
	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
			     gl->frags[0].offset + offset,
			     gl->frags[0].size - offset);
	skb_shinfo(skb)->nr_frags = gl->nfrags;
	for (i = 1; i < gl->nfrags; i++)
		__skb_fill_page_desc(skb, i, gl->frags[i].page,
				     gl->frags[i].offset,
				     gl->frags[i].size);

	/* get a reference to the last page, we don't own it */
	get_page(gl->frags[gl->nfrags - 1].page);
}
/**
 *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
 *	@gl: the gather list
 *	@skb_len: size of sk_buff main body if it carries fragments
 *	@pull_len: amount of data to move to the sk_buff's main body
 *
 *	Builds an sk_buff from the given packet gather list.  Returns the
 *	sk_buff or %NULL if sk_buff allocation failed.
 */
struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
				   unsigned int skb_len, unsigned int pull_len)
{
	struct sk_buff *skb;

	/*
	 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
	 * size, which is expected since buffers are at least PAGE_SIZEd.
	 * In this case packets up to RX_COPY_THRES have only one fragment.
	 */
	if (gl->tot_len <= RX_COPY_THRES) {
		skb = dev_alloc_skb(gl->tot_len);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, gl->tot_len);
		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
	} else {
		skb = dev_alloc_skb(skb_len);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, pull_len);
		skb_copy_to_linear_data(skb, gl->va, pull_len);

		copy_frags(skb, gl, pull_len);
		skb->len = gl->tot_len;
		skb->data_len = skb->len - pull_len;
		skb->truesize += skb->data_len;
	}
out:	return skb;
}
EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
/**
 *	t4_pktgl_free - free a packet gather list
 *	@gl: the gather list
 *
 *	Releases the pages of a packet gather list.  We do not own the last
 *	page on the list and do not free it.
 */
static void t4_pktgl_free(const struct pkt_gl *gl)
{
	int n;
	const struct page_frag *p;

	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
		put_page(p->page);
}
/*
 * Process an MPS trace packet.  Give it an unused protocol number so it won't
 * be delivered to anyone and send it to the stack for capture.
 */
static noinline int handle_trace_pkt(struct adapter *adap,
				     const struct pkt_gl *gl)
{
	struct sk_buff *skb;

	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
	if (unlikely(!skb)) {
		t4_pktgl_free(gl);
		return 0;
	}

	if (is_t4(adap->params.chip))
		__skb_pull(skb, sizeof(struct cpl_trace_pkt));
	else
		__skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));

	skb_reset_mac_header(skb);
	skb->protocol = htons(0xffff);
	skb->dev = adap->port[0];
	netif_receive_skb(skb);
	return 0;
}
/**
 *	cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
 *	@adap: the adapter
 *	@hwtstamps: time stamp structure to update
 *	@sgetstamp: 60bit iqe timestamp
 *
 *	Every ingress queue entry has the 60-bit timestamp, convert that timestamp
 *	which is in Core Clock ticks into ktime_t and assign it
 */
static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
				     struct skb_shared_hwtstamps *hwtstamps,
				     u64 sgetstamp)
{
	u64 ns;
	u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);

	ns = div_u64(tmp, adap->params.vpd.cclk);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}
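/* Example of the conversion above (illustrative numbers only, and assuming
 * cclk holds the core clock in kHz as used by the driver's other tick
 * conversions): with cclk = 250000 and a timestamp of 1000 ticks,
 * (1000 * 1000000 + 125000) / 250000 = 4000 ns; the "+ cclk / 2" term simply
 * rounds the division to the nearest nanosecond.
 */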
static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
		   const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len)
{
	struct adapter *adapter = rxq->rspq.adap;
	struct sge *s = &adapter->sge;
	struct port_info *pi;
	int ret;
	struct sk_buff *skb;

	skb = napi_get_frags(&rxq->rspq.napi);
	if (unlikely(!skb)) {
		t4_pktgl_free(gl);
		rxq->stats.rx_drops++;
		return;
	}

	copy_frags(skb, gl, s->pktshift);
	if (tnl_hdr_len)
		skb->csum_level = 1;
	skb->len = gl->tot_len - s->pktshift;
	skb->data_len = skb->len;
	skb->truesize += skb->data_len;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxq->rspq.idx);
	pi = netdev_priv(skb->dev);
	if (pi->rxtstamp)
		cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
					 gl->sgetstamp);
	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
			     PKT_HASH_TYPE_L3);

	if (unlikely(pkt->vlan_ex)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
		rxq->stats.vlan_ex++;
	}
	ret = napi_gro_frags(&rxq->rspq.napi);
	if (ret == GRO_HELD)
		rxq->stats.lro_pkts++;
	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
		rxq->stats.lro_merged++;
	rxq->stats.pkts++;
	rxq->stats.rx_cso++;
}
/**
 *	t4_systim_to_hwstamp - read hardware time stamp
 *	@adap: the adapter
 *	@skb: the packet
 *
 *	Read Time Stamp from MPS packet and insert in skb which
 *	is forwarded to PTP application
 */
static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
					 struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *hwtstamps;
	struct cpl_rx_mps_pkt *cpl = NULL;
	unsigned char *data;
	int offset;

	cpl = (struct cpl_rx_mps_pkt *)skb->data;
	if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) &
	     X_CPL_RX_MPS_PKT_TYPE_PTP))
		return RX_PTP_PKT_ERR;

	data = skb->data + sizeof(*cpl);
	skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt));
	offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN;
	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short))
		return RX_PTP_PKT_ERR;

	hwtstamps = skb_hwtstamps(skb);
	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data)));

	return RX_PTP_PKT_SUC;
}
/**
 *	t4_rx_hststamp - Recv PTP Event Message
 *	@adap: the adapter
 *	@rsp: the response queue descriptor holding the RX_PKT message
 *	@rxq: the response queue holding the RX_PKT message
 *	@skb: the packet
 *
 *	PTP enabled and MPS packet, read HW timestamp
 */
static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
			  struct sge_eth_rxq *rxq, struct sk_buff *skb)
{
	int ret;

	if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) &&
		     !is_t4(adapter->params.chip))) {
		ret = t4_systim_to_hwstamp(adapter, skb);
		if (ret == RX_PTP_PKT_ERR) {
			kfree_skb(skb);
			rxq->stats.rx_drops++;
		}
		return ret;
	}
	return RX_NON_PTP_PKT;
}
/**
 *	t4_tx_hststamp - Loopback PTP Transmit Event Message
 *	@adap: the adapter
 *	@skb: the packet
 *	@dev: the ingress net device
 *
 *	Read hardware timestamp for the loopback PTP Tx event message
 */
static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb,
			  struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) {
		cxgb4_ptp_read_hwstamp(adapter, pi);
		kfree_skb(skb);
		return 0;
	}
	return 1;
}
/**
 *	t4_ethrx_handler - process an ingress ethernet packet
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the RX_PKT message
 *	@si: the gather list of packet fragments
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 */
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
		     const struct pkt_gl *si)
{
	bool csum_ok;
	struct sk_buff *skb;
	const struct cpl_rx_pkt *pkt;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
	struct adapter *adapter = q->adap;
	struct sge *s = &q->adap->sge;
	int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
			    CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
	u16 err_vec, tnl_hdr_len = 0;
	struct port_info *pi;
	int ret = 0;

	if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
		return handle_trace_pkt(q->adap, si);

	pkt = (const struct cpl_rx_pkt *)rsp;
	/* Compressed error vector is enabled for T6 only */
	if (q->adap->params.tp.rx_pkt_encap) {
		err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec));
		tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec));
	} else {
		err_vec = be16_to_cpu(pkt->err_vec);
	}

	csum_ok = pkt->csum_calc && !err_vec &&
		  (q->netdev->features & NETIF_F_RXCSUM);

	if (err_vec)
		rxq->stats.bad_rx_pkts++;

	if (((pkt->l2info & htonl(RXF_TCP_F)) ||
	     tnl_hdr_len) &&
	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
		do_gro(rxq, si, pkt, tnl_hdr_len);
		return 0;
	}

	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
	if (unlikely(!skb)) {
		t4_pktgl_free(si);
		rxq->stats.rx_drops++;
		return 0;
	}
	pi = netdev_priv(q->netdev);

	/* Handle PTP Event Rx packet */
	if (unlikely(pi->ptp_enable)) {
		ret = t4_rx_hststamp(adapter, rsp, rxq, skb);
		if (ret == RX_PTP_PKT_ERR)
			return 0;
	}
	if (likely(!ret))
		__skb_pull(skb, s->pktshift); /* remove ethernet header pad */

	/* Handle the PTP Event Tx Loopback packet */
	if (unlikely(pi->ptp_enable && !ret &&
		     (pkt->l2info & htonl(RXF_UDP_F)) &&
		     cxgb4_ptp_is_ptp_rx(skb))) {
		if (!t4_tx_hststamp(adapter, skb, q->netdev))
			return 0;
	}

	skb->protocol = eth_type_trans(skb, q->netdev);
	skb_record_rx_queue(skb, q->idx);
	if (skb->dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
			     PKT_HASH_TYPE_L3);

	rxq->stats.pkts++;

	if (pi->rxtstamp)
		cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
					 si->sgetstamp);
	if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
		if (!pkt->ip_frag) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			rxq->stats.rx_cso++;
		} else if (pkt->l2info & htonl(RXF_IP_F)) {
			__sum16 c = (__force __sum16)pkt->csum;
			skb->csum = csum_unfold(c);

			if (tnl_hdr_len) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				skb->csum_level = 1;
			} else {
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			rxq->stats.rx_cso++;
		}
	} else {
		skb_checksum_none_assert(skb);
#ifdef CONFIG_CHELSIO_T4_FCOE
#define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
			  RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)

		if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
			if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
			    (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
				if (q->adap->params.tp.rx_pkt_encap)
					csum_ok = err_vec &
						  T6_COMPR_RXERR_SUM_F;
				else
					csum_ok = err_vec & RXERR_CSUM_F;
				if (!csum_ok)
					skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
		}

#undef CPL_RX_PKT_FLAGS
#endif /* CONFIG_CHELSIO_T4_FCOE */
	}

	if (unlikely(pkt->vlan_ex)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
		rxq->stats.vlan_ex++;
	}
	skb_mark_napi_id(skb, &q->napi);
	netif_receive_skb(skb);
	return 0;
}
/**
 *	restore_rx_bufs - put back a packet's Rx buffers
 *	@si: the packet gather list
 *	@q: the SGE free list
 *	@frags: number of FL buffers to restore
 *
 *	Puts back on an FL the Rx buffers associated with @si.  The buffers
 *	have already been unmapped and are left unmapped, we mark them so to
 *	prevent further unmapping attempts.
 *
 *	This function undoes a series of @unmap_rx_buf calls when we find out
 *	that the current packet can't be processed right away afterall and we
 *	need to come back to it later.  This is a very rare event and there's
 *	no effort to make this particularly efficient.
 */
static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
			    int frags)
{
	struct rx_sw_desc *d;

	while (frags--) {
		if (q->cidx == 0)
			q->cidx = q->size - 1;
		else
			q->cidx--;
		d = &q->sdesc[q->cidx];
		d->page = si->frags[frags].page;
		d->dma_addr |= RX_UNMAPPED_BUF;
		q->avail++;
	}
}
/**
 *	is_new_response - check if a response is newly written
 *	@r: the response descriptor
 *	@q: the response queue
 *
 *	Returns true if a response descriptor contains a yet unprocessed
 *	response.
 */
static inline bool is_new_response(const struct rsp_ctrl *r,
				   const struct sge_rspq *q)
{
	return (r->type_gen >> RSPD_GEN_S) == q->gen;
}

/**
 *	rspq_next - advance to the next entry in a response queue
 *	@q: the response queue
 *
 *	Updates the state of a response queue to advance it to the next entry.
 */
static inline void rspq_next(struct sge_rspq *q)
{
	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
	if (unlikely(++q->cidx == q->size)) {
		q->cidx = 0;
		q->gen ^= 1;
		q->cur_desc = q->desc;
	}
}
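/* Illustrative note on the generation bit used above: the queue's gen value
 * (typically set to 1 when the queue is created) is flipped on every wrap of
 * cidx, so an entry whose generation field matches q->gen was written by
 * hardware during the current pass over the ring, while stale entries from
 * the previous pass carry the opposite value and fail is_new_response().
 */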
/**
 *	process_responses - process responses from an SGE response queue
 *	@q: the ingress queue to process
 *	@budget: how many responses can be processed in this round
 *
 *	Process responses from an SGE response queue up to the supplied budget.
 *	Responses include received packets as well as control messages from FW
 *	or HW.
 *
 *	Additionally choose the interrupt holdoff time for the next interrupt
 *	on this queue.  If the system is under memory shortage use a fairly
 *	long delay to help recovery.
 */
static int process_responses(struct sge_rspq *q, int budget)
{
	int ret, rsp_type;
	int budget_left = budget;
	const struct rsp_ctrl *rc;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
	struct adapter *adapter = q->adap;
	struct sge *s = &adapter->sge;

	while (likely(budget_left)) {
		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
		if (!is_new_response(rc, q)) {
			if (q->flush_handler)
				q->flush_handler(q);
			break;
		}

		dma_rmb();
		rsp_type = RSPD_TYPE_G(rc->type_gen);
		if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
			struct page_frag *fp;
			struct pkt_gl si;
			const struct rx_sw_desc *rsd;
			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;

			if (len & RSPD_NEWBUF_F) {
				if (likely(q->offset > 0)) {
					free_rx_bufs(q->adap, &rxq->fl, 1);
					q->offset = 0;
				}
				len = RSPD_LEN_G(len);
			}
			si.tot_len = len;

			/* gather packet fragments */
			for (frags = 0, fp = si.frags; ; frags++, fp++) {
				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
				bufsz = get_buf_size(adapter, rsd);
				fp->page = rsd->page;
				fp->offset = q->offset;
				fp->size = min(bufsz, len);
				len -= fp->size;
				if (!len)
					break;
				unmap_rx_buf(q->adap, &rxq->fl);
			}

			si.sgetstamp = SGE_TIMESTAMP_G(
					be64_to_cpu(rc->last_flit));
			/*
			 * Last buffer remains mapped so explicitly make it
			 * coherent for CPU access.
			 */
			dma_sync_single_for_cpu(q->adap->pdev_dev,
						get_buf_addr(rsd),
						fp->size, DMA_FROM_DEVICE);

			si.va = page_address(si.frags[0].page) +
				si.frags[0].offset;
			prefetch(si.va);

			si.nfrags = frags + 1;
			ret = q->handler(q, q->cur_desc, &si);
			if (likely(ret == 0))
				q->offset += ALIGN(fp->size, s->fl_align);
			else
				restore_rx_bufs(&si, &rxq->fl, frags);
		} else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
			ret = q->handler(q, q->cur_desc, NULL);
		} else {
			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
		}

		if (unlikely(ret)) {
			/* couldn't process descriptor, back off for recovery */
			q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
			break;
		}

		rspq_next(q);
		budget_left--;
	}

	if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
		__refill_fl(q->adap, &rxq->fl);
	return budget - budget_left;
}
/**
 *	napi_rx_handler - the NAPI handler for Rx processing
 *	@napi: the napi instance
 *	@budget: how many packets we can process in this round
 *
 *	Handler for new data events when using NAPI.  This does not need any
 *	locking or protection from interrupts as data interrupts are off at
 *	this point and other adapter interrupts do not interfere (the latter
 *	in not a concern at all with MSI-X as non-data interrupts then have
 *	a separate handler).
 */
static int napi_rx_handler(struct napi_struct *napi, int budget)
{
	unsigned int params;
	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
	int work_done;
	u32 val;

	work_done = process_responses(q, budget);
	if (likely(work_done < budget)) {
		int timer_index;

		napi_complete_done(napi, work_done);
		timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);

		if (q->adaptive_rx) {
			if (work_done > max(timer_pkt_quota[timer_index],
					    MIN_NAPI_WORK))
				timer_index = (timer_index + 1);
			else
				timer_index = timer_index - 1;

			timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
			q->next_intr_params =
					QINTR_TIMER_IDX_V(timer_index) |
					QINTR_CNT_EN_V(0);
			params = q->next_intr_params;
		} else {
			params = q->next_intr_params;
			q->next_intr_params = q->intr_params;
		}
	} else
		params = QINTR_TIMER_IDX_V(7);

	val = CIDXINC_V(work_done) | SEINTARM_V(params);

	/* If we don't have access to the new User GTS (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(q->bar2_addr == NULL)) {
		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
			     val | INGRESSQID_V((u32)q->cntxt_id));
	} else {
		writel(val | INGRESSQID_V(q->bar2_qid),
		       q->bar2_addr + SGE_UDB_GTS);
		wmb();
	}
	return work_done;
}
/*
 * The MSI-X interrupt handler for an SGE response queue.
 */
irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
{
	struct sge_rspq *q = cookie;

	napi_schedule(&q->napi);
	return IRQ_HANDLED;
}
/*
 * Process the indirect interrupt entries in the interrupt queue and kick off
 * NAPI for each queue that has generated an entry.
 */
static unsigned int process_intrq(struct adapter *adap)
{
	unsigned int credits;
	const struct rsp_ctrl *rc;
	struct sge_rspq *q = &adap->sge.intrq;
	u32 val;

	spin_lock(&adap->sge.intrq_lock);
	for (credits = 0; ; credits++) {
		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
		if (!is_new_response(rc, q))
			break;

		dma_rmb();
		if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
			unsigned int qid = ntohl(rc->pldbuflen_qid);

			qid -= adap->sge.ingr_start;
			napi_schedule(&adap->sge.ingr_map[qid]->napi);
		}

		rspq_next(q);
	}

	val =  CIDXINC_V(credits) | SEINTARM_V(q->intr_params);

	/* If we don't have access to the new User GTS (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(q->bar2_addr == NULL)) {
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     val | INGRESSQID_V(q->cntxt_id));
	} else {
		writel(val | INGRESSQID_V(q->bar2_qid),
		       q->bar2_addr + SGE_UDB_GTS);
		wmb();
	}
	spin_unlock(&adap->sge.intrq_lock);
	return credits;
}
/*
 * The MSI interrupt handler, which handles data events from SGE response queues
 * as well as error and other async events as they all use the same MSI vector.
 */
static irqreturn_t t4_intr_msi(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	if (adap->flags & MASTER_PF)
		t4_slow_intr_handler(adap);
	process_intrq(adap);
	return IRQ_HANDLED;
}
/*
 * Interrupt handler for legacy INTx interrupts.
 * Handles data events from SGE response queues as well as error and other
 * async events as they all use the same interrupt line.
 */
static irqreturn_t t4_intr_intx(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
	if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) |
	    process_intrq(adap))
		return IRQ_HANDLED;
	return IRQ_NONE;             /* probably shared interrupt */
}
/**
 *	t4_intr_handler - select the top-level interrupt handler
 *	@adap: the adapter
 *
 *	Selects the top-level interrupt handler based on the type of interrupts
 *	(MSI-X, MSI, or INTx).
 */
irq_handler_t t4_intr_handler(struct adapter *adap)
{
	if (adap->flags & USING_MSIX)
		return t4_sge_intr_msix;
	if (adap->flags & USING_MSI)
		return t4_intr_msi;
	return t4_intr_intx;
}
static void sge_rx_timer_cb(struct timer_list *t)
{
	unsigned long m;
	unsigned int i;
	struct adapter *adap = from_timer(adap, t, sge.rx_timer);
	struct sge *s = &adap->sge;

	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
		for (m = s->starving_fl[i]; m; m &= m - 1) {
			struct sge_eth_rxq *rxq;
			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
			struct sge_fl *fl = s->egr_map[id];

			clear_bit(id, s->starving_fl);
			smp_mb__after_atomic();

			if (fl_starving(adap, fl)) {
				rxq = container_of(fl, struct sge_eth_rxq, fl);
				if (napi_reschedule(&rxq->rspq.napi))
					fl->starving++;
				else
					set_bit(id, s->starving_fl);
			}
		}
	/* The remainder of the SGE RX Timer Callback routine is dedicated to
	 * global Master PF activities like checking for chip ingress stalls,
	 * etc.
	 */
	if (!(adap->flags & MASTER_PF))
		goto done;

	t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);

done:
	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}
static void sge_tx_timer_cb(struct timer_list *t)
{
	unsigned long m;
	unsigned int i, budget;
	struct adapter *adap = from_timer(adap, t, sge.tx_timer);
	struct sge *s = &adap->sge;

	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
		for (m = s->txq_maperr[i]; m; m &= m - 1) {
			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
			struct sge_uld_txq *txq = s->egr_map[id];

			clear_bit(id, s->txq_maperr);
			tasklet_schedule(&txq->qresume_tsk);
		}

	if (!is_t4(adap->params.chip)) {
		struct sge_eth_txq *q = &s->ptptxq;
		int avail;

		spin_lock(&adap->ptp_lock);
		avail = reclaimable(&q->q);

		if (avail) {
			free_tx_desc(adap, &q->q, avail, false);
			q->q.in_use -= avail;
		}
		spin_unlock(&adap->ptp_lock);
	}

	budget = MAX_TIMER_TX_RECLAIM;
	i = s->ethtxq_rover;
	do {
		struct sge_eth_txq *q = &s->ethtxq[i];

		if (q->q.in_use &&
		    time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
		    __netif_tx_trylock(q->txq)) {
			int avail = reclaimable(&q->q);

			if (avail) {
				if (avail > budget)
					avail = budget;

				free_tx_desc(adap, &q->q, avail, true);
				q->q.in_use -= avail;
				budget -= avail;
			}
			__netif_tx_unlock(q->txq);
		}

		if (++i >= s->ethqsets)
			i = 0;
	} while (budget && i != s->ethtxq_rover);
	s->ethtxq_rover = i;
	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
}
/**
 *	bar2_address - return the BAR2 address for an SGE Queue's Registers
 *	@adapter: the adapter
 *	@qid: the SGE Queue ID
 *	@qtype: the SGE Queue Type (Egress or Ingress)
 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 *	Returns the BAR2 address for the SGE Queue Registers associated with
 *	@qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
 *	returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
 *	Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
 *	Registers are supported (e.g. the Write Combining Doorbell Buffer).
 */
static void __iomem *bar2_address(struct adapter *adapter,
				  unsigned int qid,
				  enum t4_bar2_qtype qtype,
				  unsigned int *pbar2_qid)
{
	u64 bar2_qoffset;
	int ret;

	ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
				&bar2_qoffset, pbar2_qid);
	if (ret)
		return NULL;

	return adapter->bar2 + bar2_qoffset;
}
3377 /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
3378 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
3380 int t4_sge_alloc_rxq(struct adapter
*adap
, struct sge_rspq
*iq
, bool fwevtq
,
3381 struct net_device
*dev
, int intr_idx
,
3382 struct sge_fl
*fl
, rspq_handler_t hnd
,
3383 rspq_flush_handler_t flush_hnd
, int cong
)
3387 struct sge
*s
= &adap
->sge
;
3388 struct port_info
*pi
= netdev_priv(dev
);
3389 int relaxed
= !(adap
->flags
& ROOT_NO_RELAXED_ORDERING
);
3391 /* Size needs to be multiple of 16, including status entry. */
3392 iq
->size
= roundup(iq
->size
, 16);
3394 iq
->desc
= alloc_ring(adap
->pdev_dev
, iq
->size
, iq
->iqe_len
, 0,
3395 &iq
->phys_addr
, NULL
, 0,
3396 dev_to_node(adap
->pdev_dev
));
3400 memset(&c
, 0, sizeof(c
));
3401 c
.op_to_vfn
= htonl(FW_CMD_OP_V(FW_IQ_CMD
) | FW_CMD_REQUEST_F
|
3402 FW_CMD_WRITE_F
| FW_CMD_EXEC_F
|
3403 FW_IQ_CMD_PFN_V(adap
->pf
) | FW_IQ_CMD_VFN_V(0));
3404 c
.alloc_to_len16
= htonl(FW_IQ_CMD_ALLOC_F
| FW_IQ_CMD_IQSTART_F
|
3406 c
.type_to_iqandstindex
= htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP
) |
3407 FW_IQ_CMD_IQASYNCH_V(fwevtq
) | FW_IQ_CMD_VIID_V(pi
->viid
) |
3408 FW_IQ_CMD_IQANDST_V(intr_idx
< 0) |
3409 FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X
) |
3410 FW_IQ_CMD_IQANDSTINDEX_V(intr_idx
>= 0 ? intr_idx
:
3412 c
.iqdroprss_to_iqesize
= htons(FW_IQ_CMD_IQPCIECH_V(pi
->tx_chan
) |
3413 FW_IQ_CMD_IQGTSMODE_F
|
3414 FW_IQ_CMD_IQINTCNTTHRESH_V(iq
->pktcnt_idx
) |
3415 FW_IQ_CMD_IQESIZE_V(ilog2(iq
->iqe_len
) - 4));
3416 c
.iqsize
= htons(iq
->size
);
3417 c
.iqaddr
= cpu_to_be64(iq
->phys_addr
);
3419 c
.iqns_to_fl0congen
= htonl(FW_IQ_CMD_IQFLINTCONGEN_F
|
3420 FW_IQ_CMD_IQTYPE_V(cong
? FW_IQ_IQTYPE_NIC
3421 : FW_IQ_IQTYPE_OFLD
));
3424 enum chip_type chip
= CHELSIO_CHIP_VERSION(adap
->params
.chip
);
3426 /* Allocate the ring for the hardware free list (with space
3427 * for its status page) along with the associated software
3428 * descriptor ring. The free list size needs to be a multiple
3429 * of the Egress Queue Unit and at least 2 Egress Units larger
3430 * than the SGE's Egress Congrestion Threshold
3431 * (fl_starve_thres - 1).
3433 if (fl
->size
< s
->fl_starve_thres
- 1 + 2 * 8)
3434 fl
->size
= s
->fl_starve_thres
- 1 + 2 * 8;
3435 fl
->size
= roundup(fl
->size
, 8);
3436 fl
->desc
= alloc_ring(adap
->pdev_dev
, fl
->size
, sizeof(__be64
),
3437 sizeof(struct rx_sw_desc
), &fl
->addr
,
3438 &fl
->sdesc
, s
->stat_len
,
3439 dev_to_node(adap
->pdev_dev
));
3443 flsz
= fl
->size
/ 8 + s
->stat_len
/ sizeof(struct tx_desc
);
3444 c
.iqns_to_fl0congen
|= htonl(FW_IQ_CMD_FL0PACKEN_F
|
3445 FW_IQ_CMD_FL0FETCHRO_V(relaxed
) |
3446 FW_IQ_CMD_FL0DATARO_V(relaxed
) |
3447 FW_IQ_CMD_FL0PADEN_F
);
3449 c
.iqns_to_fl0congen
|=
3450 htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong
) |
3451 FW_IQ_CMD_FL0CONGCIF_F
|
3452 FW_IQ_CMD_FL0CONGEN_F
);
3453 /* In T6, for egress queue type FL there is internal overhead
3454 * of 16B for header going into FLM module. Hence the maximum
3455 * allowed burst size is 448 bytes. For T4/T5, the hardware
3456 * doesn't coalesce fetch requests if more than 64 bytes of
3457 * Free List pointers are provided, so we use a 128-byte Fetch
3458 * Burst Minimum there (T6 implements coalescing so we can use
3459 * the smaller 64-byte value there).
3461 c
.fl0dcaen_to_fl0cidxfthresh
=
3462 htons(FW_IQ_CMD_FL0FBMIN_V(chip
<= CHELSIO_T5
?
3463 FETCHBURSTMIN_128B_X
:
3464 FETCHBURSTMIN_64B_X
) |
3465 FW_IQ_CMD_FL0FBMAX_V((chip
<= CHELSIO_T5
) ?
3466 FETCHBURSTMAX_512B_X
:
3467 FETCHBURSTMAX_256B_X
));
3468 c
.fl0size
= htons(flsz
);
3469 c
.fl0addr
= cpu_to_be64(fl
->addr
);
3472 ret
= t4_wr_mbox(adap
, adap
->mbox
, &c
, sizeof(c
), &c
);
3476 netif_napi_add(dev
, &iq
->napi
, napi_rx_handler
, 64);
3477 iq
->cur_desc
= iq
->desc
;
3480 iq
->next_intr_params
= iq
->intr_params
;
3481 iq
->cntxt_id
= ntohs(c
.iqid
);
3482 iq
->abs_id
= ntohs(c
.physiqid
);
3483 iq
->bar2_addr
= bar2_address(adap
,
3485 T4_BAR2_QTYPE_INGRESS
,
3487 iq
->size
--; /* subtract status entry */
3490 iq
->flush_handler
= flush_hnd
;
3492 memset(&iq
->lro_mgr
, 0, sizeof(struct t4_lro_mgr
));
3493 skb_queue_head_init(&iq
->lro_mgr
.lroq
);
3495 /* set offset to -1 to distinguish ingress queues without FL */
3496 iq
->offset
= fl
? 0 : -1;
3498 adap
->sge
.ingr_map
[iq
->cntxt_id
- adap
->sge
.ingr_start
] = iq
;
3501 fl
->cntxt_id
= ntohs(c
.fl0id
);
3502 fl
->avail
= fl
->pend_cred
= 0;
3503 fl
->pidx
= fl
->cidx
= 0;
3504 fl
->alloc_failed
= fl
->large_alloc_failed
= fl
->starving
= 0;
3505 adap
->sge
.egr_map
[fl
->cntxt_id
- adap
->sge
.egr_start
] = fl
;
3507 /* Note, we must initialize the BAR2 Free List User Doorbell
3508 * information before refilling the Free List!
3510 fl
->bar2_addr
= bar2_address(adap
,
3512 T4_BAR2_QTYPE_EGRESS
,
3514 refill_fl(adap
, fl
, fl_cap(fl
), GFP_KERNEL
);
3517 /* For T5 and later we attempt to set up the Congestion Manager values
3518 * of the new RX Ethernet Queue. This should really be handled by
3519 * firmware because it's more complex than any host driver wants to
3520 * get involved with and it's different per chip and this is almost
3521 * certainly wrong. Firmware would be wrong as well, but it would be
3522 * a lot easier to fix in one place ... For now we do something very
3523 * simple (and hopefully less wrong).
3525 if (!is_t4(adap
->params
.chip
) && cong
>= 0) {
3526 u32 param
, val
, ch_map
= 0;
3528 u16 cng_ch_bits_log
= adap
->params
.arch
.cng_ch_bits_log
;
3530 param
= (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ
) |
3531 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT
) |
3532 FW_PARAMS_PARAM_YZ_V(iq
->cntxt_id
));
3534 val
= CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X
);
3537 CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X
);
3538 for (i
= 0; i
< 4; i
++) {
3539 if (cong
& (1 << i
))
3540 ch_map
|= 1 << (i
<< cng_ch_bits_log
);
3542 val
|= CONMCTXT_CNGCHMAP_V(ch_map
);
3544 ret
= t4_set_params(adap
, adap
->mbox
, adap
->pf
, 0, 1,
3547 dev_warn(adap
->pdev_dev
, "Failed to set Congestion"
3548 " Manager Context for Ingress Queue %d: %d\n",
3549 iq
->cntxt_id
, -ret
);
3558 dma_free_coherent(adap
->pdev_dev
, iq
->size
* iq
->iqe_len
,
3559 iq
->desc
, iq
->phys_addr
);
3562 if (fl
&& fl
->desc
) {
3565 dma_free_coherent(adap
->pdev_dev
, flsz
* sizeof(struct tx_desc
),
3566 fl
->desc
, fl
->addr
);
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
	q->cntxt_id = id;
	q->bar2_addr = bar2_address(adap,
				    q->cntxt_id,
				    T4_BAR2_QTYPE_EGRESS,
				    &q->bar2_qid);
	q->in_use = 0;
	q->cidx = q->pidx = 0;
	q->stops = q->restarts = 0;
	q->stat = (void *)&q->desc[q->size];
	spin_lock_init(&q->db_lock);
	adap->sge.egr_map[id - adap->sge.egr_start] = q;
}
3587 int t4_sge_alloc_eth_txq(struct adapter
*adap
, struct sge_eth_txq
*txq
,
3588 struct net_device
*dev
, struct netdev_queue
*netdevq
,
3592 struct fw_eq_eth_cmd c
;
3593 struct sge
*s
= &adap
->sge
;
3594 struct port_info
*pi
= netdev_priv(dev
);
3596 /* Add status entries */
3597 nentries
= txq
->q
.size
+ s
->stat_len
/ sizeof(struct tx_desc
);
3599 txq
->q
.desc
= alloc_ring(adap
->pdev_dev
, txq
->q
.size
,
3600 sizeof(struct tx_desc
), sizeof(struct tx_sw_desc
),
3601 &txq
->q
.phys_addr
, &txq
->q
.sdesc
, s
->stat_len
,
3602 netdev_queue_numa_node_read(netdevq
));
3606 memset(&c
, 0, sizeof(c
));
3607 c
.op_to_vfn
= htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD
) | FW_CMD_REQUEST_F
|
3608 FW_CMD_WRITE_F
| FW_CMD_EXEC_F
|
3609 FW_EQ_ETH_CMD_PFN_V(adap
->pf
) |
3610 FW_EQ_ETH_CMD_VFN_V(0));
3611 c
.alloc_to_len16
= htonl(FW_EQ_ETH_CMD_ALLOC_F
|
3612 FW_EQ_ETH_CMD_EQSTART_F
| FW_LEN16(c
));
3613 c
.viid_pkd
= htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F
|
3614 FW_EQ_ETH_CMD_VIID_V(pi
->viid
));
3615 c
.fetchszm_to_iqid
=
3616 htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X
) |
3617 FW_EQ_ETH_CMD_PCIECHN_V(pi
->tx_chan
) |
3618 FW_EQ_ETH_CMD_FETCHRO_F
| FW_EQ_ETH_CMD_IQID_V(iqid
));
3620 htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X
) |
3621 FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X
) |
3622 FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X
) |
3623 FW_EQ_ETH_CMD_EQSIZE_V(nentries
));
3624 c
.eqaddr
= cpu_to_be64(txq
->q
.phys_addr
);
3626 ret
= t4_wr_mbox(adap
, adap
->mbox
, &c
, sizeof(c
), &c
);
3628 kfree(txq
->q
.sdesc
);
3629 txq
->q
.sdesc
= NULL
;
3630 dma_free_coherent(adap
->pdev_dev
,
3631 nentries
* sizeof(struct tx_desc
),
3632 txq
->q
.desc
, txq
->q
.phys_addr
);
3637 txq
->q
.q_type
= CXGB4_TXQ_ETH
;
3638 init_txq(adap
, &txq
->q
, FW_EQ_ETH_CMD_EQID_G(ntohl(c
.eqid_pkd
)));
3640 txq
->tso
= txq
->tx_cso
= txq
->vlan_ins
= 0;
3641 txq
->mapping_err
= 0;
3645 int t4_sge_alloc_ctrl_txq(struct adapter
*adap
, struct sge_ctrl_txq
*txq
,
3646 struct net_device
*dev
, unsigned int iqid
,
3647 unsigned int cmplqid
)
3650 struct fw_eq_ctrl_cmd c
;
3651 struct sge
*s
= &adap
->sge
;
3652 struct port_info
*pi
= netdev_priv(dev
);
3654 /* Add status entries */
3655 nentries
= txq
->q
.size
+ s
->stat_len
/ sizeof(struct tx_desc
);
3657 txq
->q
.desc
= alloc_ring(adap
->pdev_dev
, nentries
,
3658 sizeof(struct tx_desc
), 0, &txq
->q
.phys_addr
,
3659 NULL
, 0, dev_to_node(adap
->pdev_dev
));
3663 c
.op_to_vfn
= htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD
) | FW_CMD_REQUEST_F
|
3664 FW_CMD_WRITE_F
| FW_CMD_EXEC_F
|
3665 FW_EQ_CTRL_CMD_PFN_V(adap
->pf
) |
3666 FW_EQ_CTRL_CMD_VFN_V(0));
3667 c
.alloc_to_len16
= htonl(FW_EQ_CTRL_CMD_ALLOC_F
|
3668 FW_EQ_CTRL_CMD_EQSTART_F
| FW_LEN16(c
));
3669 c
.cmpliqid_eqid
= htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid
));
3670 c
.physeqid_pkd
= htonl(0);
3671 c
.fetchszm_to_iqid
=
3672 htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X
) |
3673 FW_EQ_CTRL_CMD_PCIECHN_V(pi
->tx_chan
) |
3674 FW_EQ_CTRL_CMD_FETCHRO_F
| FW_EQ_CTRL_CMD_IQID_V(iqid
));
3676 htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X
) |
3677 FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X
) |
3678 FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X
) |
3679 FW_EQ_CTRL_CMD_EQSIZE_V(nentries
));
3680 c
.eqaddr
= cpu_to_be64(txq
->q
.phys_addr
);
3682 ret
= t4_wr_mbox(adap
, adap
->mbox
, &c
, sizeof(c
), &c
);
3684 dma_free_coherent(adap
->pdev_dev
,
3685 nentries
* sizeof(struct tx_desc
),
3686 txq
->q
.desc
, txq
->q
.phys_addr
);
3691 txq
->q
.q_type
= CXGB4_TXQ_CTRL
;
3692 init_txq(adap
, &txq
->q
, FW_EQ_CTRL_CMD_EQID_G(ntohl(c
.cmpliqid_eqid
)));
3694 skb_queue_head_init(&txq
->sendq
);
3695 tasklet_init(&txq
->qresume_tsk
, restart_ctrlq
, (unsigned long)txq
);
int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
			unsigned int cmplqid)
{
	u32 param, val;

	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
		 FW_PARAMS_PARAM_YZ_V(eqid));
	val = cmplqid;
	return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
}
3712 int t4_sge_alloc_uld_txq(struct adapter
*adap
, struct sge_uld_txq
*txq
,
3713 struct net_device
*dev
, unsigned int iqid
,
3714 unsigned int uld_type
)
3717 struct fw_eq_ofld_cmd c
;
3718 struct sge
*s
= &adap
->sge
;
3719 struct port_info
*pi
= netdev_priv(dev
);
3720 int cmd
= FW_EQ_OFLD_CMD
;
3722 /* Add status entries */
3723 nentries
= txq
->q
.size
+ s
->stat_len
/ sizeof(struct tx_desc
);
3725 txq
->q
.desc
= alloc_ring(adap
->pdev_dev
, txq
->q
.size
,
3726 sizeof(struct tx_desc
), sizeof(struct tx_sw_desc
),
3727 &txq
->q
.phys_addr
, &txq
->q
.sdesc
, s
->stat_len
,
3732 memset(&c
, 0, sizeof(c
));
3733 if (unlikely(uld_type
== CXGB4_TX_CRYPTO
))
3734 cmd
= FW_EQ_CTRL_CMD
;
3735 c
.op_to_vfn
= htonl(FW_CMD_OP_V(cmd
) | FW_CMD_REQUEST_F
|
3736 FW_CMD_WRITE_F
| FW_CMD_EXEC_F
|
3737 FW_EQ_OFLD_CMD_PFN_V(adap
->pf
) |
3738 FW_EQ_OFLD_CMD_VFN_V(0));
3739 c
.alloc_to_len16
= htonl(FW_EQ_OFLD_CMD_ALLOC_F
|
3740 FW_EQ_OFLD_CMD_EQSTART_F
| FW_LEN16(c
));
3741 c
.fetchszm_to_iqid
=
3742 htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X
) |
3743 FW_EQ_OFLD_CMD_PCIECHN_V(pi
->tx_chan
) |
3744 FW_EQ_OFLD_CMD_FETCHRO_F
| FW_EQ_OFLD_CMD_IQID_V(iqid
));
3746 htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X
) |
3747 FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X
) |
3748 FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X
) |
3749 FW_EQ_OFLD_CMD_EQSIZE_V(nentries
));
3750 c
.eqaddr
= cpu_to_be64(txq
->q
.phys_addr
);
3752 ret
= t4_wr_mbox(adap
, adap
->mbox
, &c
, sizeof(c
), &c
);
3754 kfree(txq
->q
.sdesc
);
3755 txq
->q
.sdesc
= NULL
;
3756 dma_free_coherent(adap
->pdev_dev
,
3757 nentries
* sizeof(struct tx_desc
),
3758 txq
->q
.desc
, txq
->q
.phys_addr
);
3763 txq
->q
.q_type
= CXGB4_TXQ_ULD
;
3764 init_txq(adap
, &txq
->q
, FW_EQ_OFLD_CMD_EQID_G(ntohl(c
.eqid_pkd
)));
3766 skb_queue_head_init(&txq
->sendq
);
3767 tasklet_init(&txq
->qresume_tsk
, restart_ofldq
, (unsigned long)txq
);
3769 txq
->mapping_err
= 0;
void free_txq(struct adapter *adap, struct sge_txq *q)
{
	struct sge *s = &adap->sge;

	dma_free_coherent(adap->pdev_dev,
			  q->size * sizeof(struct tx_desc) + s->stat_len,
			  q->desc, q->phys_addr);
	q->cntxt_id = 0;
	q->sdesc = NULL;
	q->desc = NULL;
}
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
		  struct sge_fl *fl)
{
	struct sge *s = &adap->sge;
	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;

	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
		   rq->cntxt_id, fl_id, 0xffff);
	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
			  rq->desc, rq->phys_addr);
	netif_napi_del(&rq->napi);
	rq->netdev = NULL;
	rq->cntxt_id = rq->abs_id = 0;
	rq->desc = NULL;

	if (fl) {
		free_rx_bufs(adap, fl, fl->avail);
		dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
				  fl->desc, fl->addr);
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		fl->cntxt_id = 0;
		fl->desc = NULL;
	}
}
/**
 *	t4_free_ofld_rxqs - free a block of consecutive Rx queues
 *	@adap: the adapter
 *	@n: number of queues
 *	@q: pointer to first queue
 *
 *	Release the resources of a consecutive block of offload Rx queues.
 */
void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++)
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
}
3829 * t4_free_sge_resources - free SGE resources
3830 * @adap: the adapter
3832 * Frees resources used by the SGE queue sets.
3834 void t4_free_sge_resources(struct adapter
*adap
)
3837 struct sge_eth_rxq
*eq
;
3838 struct sge_eth_txq
*etq
;
3840 /* stop all Rx queues in order to start them draining */
3841 for (i
= 0; i
< adap
->sge
.ethqsets
; i
++) {
3842 eq
= &adap
->sge
.ethrxq
[i
];
3844 t4_iq_stop(adap
, adap
->mbox
, adap
->pf
, 0,
3845 FW_IQ_TYPE_FL_INT_CAP
,
3847 eq
->fl
.size
? eq
->fl
.cntxt_id
: 0xffff,
3851 /* clean up Ethernet Tx/Rx queues */
3852 for (i
= 0; i
< adap
->sge
.ethqsets
; i
++) {
3853 eq
= &adap
->sge
.ethrxq
[i
];
3855 free_rspq_fl(adap
, &eq
->rspq
,
3856 eq
->fl
.size
? &eq
->fl
: NULL
);
3858 etq
= &adap
->sge
.ethtxq
[i
];
3860 t4_eth_eq_free(adap
, adap
->mbox
, adap
->pf
, 0,
3862 __netif_tx_lock_bh(etq
->txq
);
3863 free_tx_desc(adap
, &etq
->q
, etq
->q
.in_use
, true);
3864 __netif_tx_unlock_bh(etq
->txq
);
3865 kfree(etq
->q
.sdesc
);
3866 free_txq(adap
, &etq
->q
);
3870 /* clean up control Tx queues */
3871 for (i
= 0; i
< ARRAY_SIZE(adap
->sge
.ctrlq
); i
++) {
3872 struct sge_ctrl_txq
*cq
= &adap
->sge
.ctrlq
[i
];
3875 tasklet_kill(&cq
->qresume_tsk
);
3876 t4_ctrl_eq_free(adap
, adap
->mbox
, adap
->pf
, 0,
3878 __skb_queue_purge(&cq
->sendq
);
3879 free_txq(adap
, &cq
->q
);
3883 if (adap
->sge
.fw_evtq
.desc
)
3884 free_rspq_fl(adap
, &adap
->sge
.fw_evtq
, NULL
);
3886 if (adap
->sge
.intrq
.desc
)
3887 free_rspq_fl(adap
, &adap
->sge
.intrq
, NULL
);
3889 if (!is_t4(adap
->params
.chip
)) {
3890 etq
= &adap
->sge
.ptptxq
;
3892 t4_eth_eq_free(adap
, adap
->mbox
, adap
->pf
, 0,
3894 spin_lock_bh(&adap
->ptp_lock
);
3895 free_tx_desc(adap
, &etq
->q
, etq
->q
.in_use
, true);
3896 spin_unlock_bh(&adap
->ptp_lock
);
3897 kfree(etq
->q
.sdesc
);
3898 free_txq(adap
, &etq
->q
);
3902 /* clear the reverse egress queue map */
3903 memset(adap
->sge
.egr_map
, 0,
3904 adap
->sge
.egr_sz
* sizeof(*adap
->sge
.egr_map
));
void t4_sge_start(struct adapter *adap)
{
	adap->sge.ethtxq_rover = 0;
	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
}
3915 * t4_sge_stop - disable SGE operation
3916 * @adap: the adapter
3918 * Stop tasklets and timers associated with the DMA engine. Note that
3919 * this is effective only if measures have been taken to disable any HW
3920 * events that may restart them.
3922 void t4_sge_stop(struct adapter
*adap
)
3925 struct sge
*s
= &adap
->sge
;
3927 if (in_interrupt()) /* actions below require waiting */
3930 if (s
->rx_timer
.function
)
3931 del_timer_sync(&s
->rx_timer
);
3932 if (s
->tx_timer
.function
)
3933 del_timer_sync(&s
->tx_timer
);
3935 if (is_offload(adap
)) {
3936 struct sge_uld_txq_info
*txq_info
;
3938 txq_info
= adap
->sge
.uld_txq_info
[CXGB4_TX_OFLD
];
3940 struct sge_uld_txq
*txq
= txq_info
->uldtxq
;
3942 for_each_ofldtxq(&adap
->sge
, i
) {
3944 tasklet_kill(&txq
->qresume_tsk
);
3949 if (is_pci_uld(adap
)) {
3950 struct sge_uld_txq_info
*txq_info
;
3952 txq_info
= adap
->sge
.uld_txq_info
[CXGB4_TX_CRYPTO
];
3954 struct sge_uld_txq
*txq
= txq_info
->uldtxq
;
3956 for_each_ofldtxq(&adap
->sge
, i
) {
3958 tasklet_kill(&txq
->qresume_tsk
);
3963 for (i
= 0; i
< ARRAY_SIZE(s
->ctrlq
); i
++) {
3964 struct sge_ctrl_txq
*cq
= &s
->ctrlq
[i
];
3967 tasklet_kill(&cq
->qresume_tsk
);
3972 * t4_sge_init_soft - grab core SGE values needed by SGE code
3973 * @adap: the adapter
3975 * We need to grab the SGE operating parameters that we need to have
3976 * in order to do our job and make sure we can live with them.
3979 static int t4_sge_init_soft(struct adapter
*adap
)
3981 struct sge
*s
= &adap
->sge
;
3982 u32 fl_small_pg
, fl_large_pg
, fl_small_mtu
, fl_large_mtu
;
3983 u32 timer_value_0_and_1
, timer_value_2_and_3
, timer_value_4_and_5
;
3984 u32 ingress_rx_threshold
;
3987 * Verify that CPL messages are going to the Ingress Queue for
3988 * process_responses() and that only packet data is going to the
3991 if ((t4_read_reg(adap
, SGE_CONTROL_A
) & RXPKTCPLMODE_F
) !=
3992 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X
)) {
3993 dev_err(adap
->pdev_dev
, "bad SGE CPL MODE\n");
3998 * Validate the Host Buffer Register Array indices that we want to
4001 * XXX Note that we should really read through the Host Buffer Size
4002 * XXX register array and find the indices of the Buffer Sizes which
4003 * XXX meet our needs!
4005 #define READ_FL_BUF(x) \
4006 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
4008 fl_small_pg
= READ_FL_BUF(RX_SMALL_PG_BUF
);
4009 fl_large_pg
= READ_FL_BUF(RX_LARGE_PG_BUF
);
4010 fl_small_mtu
= READ_FL_BUF(RX_SMALL_MTU_BUF
);
4011 fl_large_mtu
= READ_FL_BUF(RX_LARGE_MTU_BUF
);
4013 /* We only bother using the Large Page logic if the Large Page Buffer
4014 * is larger than our Page Size Buffer.
4016 if (fl_large_pg
<= fl_small_pg
)
4021 /* The Page Size Buffer must be exactly equal to our Page Size and the
4022 * Large Page Size Buffer should be 0 (per above) or a power of 2.
4024 if (fl_small_pg
!= PAGE_SIZE
||
4025 (fl_large_pg
& (fl_large_pg
-1)) != 0) {
4026 dev_err(adap
->pdev_dev
, "bad SGE FL page buffer sizes [%d, %d]\n",
4027 fl_small_pg
, fl_large_pg
);
4031 s
->fl_pg_order
= ilog2(fl_large_pg
) - PAGE_SHIFT
;
4033 if (fl_small_mtu
< FL_MTU_SMALL_BUFSIZE(adap
) ||
4034 fl_large_mtu
< FL_MTU_LARGE_BUFSIZE(adap
)) {
4035 dev_err(adap
->pdev_dev
, "bad SGE FL MTU sizes [%d, %d]\n",
4036 fl_small_mtu
, fl_large_mtu
);
4041 * Retrieve our RX interrupt holdoff timer values and counter
4042 * threshold values from the SGE parameters.
4044 timer_value_0_and_1
= t4_read_reg(adap
, SGE_TIMER_VALUE_0_AND_1_A
);
4045 timer_value_2_and_3
= t4_read_reg(adap
, SGE_TIMER_VALUE_2_AND_3_A
);
4046 timer_value_4_and_5
= t4_read_reg(adap
, SGE_TIMER_VALUE_4_AND_5_A
);
4047 s
->timer_val
[0] = core_ticks_to_us(adap
,
4048 TIMERVALUE0_G(timer_value_0_and_1
));
4049 s
->timer_val
[1] = core_ticks_to_us(adap
,
4050 TIMERVALUE1_G(timer_value_0_and_1
));
4051 s
->timer_val
[2] = core_ticks_to_us(adap
,
4052 TIMERVALUE2_G(timer_value_2_and_3
));
4053 s
->timer_val
[3] = core_ticks_to_us(adap
,
4054 TIMERVALUE3_G(timer_value_2_and_3
));
4055 s
->timer_val
[4] = core_ticks_to_us(adap
,
4056 TIMERVALUE4_G(timer_value_4_and_5
));
4057 s
->timer_val
[5] = core_ticks_to_us(adap
,
4058 TIMERVALUE5_G(timer_value_4_and_5
));
4060 ingress_rx_threshold
= t4_read_reg(adap
, SGE_INGRESS_RX_THRESHOLD_A
);
4061 s
->counter_val
[0] = THRESHOLD_0_G(ingress_rx_threshold
);
4062 s
->counter_val
[1] = THRESHOLD_1_G(ingress_rx_threshold
);
4063 s
->counter_val
[2] = THRESHOLD_2_G(ingress_rx_threshold
);
4064 s
->counter_val
[3] = THRESHOLD_3_G(ingress_rx_threshold
);
4070 * t4_sge_init - initialize SGE
4071 * @adap: the adapter
4073 * Perform low-level SGE code initialization needed every time after a
4076 int t4_sge_init(struct adapter
*adap
)
4078 struct sge
*s
= &adap
->sge
;
4079 u32 sge_control
, sge_conm_ctrl
;
4080 int ret
, egress_threshold
;
4083 * Ingress Padding Boundary and Egress Status Page Size are set up by
4084 * t4_fixup_host_params().
4086 sge_control
= t4_read_reg(adap
, SGE_CONTROL_A
);
4087 s
->pktshift
= PKTSHIFT_G(sge_control
);
4088 s
->stat_len
= (sge_control
& EGRSTATUSPAGESIZE_F
) ? 128 : 64;
4090 s
->fl_align
= t4_fl_pkt_align(adap
);
4091 ret
= t4_sge_init_soft(adap
);
4096 * A FL with <= fl_starve_thres buffers is starving and a periodic
4097 * timer will attempt to refill it. This needs to be larger than the
4098 * SGE's Egress Congestion Threshold. If it isn't, then we can get
4099 * stuck waiting for new packets while the SGE is waiting for us to
4100 * give it more Free List entries. (Note that the SGE's Egress
4101 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
4102 * there was only a single field to control this. For T5 there's the
4103 * original field which now only applies to Unpacked Mode Free List
4104 * buffers and a new field which only applies to Packed Mode Free List
4107 sge_conm_ctrl
= t4_read_reg(adap
, SGE_CONM_CTRL_A
);
4108 switch (CHELSIO_CHIP_VERSION(adap
->params
.chip
)) {
4110 egress_threshold
= EGRTHRESHOLD_G(sge_conm_ctrl
);
4113 egress_threshold
= EGRTHRESHOLDPACKING_G(sge_conm_ctrl
);
4116 egress_threshold
= T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl
);
4119 dev_err(adap
->pdev_dev
, "Unsupported Chip version %d\n",
4120 CHELSIO_CHIP_VERSION(adap
->params
.chip
));
4123 s
->fl_starve_thres
= 2*egress_threshold
+ 1;
4125 t4_idma_monitor_init(adap
, &s
->idma_monitor
);
4127 /* Set up timers used for recuring callbacks to process RX and TX
4128 * administrative tasks.
4130 timer_setup(&s
->rx_timer
, sge_rx_timer_cb
, 0);
4131 timer_setup(&s
->tx_timer
, sge_tx_timer_cb
, 0);
4133 spin_lock_init(&s
->intrq_lock
);