/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_CHELSIO_T4_FCOE
#include <scsi/fc/fc_fcoe.h>
#endif /* CONFIG_CHELSIO_T4_FCOE */
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
/*
 * Rx buffer size.  We use largish buffers if possible but settle for single
 * pages under memory shortage.
 */
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif
/* RX_PULL_LEN should be <= RX_COPY_THRES */
#define RX_COPY_THRES    256
#define RX_PULL_LEN      128

/*
 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
 */
#define RX_PKT_SKB_LEN   512
/*
 * Max number of Tx descriptors we clean up at a time.  Should be modest as
 * freeing skbs isn't cheap and it happens while holding locks.  We just need
 * to free packets faster than they arrive; we then eventually catch up and
 * keep the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
 */
#define MAX_TX_RECLAIM 16
/*
 * Max number of Rx buffers we replenish at a time.  Again keep this modest,
 * allocating buffers isn't cheap either.
 */
#define MAX_RX_REFILL 16U
/*
 * Period of the Rx queue check timer.  This timer is infrequent as it has
 * something to do only when the system experiences severe memory shortage.
 */
#define RX_QCHECK_PERIOD (HZ / 2)

/*
 * Period of the Tx queue check timer.
 */
#define TX_QCHECK_PERIOD (HZ / 2)

/*
 * Max number of Tx descriptors to be reclaimed by the Tx timer.
 */
#define MAX_TIMER_TX_RECLAIM 100

/*
 * Timer index used when backing off due to memory shortage.
 */
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
/*
 * Suspend an Ethernet Tx queue with fewer available descriptors than this.
 * This is the same as calc_tx_descs() for a TSO packet with
 * nr_frags == MAX_SKB_FRAGS.
 */
#define ETHTXQ_STOP_THRES \
	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
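/*
 * Illustrative arithmetic (not part of the original source; assumes the
 * common MAX_SKB_FRAGS value of 17 with 4KB pages): the macro evaluates to
 * 1 + DIV_ROUND_UP((3 * 17) / 2 + (17 & 1), 8) = 1 + DIV_ROUND_UP(26, 8) = 5,
 * i.e. the Ethernet queue is suspended once fewer than 5 descriptors remain.
 */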
/*
 * Suspension threshold for non-Ethernet Tx queues.  We require enough room
 * for a full sized WR.
 */
#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256

/*
 * Max size of a WR sent through a control Tx queue.
 */
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
	struct ulptx_sgl *sgl;
};

struct rx_sw_desc {		/* SW state per Rx descriptor */
	struct page *page;
	dma_addr_t dma_addr;
};
/*
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
 * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
 * We could easily support more but there doesn't seem to be much need for
 * that ...
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000
static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
					  unsigned int mtu)
{
	struct sge *s = &adapter->sge;

	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
}

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
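/*
 * Worked example (illustrative; the real pktshift and fl_align come from the
 * adapter configuration): with a 2-byte packet shift, a 64-byte free-list
 * alignment and FL_MTU_SMALL = 1500, the small MTU buffer size is
 * ALIGN(2 + 14 + 4 + 1500, 64) = ALIGN(1520, 64) = 1536 bytes.
 */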
/*
 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array.  We also use bit 4, when the buffer has been unmapped
 * for DMA, but this is of course never sent to the hardware and is only used
 * to prevent double unmappings.  All of the above requires that the Free List
 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte aligned or a power of 2 greater in alignment.  Since the SGE's
 * minimal Free List Buffer alignment is 32 bytes, this works out for us ...
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File.  We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (FL_PG_ORDER) page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};
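/*
 * Illustrative example (the address value is made up): a large-page buffer
 * mapped at DMA address 0x12340000 is stored as 0x12340000 | RX_LARGE_PG_BUF
 * in rx_sw_desc.dma_addr; get_buf_addr() below masks off RX_BUF_FLAGS to
 * recover the address and is_buf_mapped() tests the RX_UNMAPPED_BUF bit.
 */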
static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};

#define MIN_NAPI_WORK  1
static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
}

static inline bool is_buf_mapped(const struct rx_sw_desc *d)
{
	return !(d->dma_addr & RX_UNMAPPED_BUF);
}
/**
 * txq_avail - return the number of available slots in a Tx queue
 * @q: the Tx queue
 *
 * Returns the number of descriptors in a Tx queue available to write new
 * packet descriptors.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}
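/*
 * Example (illustrative): a Tx queue of size 1024 with 100 descriptors in
 * use reports 1024 - 1 - 100 = 923 free slots; the "- 1" keeps one slot
 * permanently unused, the usual way of telling a full ring from an empty one.
 */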
/**
 * fl_cap - return the capacity of a free-buffer list
 * @fl: the FL
 *
 * Returns the capacity of a free-buffer list.  The capacity is less than
 * the size because one descriptor needs to be left unpopulated, otherwise
 * HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}
/**
 * fl_starving - return whether a Free List is starving.
 * @adapter: pointer to the adapter
 * @fl: the Free List
 *
 * Tests specified Free List to see whether the number of buffers
 * available to the hardware has fallen below our "starvation"
 * threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
static int map_skb(struct device *dev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
	return -ENOMEM;
}
#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
		      const dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++)
		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}
/**
 * deferred_unmap_destructor - unmap a packet when it is freed
 * @skb: the packet
 *
 * This is the packet destructor used for Tx packets that need to remain
 * mapped until they are freed rather than until their Tx descriptors are
 * freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
#endif /* CONFIG_NEED_DMA_MAP_STATE */
311 static void unmap_sgl(struct device
*dev
, const struct sk_buff
*skb
,
312 const struct ulptx_sgl
*sgl
, const struct sge_txq
*q
)
314 const struct ulptx_sge_pair
*p
;
315 unsigned int nfrags
= skb_shinfo(skb
)->nr_frags
;
317 if (likely(skb_headlen(skb
)))
318 dma_unmap_single(dev
, be64_to_cpu(sgl
->addr0
), ntohl(sgl
->len0
),
321 dma_unmap_page(dev
, be64_to_cpu(sgl
->addr0
), ntohl(sgl
->len0
),
327 * the complexity below is because of the possibility of a wrap-around
328 * in the middle of an SGL
330 for (p
= sgl
->sge
; nfrags
>= 2; nfrags
-= 2) {
331 if (likely((u8
*)(p
+ 1) <= (u8
*)q
->stat
)) {
332 unmap
: dma_unmap_page(dev
, be64_to_cpu(p
->addr
[0]),
333 ntohl(p
->len
[0]), DMA_TO_DEVICE
);
334 dma_unmap_page(dev
, be64_to_cpu(p
->addr
[1]),
335 ntohl(p
->len
[1]), DMA_TO_DEVICE
);
337 } else if ((u8
*)p
== (u8
*)q
->stat
) {
338 p
= (const struct ulptx_sge_pair
*)q
->desc
;
340 } else if ((u8
*)p
+ 8 == (u8
*)q
->stat
) {
341 const __be64
*addr
= (const __be64
*)q
->desc
;
343 dma_unmap_page(dev
, be64_to_cpu(addr
[0]),
344 ntohl(p
->len
[0]), DMA_TO_DEVICE
);
345 dma_unmap_page(dev
, be64_to_cpu(addr
[1]),
346 ntohl(p
->len
[1]), DMA_TO_DEVICE
);
347 p
= (const struct ulptx_sge_pair
*)&addr
[2];
349 const __be64
*addr
= (const __be64
*)q
->desc
;
351 dma_unmap_page(dev
, be64_to_cpu(p
->addr
[0]),
352 ntohl(p
->len
[0]), DMA_TO_DEVICE
);
353 dma_unmap_page(dev
, be64_to_cpu(addr
[0]),
354 ntohl(p
->len
[1]), DMA_TO_DEVICE
);
355 p
= (const struct ulptx_sge_pair
*)&addr
[1];
361 if ((u8
*)p
== (u8
*)q
->stat
)
362 p
= (const struct ulptx_sge_pair
*)q
->desc
;
363 addr
= (u8
*)p
+ 16 <= (u8
*)q
->stat
? p
->addr
[0] :
364 *(const __be64
*)q
->desc
;
365 dma_unmap_page(dev
, be64_to_cpu(addr
), ntohl(p
->len
[0]),
371 * free_tx_desc - reclaims Tx descriptors and their buffers
372 * @adapter: the adapter
373 * @q: the Tx queue to reclaim descriptors from
374 * @n: the number of descriptors to reclaim
375 * @unmap: whether the buffers should be unmapped for DMA
377 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
378 * Tx buffers. Called with the Tx queue lock held.
380 static void free_tx_desc(struct adapter
*adap
, struct sge_txq
*q
,
381 unsigned int n
, bool unmap
)
383 struct tx_sw_desc
*d
;
384 unsigned int cidx
= q
->cidx
;
385 struct device
*dev
= adap
->pdev_dev
;
389 if (d
->skb
) { /* an SGL is present */
391 unmap_sgl(dev
, d
->skb
, d
->sgl
, q
);
392 dev_consume_skb_any(d
->skb
);
396 if (++cidx
== q
->size
) {
405 * Return the number of reclaimable descriptors in a Tx queue.
407 static inline int reclaimable(const struct sge_txq
*q
)
409 int hw_cidx
= ntohs(ACCESS_ONCE(q
->stat
->cidx
));
411 return hw_cidx
< 0 ? hw_cidx
+ q
->size
: hw_cidx
;
415 * reclaim_completed_tx - reclaims completed Tx descriptors
417 * @q: the Tx queue to reclaim completed descriptors from
418 * @unmap: whether the buffers should be unmapped for DMA
420 * Reclaims Tx descriptors that the SGE has indicated it has processed,
421 * and frees the associated buffers if possible. Called with the Tx
424 static inline void reclaim_completed_tx(struct adapter
*adap
, struct sge_txq
*q
,
427 int avail
= reclaimable(q
);
431 * Limit the amount of clean up work we do at a time to keep
432 * the Tx lock hold time O(1).
434 if (avail
> MAX_TX_RECLAIM
)
435 avail
= MAX_TX_RECLAIM
;
437 free_tx_desc(adap
, q
, avail
, unmap
);
442 static inline int get_buf_size(struct adapter
*adapter
,
443 const struct rx_sw_desc
*d
)
445 struct sge
*s
= &adapter
->sge
;
446 unsigned int rx_buf_size_idx
= d
->dma_addr
& RX_BUF_SIZE
;
449 switch (rx_buf_size_idx
) {
450 case RX_SMALL_PG_BUF
:
451 buf_size
= PAGE_SIZE
;
454 case RX_LARGE_PG_BUF
:
455 buf_size
= PAGE_SIZE
<< s
->fl_pg_order
;
458 case RX_SMALL_MTU_BUF
:
459 buf_size
= FL_MTU_SMALL_BUFSIZE(adapter
);
462 case RX_LARGE_MTU_BUF
:
463 buf_size
= FL_MTU_LARGE_BUFSIZE(adapter
);
474 * free_rx_bufs - free the Rx buffers on an SGE free list
476 * @q: the SGE free list to free buffers from
477 * @n: how many buffers to free
479 * Release the next @n buffers on an SGE free-buffer Rx queue. The
480 * buffers must be made inaccessible to HW before calling this function.
482 static void free_rx_bufs(struct adapter
*adap
, struct sge_fl
*q
, int n
)
485 struct rx_sw_desc
*d
= &q
->sdesc
[q
->cidx
];
487 if (is_buf_mapped(d
))
488 dma_unmap_page(adap
->pdev_dev
, get_buf_addr(d
),
489 get_buf_size(adap
, d
),
493 if (++q
->cidx
== q
->size
)
500 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
502 * @q: the SGE free list
504 * Unmap the current buffer on an SGE free-buffer Rx queue. The
505 * buffer must be made inaccessible to HW before calling this function.
507 * This is similar to @free_rx_bufs above but does not free the buffer.
508 * Do note that the FL still loses any further access to the buffer.
510 static void unmap_rx_buf(struct adapter
*adap
, struct sge_fl
*q
)
512 struct rx_sw_desc
*d
= &q
->sdesc
[q
->cidx
];
514 if (is_buf_mapped(d
))
515 dma_unmap_page(adap
->pdev_dev
, get_buf_addr(d
),
516 get_buf_size(adap
, d
), PCI_DMA_FROMDEVICE
);
518 if (++q
->cidx
== q
->size
)
523 static inline void ring_fl_db(struct adapter
*adap
, struct sge_fl
*q
)
525 if (q
->pend_cred
>= 8) {
526 u32 val
= adap
->params
.arch
.sge_fl_db
;
528 if (is_t4(adap
->params
.chip
))
529 val
|= PIDX_V(q
->pend_cred
/ 8);
531 val
|= PIDX_T5_V(q
->pend_cred
/ 8);
533 /* Make sure all memory writes to the Free List queue are
534 * committed before we tell the hardware about them.
538 /* If we don't have access to the new User Doorbell (T5+), use
539 * the old doorbell mechanism; otherwise use the new BAR2
542 if (unlikely(q
->bar2_addr
== NULL
)) {
543 t4_write_reg(adap
, MYPF_REG(SGE_PF_KDOORBELL_A
),
544 val
| QID_V(q
->cntxt_id
));
546 writel(val
| QID_V(q
->bar2_qid
),
547 q
->bar2_addr
+ SGE_UDB_KDOORBELL
);
549 /* This Write memory Barrier will force the write to
550 * the User Doorbell area to be flushed.
558 static inline void set_rx_sw_desc(struct rx_sw_desc
*sd
, struct page
*pg
,
562 sd
->dma_addr
= mapping
; /* includes size low bits */
566 * refill_fl - refill an SGE Rx buffer ring
568 * @q: the ring to refill
569 * @n: the number of new buffers to allocate
570 * @gfp: the gfp flags for the allocations
572 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
573 * allocated with the supplied gfp flags. The caller must assure that
574 * @n does not exceed the queue's capacity. If afterwards the queue is
575 * found critically low mark it as starving in the bitmap of starving FLs.
577 * Returns the number of buffers allocated.
579 static unsigned int refill_fl(struct adapter
*adap
, struct sge_fl
*q
, int n
,
582 struct sge
*s
= &adap
->sge
;
585 unsigned int cred
= q
->avail
;
586 __be64
*d
= &q
->desc
[q
->pidx
];
587 struct rx_sw_desc
*sd
= &q
->sdesc
[q
->pidx
];
590 #ifdef CONFIG_DEBUG_FS
591 if (test_bit(q
->cntxt_id
- adap
->sge
.egr_start
, adap
->sge
.blocked_fl
))
596 node
= dev_to_node(adap
->pdev_dev
);
598 if (s
->fl_pg_order
== 0)
599 goto alloc_small_pages
;
602 * Prefer large buffers
605 pg
= alloc_pages_node(node
, gfp
| __GFP_COMP
, s
->fl_pg_order
);
607 q
->large_alloc_failed
++;
608 break; /* fall back to single pages */
611 mapping
= dma_map_page(adap
->pdev_dev
, pg
, 0,
612 PAGE_SIZE
<< s
->fl_pg_order
,
614 if (unlikely(dma_mapping_error(adap
->pdev_dev
, mapping
))) {
615 __free_pages(pg
, s
->fl_pg_order
);
617 goto out
; /* do not try small pages for this error */
619 mapping
|= RX_LARGE_PG_BUF
;
620 *d
++ = cpu_to_be64(mapping
);
622 set_rx_sw_desc(sd
, pg
, mapping
);
626 if (++q
->pidx
== q
->size
) {
636 pg
= alloc_pages_node(node
, gfp
, 0);
642 mapping
= dma_map_page(adap
->pdev_dev
, pg
, 0, PAGE_SIZE
,
644 if (unlikely(dma_mapping_error(adap
->pdev_dev
, mapping
))) {
649 *d
++ = cpu_to_be64(mapping
);
651 set_rx_sw_desc(sd
, pg
, mapping
);
655 if (++q
->pidx
== q
->size
) {
662 out
: cred
= q
->avail
- cred
;
663 q
->pend_cred
+= cred
;
666 if (unlikely(fl_starving(adap
, q
))) {
669 set_bit(q
->cntxt_id
- adap
->sge
.egr_start
,
670 adap
->sge
.starving_fl
);
676 static inline void __refill_fl(struct adapter
*adap
, struct sge_fl
*fl
)
678 refill_fl(adap
, fl
, min(MAX_RX_REFILL
, fl_cap(fl
) - fl
->avail
),
683 * alloc_ring - allocate resources for an SGE descriptor ring
684 * @dev: the PCI device's core device
685 * @nelem: the number of descriptors
686 * @elem_size: the size of each descriptor
687 * @sw_size: the size of the SW state associated with each ring element
688 * @phys: the physical address of the allocated ring
689 * @metadata: address of the array holding the SW state for the ring
690 * @stat_size: extra space in HW ring for status information
691 * @node: preferred node for memory allocations
693 * Allocates resources for an SGE descriptor ring, such as Tx queues,
694 * free buffer lists, or response queues. Each SGE ring requires
695 * space for its HW descriptors plus, optionally, space for the SW state
696 * associated with each HW entry (the metadata). The function returns
697 * three values: the virtual address for the HW ring (the return value
698 * of the function), the bus address of the HW ring, and the address
701 static void *alloc_ring(struct device
*dev
, size_t nelem
, size_t elem_size
,
702 size_t sw_size
, dma_addr_t
*phys
, void *metadata
,
703 size_t stat_size
, int node
)
705 size_t len
= nelem
* elem_size
+ stat_size
;
707 void *p
= dma_alloc_coherent(dev
, len
, phys
, GFP_KERNEL
);
712 s
= kzalloc_node(nelem
* sw_size
, GFP_KERNEL
, node
);
715 dma_free_coherent(dev
, len
, p
, *phys
);
720 *(void **)metadata
= s
;
/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
	 * repeated sequences of { Length[i], Length[i+1], Address[i],
	 * Address[i+1] } (this ensures that all addresses are on 64-bit
	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
	 * Address[N+1] is omitted.
	 *
	 * The following calculation incorporates all of the above.  It's
	 * somewhat hard to follow but, briefly: the "+2" accounts for the
	 * first two flits which include the DSGL header, Length0 and
	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
	 * flits for every pair of the remaining N); and finally the
	 * "+((n-1)&1)" adds the one remaining flit needed if (n-1) is odd ...
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
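/*
 * Worked example (illustrative only): for n = 3 SGL entries the function
 * computes, after the n--, (3 * 2) / 2 + (2 & 1) + 2 = 5 flits: one flit for
 * the ULPTX header plus Length0, one for Address0, and three for the
 * remaining { Length[1], Length[2], Address[1], Address[2] } sequence.
 */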
/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Returns the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}
/**
 * is_eth_imm - can an Ethernet packet be sent as immediate data?
 * @skb: the packet
 *
 * Returns whether an Ethernet packet is small enough to fit as
 * immediate data.  The return value corresponds to the headroom required.
 */
static inline int is_eth_imm(const struct sk_buff *skb)
{
	int hdrlen = skb_shinfo(skb)->gso_size ?
			sizeof(struct cpl_tx_pkt_lso_core) : 0;

	hdrlen += sizeof(struct cpl_tx_pkt);
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}
/**
 * calc_tx_flits - calculate the number of flits for a packet Tx WR
 * @skb: the packet
 *
 * Returns the number of flits needed for a Tx WR for the given Ethernet
 * packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;
	int hdrlen = is_eth_imm(skb);

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (hdrlen)
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size)
		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	else
		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	return flits;
}
/**
 * calc_tx_descs - calculate the number of Tx descriptors for a packet
 * @skb: the packet
 *
 * Returns the number of Tx descriptors needed for the given Ethernet
 * packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	return flits_to_desc(calc_tx_flits(skb));
}
837 * write_sgl - populate a scatter/gather list for a packet
839 * @q: the Tx queue we are writing into
840 * @sgl: starting location for writing the SGL
841 * @end: points right after the end of the SGL
842 * @start: start offset into skb main-body data to include in the SGL
843 * @addr: the list of bus addresses for the SGL elements
845 * Generates a gather list for the buffers that make up a packet.
846 * The caller must provide adequate space for the SGL that will be written.
847 * The SGL includes all of the packet's page fragments and the data in its
848 * main body except for the first @start bytes. @sgl must be 16-byte
849 * aligned and within a Tx descriptor with available space. @end points
850 * right after the end of the SGL but does not account for any potential
851 * wrap around, i.e., @end > @sgl.
853 static void write_sgl(const struct sk_buff
*skb
, struct sge_txq
*q
,
854 struct ulptx_sgl
*sgl
, u64
*end
, unsigned int start
,
855 const dma_addr_t
*addr
)
858 struct ulptx_sge_pair
*to
;
859 const struct skb_shared_info
*si
= skb_shinfo(skb
);
860 unsigned int nfrags
= si
->nr_frags
;
861 struct ulptx_sge_pair buf
[MAX_SKB_FRAGS
/ 2 + 1];
863 len
= skb_headlen(skb
) - start
;
865 sgl
->len0
= htonl(len
);
866 sgl
->addr0
= cpu_to_be64(addr
[0] + start
);
869 sgl
->len0
= htonl(skb_frag_size(&si
->frags
[0]));
870 sgl
->addr0
= cpu_to_be64(addr
[1]);
873 sgl
->cmd_nsge
= htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL
) |
874 ULPTX_NSGE_V(nfrags
));
875 if (likely(--nfrags
== 0))
878 * Most of the complexity below deals with the possibility we hit the
879 * end of the queue in the middle of writing the SGL. For this case
880 * only we create the SGL in a temporary buffer and then copy it.
882 to
= (u8
*)end
> (u8
*)q
->stat
? buf
: sgl
->sge
;
884 for (i
= (nfrags
!= si
->nr_frags
); nfrags
>= 2; nfrags
-= 2, to
++) {
885 to
->len
[0] = cpu_to_be32(skb_frag_size(&si
->frags
[i
]));
886 to
->len
[1] = cpu_to_be32(skb_frag_size(&si
->frags
[++i
]));
887 to
->addr
[0] = cpu_to_be64(addr
[i
]);
888 to
->addr
[1] = cpu_to_be64(addr
[++i
]);
891 to
->len
[0] = cpu_to_be32(skb_frag_size(&si
->frags
[i
]));
892 to
->len
[1] = cpu_to_be32(0);
893 to
->addr
[0] = cpu_to_be64(addr
[i
+ 1]);
895 if (unlikely((u8
*)end
> (u8
*)q
->stat
)) {
896 unsigned int part0
= (u8
*)q
->stat
- (u8
*)sgl
->sge
, part1
;
899 memcpy(sgl
->sge
, buf
, part0
);
900 part1
= (u8
*)end
- (u8
*)q
->stat
;
901 memcpy(q
->desc
, (u8
*)buf
+ part0
, part1
);
902 end
= (void *)q
->desc
+ part1
;
904 if ((uintptr_t)end
& 8) /* 0-pad to multiple of 16 */
908 /* This function copies 64 byte coalesced work request to
909 * memory mapped BAR2 space. For coalesced WR SGE fetches
910 * data from the FIFO instead of from Host.
912 static void cxgb_pio_copy(u64 __iomem
*dst
, u64
*src
)
/**
 * ring_tx_db - check and potentially ring a Tx queue's doorbell
 * @adap: the adapter
 * @q: the Tx queue
 * @n: number of new descriptors to give to HW
 *
 * Ring the doorbell for a Tx queue.
 */
932 static inline void ring_tx_db(struct adapter
*adap
, struct sge_txq
*q
, int n
)
934 /* Make sure that all writes to the TX Descriptors are committed
935 * before we tell the hardware about them.
939 /* If we don't have access to the new User Doorbell (T5+), use the old
940 * doorbell mechanism; otherwise use the new BAR2 mechanism.
942 if (unlikely(q
->bar2_addr
== NULL
)) {
946 /* For T4 we need to participate in the Doorbell Recovery
949 spin_lock_irqsave(&q
->db_lock
, flags
);
951 t4_write_reg(adap
, MYPF_REG(SGE_PF_KDOORBELL_A
),
952 QID_V(q
->cntxt_id
) | val
);
955 q
->db_pidx
= q
->pidx
;
956 spin_unlock_irqrestore(&q
->db_lock
, flags
);
958 u32 val
= PIDX_T5_V(n
);
960 /* T4 and later chips share the same PIDX field offset within
961 * the doorbell, but T5 and later shrank the field in order to
962 * gain a bit for Doorbell Priority. The field was absurdly
963 * large in the first place (14 bits) so we just use the T5
964 * and later limits and warn if a Queue ID is too large.
966 WARN_ON(val
& DBPRIO_F
);
968 /* If we're only writing a single TX Descriptor and we can use
969 * Inferred QID registers, we can use the Write Combining
970 * Gather Buffer; otherwise we use the simple doorbell.
972 if (n
== 1 && q
->bar2_qid
== 0) {
976 u64
*wr
= (u64
*)&q
->desc
[index
];
978 cxgb_pio_copy((u64 __iomem
*)
979 (q
->bar2_addr
+ SGE_UDB_WCDOORBELL
),
982 writel(val
| QID_V(q
->bar2_qid
),
983 q
->bar2_addr
+ SGE_UDB_KDOORBELL
);
986 /* This Write Memory Barrier will force the write to the User
987 * Doorbell area to be flushed. This is needed to prevent
988 * writes on different CPUs for the same queue from hitting
989 * the adapter out of order. This is required when some Work
990 * Requests take the Write Combine Gather Buffer path (user
991 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
992 * take the traditional path where we simply increment the
993 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
994 * hardware DMA read the actual Work Request.
1001 * inline_tx_skb - inline a packet's data into Tx descriptors
1003 * @q: the Tx queue where the packet will be inlined
1004 * @pos: starting position in the Tx queue where to inline the packet
1006 * Inline a packet's contents directly into Tx descriptors, starting at
1007 * the given position within the Tx DMA ring.
1008 * Most of the complexity of this operation is dealing with wrap arounds
1009 * in the middle of the packet we want to inline.
1011 static void inline_tx_skb(const struct sk_buff
*skb
, const struct sge_txq
*q
,
1015 int left
= (void *)q
->stat
- pos
;
1017 if (likely(skb
->len
<= left
)) {
1018 if (likely(!skb
->data_len
))
1019 skb_copy_from_linear_data(skb
, pos
, skb
->len
);
1021 skb_copy_bits(skb
, 0, pos
, skb
->len
);
1024 skb_copy_bits(skb
, 0, pos
, left
);
1025 skb_copy_bits(skb
, left
, q
->desc
, skb
->len
- left
);
1026 pos
= (void *)q
->desc
+ (skb
->len
- left
);
1029 /* 0-pad to multiple of 16 */
1030 p
= PTR_ALIGN(pos
, 8);
1031 if ((uintptr_t)p
& 8)
1035 static void *inline_tx_skb_header(const struct sk_buff
*skb
,
1036 const struct sge_txq
*q
, void *pos
,
1040 int left
= (void *)q
->stat
- pos
;
1042 if (likely(length
<= left
)) {
1043 memcpy(pos
, skb
->data
, length
);
1046 memcpy(pos
, skb
->data
, left
);
1047 memcpy(q
->desc
, skb
->data
+ left
, length
- left
);
1048 pos
= (void *)q
->desc
+ (length
- left
);
1050 /* 0-pad to multiple of 16 */
1051 p
= PTR_ALIGN(pos
, 8);
1052 if ((uintptr_t)p
& 8) {
/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
	int csum_type;
	const struct iphdr *iph = ip_hdr(skb);

	if (iph->version == 4) {
		if (iph->protocol == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP;
		else if (iph->protocol == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP;
		else {
nocsum:			/*
			 * unknown protocol, disable HW csum
			 * and hope a bad packet is detected
			 */
			return TXPKT_L4CSUM_DIS_F;
		}
	} else {
		/*
		 * this doesn't work with extension headers
		 */
		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;

		if (ip6h->nexthdr == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP6;
		else if (ip6h->nexthdr == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP6;
		else
			goto nocsum;
	}

	if (likely(csum_type >= TX_CSUM_TCPIP)) {
		u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
		int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;

		if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
			hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		else
			hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
	} else {
		int start = skb_transport_offset(skb);

		return TXPKT_CSUM_TYPE_V(csum_type) |
			TXPKT_CSUM_START_V(start) |
			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
	}
}
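/*
 * Illustrative example (assumes a plain, untagged TCP/IPv4 frame): the
 * network header starts at offset 14, so eth_hdr_len is 14 - ETH_HLEN = 0
 * extra bytes, and a 20-byte IP header yields TXPKT_IPHDR_LEN_V(20); the
 * returned control word therefore carries csum type TX_CSUM_TCPIP plus
 * those header lengths.
 */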
1112 static void eth_txq_stop(struct sge_eth_txq
*q
)
1114 netif_tx_stop_queue(q
->txq
);
1118 static inline void txq_advance(struct sge_txq
*q
, unsigned int n
)
1122 if (q
->pidx
>= q
->size
)
1126 #ifdef CONFIG_CHELSIO_T4_FCOE
1128 cxgb_fcoe_offload(struct sk_buff
*skb
, struct adapter
*adap
,
1129 const struct port_info
*pi
, u64
*cntrl
)
1131 const struct cxgb_fcoe
*fcoe
= &pi
->fcoe
;
1133 if (!(fcoe
->flags
& CXGB_FCOE_ENABLED
))
1136 if (skb
->protocol
!= htons(ETH_P_FCOE
))
1139 skb_reset_mac_header(skb
);
1140 skb
->mac_len
= sizeof(struct ethhdr
);
1142 skb_set_network_header(skb
, skb
->mac_len
);
1143 skb_set_transport_header(skb
, skb
->mac_len
+ sizeof(struct fcoe_hdr
));
1145 if (!cxgb_fcoe_sof_eof_supported(adap
, skb
))
1148 /* FC CRC offload */
1149 *cntrl
= TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE
) |
1150 TXPKT_L4CSUM_DIS_F
| TXPKT_IPCSUM_DIS_F
|
1151 TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START
) |
1152 TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END
) |
1153 TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END
);
1156 #endif /* CONFIG_CHELSIO_T4_FCOE */
1159 * t4_eth_xmit - add a packet to an Ethernet Tx queue
1161 * @dev: the egress net device
1163 * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
1165 netdev_tx_t
t4_eth_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
1170 unsigned int flits
, ndesc
;
1171 struct adapter
*adap
;
1172 struct sge_eth_txq
*q
;
1173 const struct port_info
*pi
;
1174 struct fw_eth_tx_pkt_wr
*wr
;
1175 struct cpl_tx_pkt_core
*cpl
;
1176 const struct skb_shared_info
*ssi
;
1177 dma_addr_t addr
[MAX_SKB_FRAGS
+ 1];
1178 bool immediate
= false;
1179 int len
, max_pkt_len
;
1180 #ifdef CONFIG_CHELSIO_T4_FCOE
1182 #endif /* CONFIG_CHELSIO_T4_FCOE */
1185 * The chip min packet length is 10 octets but play safe and reject
1186 * anything shorter than an Ethernet header.
1188 if (unlikely(skb
->len
< ETH_HLEN
)) {
1189 out_free
: dev_kfree_skb_any(skb
);
1190 return NETDEV_TX_OK
;
1193 /* Discard the packet if the length is greater than mtu */
1194 max_pkt_len
= ETH_HLEN
+ dev
->mtu
;
1195 if (skb_vlan_tag_present(skb
))
1196 max_pkt_len
+= VLAN_HLEN
;
1197 if (!skb_shinfo(skb
)->gso_size
&& (unlikely(skb
->len
> max_pkt_len
)))
1200 pi
= netdev_priv(dev
);
1202 qidx
= skb_get_queue_mapping(skb
);
1203 q
= &adap
->sge
.ethtxq
[qidx
+ pi
->first_qset
];
1205 reclaim_completed_tx(adap
, &q
->q
, true);
1206 cntrl
= TXPKT_L4CSUM_DIS_F
| TXPKT_IPCSUM_DIS_F
;
1208 #ifdef CONFIG_CHELSIO_T4_FCOE
1209 err
= cxgb_fcoe_offload(skb
, adap
, pi
, &cntrl
);
1210 if (unlikely(err
== -ENOTSUPP
))
1212 #endif /* CONFIG_CHELSIO_T4_FCOE */
1214 flits
= calc_tx_flits(skb
);
1215 ndesc
= flits_to_desc(flits
);
1216 credits
= txq_avail(&q
->q
) - ndesc
;
1218 if (unlikely(credits
< 0)) {
1220 dev_err(adap
->pdev_dev
,
1221 "%s: Tx ring %u full while queue awake!\n",
1223 return NETDEV_TX_BUSY
;
1226 if (is_eth_imm(skb
))
1230 unlikely(map_skb(adap
->pdev_dev
, skb
, addr
) < 0)) {
1235 wr_mid
= FW_WR_LEN16_V(DIV_ROUND_UP(flits
, 2));
1236 if (unlikely(credits
< ETHTXQ_STOP_THRES
)) {
1238 wr_mid
|= FW_WR_EQUEQ_F
| FW_WR_EQUIQ_F
;
1241 wr
= (void *)&q
->q
.desc
[q
->q
.pidx
];
1242 wr
->equiq_to_len16
= htonl(wr_mid
);
1243 wr
->r3
= cpu_to_be64(0);
1244 end
= (u64
*)wr
+ flits
;
1246 len
= immediate
? skb
->len
: 0;
1247 ssi
= skb_shinfo(skb
);
1248 if (ssi
->gso_size
) {
1249 struct cpl_tx_pkt_lso
*lso
= (void *)wr
;
1250 bool v6
= (ssi
->gso_type
& SKB_GSO_TCPV6
) != 0;
1251 int l3hdr_len
= skb_network_header_len(skb
);
1252 int eth_xtra_len
= skb_network_offset(skb
) - ETH_HLEN
;
1254 len
+= sizeof(*lso
);
1255 wr
->op_immdlen
= htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR
) |
1256 FW_WR_IMMDLEN_V(len
));
1257 lso
->c
.lso_ctrl
= htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO
) |
1258 LSO_FIRST_SLICE_F
| LSO_LAST_SLICE_F
|
1260 LSO_ETHHDR_LEN_V(eth_xtra_len
/ 4) |
1261 LSO_IPHDR_LEN_V(l3hdr_len
/ 4) |
1262 LSO_TCPHDR_LEN_V(tcp_hdr(skb
)->doff
));
1263 lso
->c
.ipid_ofst
= htons(0);
1264 lso
->c
.mss
= htons(ssi
->gso_size
);
1265 lso
->c
.seqno_offset
= htonl(0);
1266 if (is_t4(adap
->params
.chip
))
1267 lso
->c
.len
= htonl(skb
->len
);
1269 lso
->c
.len
= htonl(LSO_T5_XFER_SIZE_V(skb
->len
));
1270 cpl
= (void *)(lso
+ 1);
1272 if (CHELSIO_CHIP_VERSION(adap
->params
.chip
) <= CHELSIO_T5
)
1273 cntrl
= TXPKT_ETHHDR_LEN_V(eth_xtra_len
);
1275 cntrl
= T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len
);
1277 cntrl
|= TXPKT_CSUM_TYPE_V(v6
?
1278 TX_CSUM_TCPIP6
: TX_CSUM_TCPIP
) |
1279 TXPKT_IPHDR_LEN_V(l3hdr_len
);
1281 q
->tx_cso
+= ssi
->gso_segs
;
1283 len
+= sizeof(*cpl
);
1284 wr
->op_immdlen
= htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR
) |
1285 FW_WR_IMMDLEN_V(len
));
1286 cpl
= (void *)(wr
+ 1);
1287 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
1288 cntrl
= hwcsum(adap
->params
.chip
, skb
) |
1294 if (skb_vlan_tag_present(skb
)) {
1296 cntrl
|= TXPKT_VLAN_VLD_F
| TXPKT_VLAN_V(skb_vlan_tag_get(skb
));
1297 #ifdef CONFIG_CHELSIO_T4_FCOE
1298 if (skb
->protocol
== htons(ETH_P_FCOE
))
1299 cntrl
|= TXPKT_VLAN_V(
1300 ((skb
->priority
& 0x7) << VLAN_PRIO_SHIFT
));
1301 #endif /* CONFIG_CHELSIO_T4_FCOE */
1304 ctrl0
= TXPKT_OPCODE_V(CPL_TX_PKT_XT
) | TXPKT_INTF_V(pi
->tx_chan
) |
1305 TXPKT_PF_V(adap
->pf
);
1306 #ifdef CONFIG_CHELSIO_T4_DCB
1307 if (is_t4(adap
->params
.chip
))
1308 ctrl0
|= TXPKT_OVLAN_IDX_V(q
->dcb_prio
);
1310 ctrl0
|= TXPKT_T5_OVLAN_IDX_V(q
->dcb_prio
);
1312 cpl
->ctrl0
= htonl(ctrl0
);
1313 cpl
->pack
= htons(0);
1314 cpl
->len
= htons(skb
->len
);
1315 cpl
->ctrl1
= cpu_to_be64(cntrl
);
1318 inline_tx_skb(skb
, &q
->q
, cpl
+ 1);
1319 dev_consume_skb_any(skb
);
1323 write_sgl(skb
, &q
->q
, (struct ulptx_sgl
*)(cpl
+ 1), end
, 0,
1327 last_desc
= q
->q
.pidx
+ ndesc
- 1;
1328 if (last_desc
>= q
->q
.size
)
1329 last_desc
-= q
->q
.size
;
1330 q
->q
.sdesc
[last_desc
].skb
= skb
;
1331 q
->q
.sdesc
[last_desc
].sgl
= (struct ulptx_sgl
*)(cpl
+ 1);
1334 txq_advance(&q
->q
, ndesc
);
1336 ring_tx_db(adap
, &q
->q
, ndesc
);
1337 return NETDEV_TX_OK
;
1341 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1342 * @q: the SGE control Tx queue
1344 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1345 * that send only immediate data (presently just the control queues) and
1346 * thus do not have any sk_buffs to release.
1348 static inline void reclaim_completed_tx_imm(struct sge_txq
*q
)
1350 int hw_cidx
= ntohs(ACCESS_ONCE(q
->stat
->cidx
));
1351 int reclaim
= hw_cidx
- q
->cidx
;
1356 q
->in_use
-= reclaim
;
1361 * is_imm - check whether a packet can be sent as immediate data
1364 * Returns true if a packet can be sent as a WR with immediate data.
1366 static inline int is_imm(const struct sk_buff
*skb
)
1368 return skb
->len
<= MAX_CTRL_WR_LEN
;
1372 * ctrlq_check_stop - check if a control queue is full and should stop
1374 * @wr: most recent WR written to the queue
1376 * Check if a control queue has become full and should be stopped.
1377 * We clean up control queue descriptors very lazily, only when we are out.
1378 * If the queue is still full after reclaiming any completed descriptors
1379 * we suspend it and have the last WR wake it up.
1381 static void ctrlq_check_stop(struct sge_ctrl_txq
*q
, struct fw_wr_hdr
*wr
)
1383 reclaim_completed_tx_imm(&q
->q
);
1384 if (unlikely(txq_avail(&q
->q
) < TXQ_STOP_THRES
)) {
1385 wr
->lo
|= htonl(FW_WR_EQUEQ_F
| FW_WR_EQUIQ_F
);
1392 * ctrl_xmit - send a packet through an SGE control Tx queue
1393 * @q: the control queue
1396 * Send a packet through an SGE control Tx queue. Packets sent through
1397 * a control queue must fit entirely as immediate data.
1399 static int ctrl_xmit(struct sge_ctrl_txq
*q
, struct sk_buff
*skb
)
1402 struct fw_wr_hdr
*wr
;
1404 if (unlikely(!is_imm(skb
))) {
1407 return NET_XMIT_DROP
;
1410 ndesc
= DIV_ROUND_UP(skb
->len
, sizeof(struct tx_desc
));
1411 spin_lock(&q
->sendq
.lock
);
1413 if (unlikely(q
->full
)) {
1414 skb
->priority
= ndesc
; /* save for restart */
1415 __skb_queue_tail(&q
->sendq
, skb
);
1416 spin_unlock(&q
->sendq
.lock
);
1420 wr
= (struct fw_wr_hdr
*)&q
->q
.desc
[q
->q
.pidx
];
1421 inline_tx_skb(skb
, &q
->q
, wr
);
1423 txq_advance(&q
->q
, ndesc
);
1424 if (unlikely(txq_avail(&q
->q
) < TXQ_STOP_THRES
))
1425 ctrlq_check_stop(q
, wr
);
1427 ring_tx_db(q
->adap
, &q
->q
, ndesc
);
1428 spin_unlock(&q
->sendq
.lock
);
1431 return NET_XMIT_SUCCESS
;
1435 * restart_ctrlq - restart a suspended control queue
1436 * @data: the control queue to restart
1438 * Resumes transmission on a suspended Tx control queue.
1440 static void restart_ctrlq(unsigned long data
)
1442 struct sk_buff
*skb
;
1443 unsigned int written
= 0;
1444 struct sge_ctrl_txq
*q
= (struct sge_ctrl_txq
*)data
;
1446 spin_lock(&q
->sendq
.lock
);
1447 reclaim_completed_tx_imm(&q
->q
);
1448 BUG_ON(txq_avail(&q
->q
) < TXQ_STOP_THRES
); /* q should be empty */
1450 while ((skb
= __skb_dequeue(&q
->sendq
)) != NULL
) {
1451 struct fw_wr_hdr
*wr
;
1452 unsigned int ndesc
= skb
->priority
; /* previously saved */
1455 /* Write descriptors and free skbs outside the lock to limit
1456 * wait times. q->full is still set so new skbs will be queued.
1458 wr
= (struct fw_wr_hdr
*)&q
->q
.desc
[q
->q
.pidx
];
1459 txq_advance(&q
->q
, ndesc
);
1460 spin_unlock(&q
->sendq
.lock
);
1462 inline_tx_skb(skb
, &q
->q
, wr
);
1465 if (unlikely(txq_avail(&q
->q
) < TXQ_STOP_THRES
)) {
1466 unsigned long old
= q
->q
.stops
;
1468 ctrlq_check_stop(q
, wr
);
1469 if (q
->q
.stops
!= old
) { /* suspended anew */
1470 spin_lock(&q
->sendq
.lock
);
1475 ring_tx_db(q
->adap
, &q
->q
, written
);
1478 spin_lock(&q
->sendq
.lock
);
1481 ringdb
: if (written
)
1482 ring_tx_db(q
->adap
, &q
->q
, written
);
1483 spin_unlock(&q
->sendq
.lock
);
1487 * t4_mgmt_tx - send a management message
1488 * @adap: the adapter
1489 * @skb: the packet containing the management message
1491 * Send a management message through control queue 0.
1493 int t4_mgmt_tx(struct adapter
*adap
, struct sk_buff
*skb
)
1498 ret
= ctrl_xmit(&adap
->sge
.ctrlq
[0], skb
);
/**
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data.  We currently use the same limit as for Ethernet packets.
 */
static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return skb->len <= MAX_IMM_TX_PKT_LEN;
}

/**
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);

	flits = skb_transport_offset(skb) / 8U;   /* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}
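/*
 * Illustrative example: a fully built 120-byte offload WR fits within
 * MAX_IMM_TX_PKT_LEN, so it goes out as immediate data and needs
 * DIV_ROUND_UP(120, 8) = 15 flits; a larger WR instead counts its header
 * flits plus sgl_len() flits for the payload fragments.
 */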
1538 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
1539 * @adap: the adapter
1540 * @q: the queue to stop
1542 * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
1543 * inability to map packets. A periodic timer attempts to restart
1546 static void txq_stop_maperr(struct sge_ofld_txq
*q
)
1550 set_bit(q
->q
.cntxt_id
- q
->adap
->sge
.egr_start
,
1551 q
->adap
->sge
.txq_maperr
);
1555 * ofldtxq_stop - stop an offload Tx queue that has become full
1556 * @q: the queue to stop
1557 * @skb: the packet causing the queue to become full
1559 * Stops an offload Tx queue that has become full and modifies the packet
1560 * being written to request a wakeup.
1562 static void ofldtxq_stop(struct sge_ofld_txq
*q
, struct sk_buff
*skb
)
1564 struct fw_wr_hdr
*wr
= (struct fw_wr_hdr
*)skb
->data
;
1566 wr
->lo
|= htonl(FW_WR_EQUEQ_F
| FW_WR_EQUIQ_F
);
1572 * service_ofldq - service/restart a suspended offload queue
1573 * @q: the offload queue
1575 * Services an offload Tx queue by moving packets from its Pending Send
1576 * Queue to the Hardware TX ring. The function starts and ends with the
1577 * Send Queue locked, but drops the lock while putting the skb at the
1578 * head of the Send Queue onto the Hardware TX Ring. Dropping the lock
1579 * allows more skbs to be added to the Send Queue by other threads.
1580 * The packet being processed at the head of the Pending Send Queue is
1581 * left on the queue in case we experience DMA Mapping errors, etc.
1582 * and need to give up and restart later.
1584 * service_ofldq() can be thought of as a task which opportunistically
1585 * uses other threads execution contexts. We use the Offload Queue
1586 * boolean "service_ofldq_running" to make sure that only one instance
1587 * is ever running at a time ...
1589 static void service_ofldq(struct sge_ofld_txq
*q
)
1591 u64
*pos
, *before
, *end
;
1593 struct sk_buff
*skb
;
1594 struct sge_txq
*txq
;
1596 unsigned int written
= 0;
1597 unsigned int flits
, ndesc
;
1599 /* If another thread is currently in service_ofldq() processing the
1600 * Pending Send Queue then there's nothing to do. Otherwise, flag
1601 * that we're doing the work and continue. Examining/modifying
1602 * the Offload Queue boolean "service_ofldq_running" must be done
1603 * while holding the Pending Send Queue Lock.
1605 if (q
->service_ofldq_running
)
1607 q
->service_ofldq_running
= true;
1609 while ((skb
= skb_peek(&q
->sendq
)) != NULL
&& !q
->full
) {
1610 /* We drop the lock while we're working with the skb at the
1611 * head of the Pending Send Queue. This allows more skbs to
1612 * be added to the Pending Send Queue while we're working on
1613 * this one. We don't need to lock to guard the TX Ring
1614 * updates because only one thread of execution is ever
1615 * allowed into service_ofldq() at a time.
1617 spin_unlock(&q
->sendq
.lock
);
1619 reclaim_completed_tx(q
->adap
, &q
->q
, false);
1621 flits
= skb
->priority
; /* previously saved */
1622 ndesc
= flits_to_desc(flits
);
1623 credits
= txq_avail(&q
->q
) - ndesc
;
1624 BUG_ON(credits
< 0);
1625 if (unlikely(credits
< TXQ_STOP_THRES
))
1626 ofldtxq_stop(q
, skb
);
1628 pos
= (u64
*)&q
->q
.desc
[q
->q
.pidx
];
1629 if (is_ofld_imm(skb
))
1630 inline_tx_skb(skb
, &q
->q
, pos
);
1631 else if (map_skb(q
->adap
->pdev_dev
, skb
,
1632 (dma_addr_t
*)skb
->head
)) {
1634 spin_lock(&q
->sendq
.lock
);
1637 int last_desc
, hdr_len
= skb_transport_offset(skb
);
1639 /* The WR headers may not fit within one descriptor.
1640 * So we need to deal with wrap-around here.
1642 before
= (u64
*)pos
;
1643 end
= (u64
*)pos
+ flits
;
1645 pos
= (void *)inline_tx_skb_header(skb
, &q
->q
,
1648 if (before
> (u64
*)pos
) {
1649 left
= (u8
*)end
- (u8
*)txq
->stat
;
1650 end
= (void *)txq
->desc
+ left
;
1653 /* If current position is already at the end of the
1654 * ofld queue, reset the current to point to
1655 * start of the queue and update the end ptr as well.
1657 if (pos
== (u64
*)txq
->stat
) {
1658 left
= (u8
*)end
- (u8
*)txq
->stat
;
1659 end
= (void *)txq
->desc
+ left
;
1660 pos
= (void *)txq
->desc
;
1663 write_sgl(skb
, &q
->q
, (void *)pos
,
1665 (dma_addr_t
*)skb
->head
);
1666 #ifdef CONFIG_NEED_DMA_MAP_STATE
1667 skb
->dev
= q
->adap
->port
[0];
1668 skb
->destructor
= deferred_unmap_destructor
;
1670 last_desc
= q
->q
.pidx
+ ndesc
- 1;
1671 if (last_desc
>= q
->q
.size
)
1672 last_desc
-= q
->q
.size
;
1673 q
->q
.sdesc
[last_desc
].skb
= skb
;
1676 txq_advance(&q
->q
, ndesc
);
1678 if (unlikely(written
> 32)) {
1679 ring_tx_db(q
->adap
, &q
->q
, written
);
1683 /* Reacquire the Pending Send Queue Lock so we can unlink the
1684 * skb we've just successfully transferred to the TX Ring and
1685 * loop for the next skb which may be at the head of the
1686 * Pending Send Queue.
1688 spin_lock(&q
->sendq
.lock
);
1689 __skb_unlink(skb
, &q
->sendq
);
1690 if (is_ofld_imm(skb
))
1693 if (likely(written
))
1694 ring_tx_db(q
->adap
, &q
->q
, written
);
	/* Indicate that no thread is processing the Pending Send Queue
	 * anymore.
	 */
	q->service_ofldq_running = false;
1703 * ofld_xmit - send a packet through an offload queue
1704 * @q: the Tx offload queue
1707 * Send an offload packet through an SGE offload queue.
1709 static int ofld_xmit(struct sge_ofld_txq
*q
, struct sk_buff
*skb
)
1711 skb
->priority
= calc_tx_flits_ofld(skb
); /* save for restart */
1712 spin_lock(&q
->sendq
.lock
);
1714 /* Queue the new skb onto the Offload Queue's Pending Send Queue. If
1715 * that results in this new skb being the only one on the queue, start
1716 * servicing it. If there are other skbs already on the list, then
1717 * either the queue is currently being processed or it's been stopped
1718 * for some reason and it'll be restarted at a later time. Restart
1719 * paths are triggered by events like experiencing a DMA Mapping Error
1720 * or filling the Hardware TX Ring.
1722 __skb_queue_tail(&q
->sendq
, skb
);
1723 if (q
->sendq
.qlen
== 1)
1726 spin_unlock(&q
->sendq
.lock
);
1727 return NET_XMIT_SUCCESS
;
1731 * restart_ofldq - restart a suspended offload queue
1732 * @data: the offload queue to restart
1734 * Resumes transmission on a suspended Tx offload queue.
1736 static void restart_ofldq(unsigned long data
)
1738 struct sge_ofld_txq
*q
= (struct sge_ofld_txq
*)data
;
1740 spin_lock(&q
->sendq
.lock
);
1741 q
->full
= 0; /* the queue actually is completely empty now */
1743 spin_unlock(&q
->sendq
.lock
);
/**
 * skb_txq - return the Tx queue an offload packet should use
 * @skb: the packet
 *
 * Returns the Tx queue an offload packet should use as indicated by bits
 * 1-15 in the packet's queue_mapping.
 */
static inline unsigned int skb_txq(const struct sk_buff *skb)
{
	return skb->queue_mapping >> 1;
}

/**
 * is_ctrl_pkt - return whether an offload packet is a control packet
 * @skb: the packet
 *
 * Returns whether an offload packet should use an OFLD or a CTRL
 * Tx queue as indicated by bit 0 in the packet's queue_mapping.
 */
static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
{
	return skb->queue_mapping & 1;
}
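/*
 * Usage sketch (illustrative; the exact helper a ULD uses may differ): the
 * sender encodes its choice before handing the skb to t4_ofld_send(), e.g.
 *
 *	skb_set_queue_mapping(skb, (txq_idx << 1) | use_ctrl_queue);
 *
 * where use_ctrl_queue is 1 to target a control queue and 0 for an offload
 * queue, and txq_idx selects the queue within that set.
 */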
1770 static inline int ofld_send(struct adapter
*adap
, struct sk_buff
*skb
)
1772 unsigned int idx
= skb_txq(skb
);
1774 if (unlikely(is_ctrl_pkt(skb
))) {
1775 /* Single ctrl queue is a requirement for LE workaround path */
1776 if (adap
->tids
.nsftids
)
1778 return ctrl_xmit(&adap
->sge
.ctrlq
[idx
], skb
);
1780 return ofld_xmit(&adap
->sge
.ofldtxq
[idx
], skb
);
1784 * t4_ofld_send - send an offload packet
1785 * @adap: the adapter
1788 * Sends an offload packet. We use the packet queue_mapping to select the
1789 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1790 * should be sent as regular or control, bits 1-15 select the queue.
1792 int t4_ofld_send(struct adapter
*adap
, struct sk_buff
*skb
)
1797 ret
= ofld_send(adap
, skb
);
1803 * cxgb4_ofld_send - send an offload packet
1804 * @dev: the net device
1807 * Sends an offload packet. This is an exported version of @t4_ofld_send,
1808 * intended for ULDs.
1810 int cxgb4_ofld_send(struct net_device
*dev
, struct sk_buff
*skb
)
1812 return t4_ofld_send(netdev2adap(dev
), skb
);
1814 EXPORT_SYMBOL(cxgb4_ofld_send
);
1816 static inline void copy_frags(struct sk_buff
*skb
,
1817 const struct pkt_gl
*gl
, unsigned int offset
)
1821 /* usually there's just one frag */
1822 __skb_fill_page_desc(skb
, 0, gl
->frags
[0].page
,
1823 gl
->frags
[0].offset
+ offset
,
1824 gl
->frags
[0].size
- offset
);
1825 skb_shinfo(skb
)->nr_frags
= gl
->nfrags
;
1826 for (i
= 1; i
< gl
->nfrags
; i
++)
1827 __skb_fill_page_desc(skb
, i
, gl
->frags
[i
].page
,
1828 gl
->frags
[i
].offset
,
1831 /* get a reference to the last page, we don't own it */
1832 get_page(gl
->frags
[gl
->nfrags
- 1].page
);
/**
 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
 * @gl: the gather list
 * @skb_len: size of sk_buff main body if it carries fragments
 * @pull_len: amount of data to move to the sk_buff's main body
 *
 * Builds an sk_buff from the given packet gather list.  Returns the
 * sk_buff or %NULL if sk_buff allocation failed.
 */
struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
				   unsigned int skb_len, unsigned int pull_len)
{
	struct sk_buff *skb;

	/*
	 * Below we rely on RX_COPY_THRES being less than the smallest Rx
	 * buffer size, which is expected since buffers are at least
	 * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only one
	 * fragment.
	 */
	if (gl->tot_len <= RX_COPY_THRES) {
		skb = dev_alloc_skb(gl->tot_len);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, gl->tot_len);
		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
	} else {
		skb = dev_alloc_skb(skb_len);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, pull_len);
		skb_copy_to_linear_data(skb, gl->va, pull_len);

		copy_frags(skb, gl, pull_len);
		skb->len = gl->tot_len;
		skb->data_len = skb->len - pull_len;
		skb->truesize += skb->data_len;
	}
out:
	return skb;
}
EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
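/*
 * Illustrative example: a 1500-byte ingress packet exceeds RX_COPY_THRES
 * (256), so with skb_len = RX_PKT_SKB_LEN (512) and pull_len = RX_PULL_LEN
 * (128) the first 128 bytes are copied into the skb's linear area and the
 * remaining 1372 bytes stay in the free-list page fragments attached by
 * copy_frags().
 */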
1877 * t4_pktgl_free - free a packet gather list
1878 * @gl: the gather list
1880 * Releases the pages of a packet gather list. We do not own the last
1881 * page on the list and do not free it.
1883 static void t4_pktgl_free(const struct pkt_gl
*gl
)
1886 const struct page_frag
*p
;
1888 for (p
= gl
->frags
, n
= gl
->nfrags
- 1; n
--; p
++)
1893 * Process an MPS trace packet. Give it an unused protocol number so it won't
1894 * be delivered to anyone and send it to the stack for capture.
1896 static noinline
int handle_trace_pkt(struct adapter
*adap
,
1897 const struct pkt_gl
*gl
)
1899 struct sk_buff
*skb
;
1901 skb
= cxgb4_pktgl_to_skb(gl
, RX_PULL_LEN
, RX_PULL_LEN
);
1902 if (unlikely(!skb
)) {
1907 if (is_t4(adap
->params
.chip
))
1908 __skb_pull(skb
, sizeof(struct cpl_trace_pkt
));
1910 __skb_pull(skb
, sizeof(struct cpl_t5_trace_pkt
));
1912 skb_reset_mac_header(skb
);
1913 skb
->protocol
= htons(0xffff);
1914 skb
->dev
= adap
->port
[0];
1915 netif_receive_skb(skb
);
/**
 * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
 * @adap: the adapter
 * @hwtstamps: time stamp structure to update
 * @sgetstamp: 60bit iqe timestamp
 *
 * Every ingress queue entry carries a 60-bit timestamp in Core Clock ticks;
 * convert it to ktime_t and store it in @hwtstamps.
 */
static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
				     struct skb_shared_hwtstamps *hwtstamps,
				     u64 sgetstamp)
{
	u64 ns;
	u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);

	ns = div_u64(tmp, adap->params.vpd.cclk);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}
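/*
 * Worked arithmetic (illustrative; assumes a 250 MHz core clock, i.e.
 * vpd.cclk = 250000 kHz): each tick is 1,000,000 / 250,000 = 4 ns, so a raw
 * timestamp of 1,000 ticks converts to 4,000 ns; the "+ cclk / 2" term makes
 * the division round to nearest.
 */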
1941 static void do_gro(struct sge_eth_rxq
*rxq
, const struct pkt_gl
*gl
,
1942 const struct cpl_rx_pkt
*pkt
)
1944 struct adapter
*adapter
= rxq
->rspq
.adap
;
1945 struct sge
*s
= &adapter
->sge
;
1946 struct port_info
*pi
;
1948 struct sk_buff
*skb
;
1950 skb
= napi_get_frags(&rxq
->rspq
.napi
);
1951 if (unlikely(!skb
)) {
1953 rxq
->stats
.rx_drops
++;
1957 copy_frags(skb
, gl
, s
->pktshift
);
1958 skb
->len
= gl
->tot_len
- s
->pktshift
;
1959 skb
->data_len
= skb
->len
;
1960 skb
->truesize
+= skb
->data_len
;
1961 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1962 skb_record_rx_queue(skb
, rxq
->rspq
.idx
);
1963 pi
= netdev_priv(skb
->dev
);
1965 cxgb4_sgetim_to_hwtstamp(adapter
, skb_hwtstamps(skb
),
1967 if (rxq
->rspq
.netdev
->features
& NETIF_F_RXHASH
)
1968 skb_set_hash(skb
, (__force u32
)pkt
->rsshdr
.hash_val
,
1971 if (unlikely(pkt
->vlan_ex
)) {
1972 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
), ntohs(pkt
->vlan
));
1973 rxq
->stats
.vlan_ex
++;
1975 ret
= napi_gro_frags(&rxq
->rspq
.napi
);
1976 if (ret
== GRO_HELD
)
1977 rxq
->stats
.lro_pkts
++;
1978 else if (ret
== GRO_MERGED
|| ret
== GRO_MERGED_FREE
)
1979 rxq
->stats
.lro_merged
++;
1981 rxq
->stats
.rx_cso
++;
1985 * t4_ethrx_handler - process an ingress ethernet packet
1986 * @q: the response queue that received the packet
1987 * @rsp: the response queue descriptor holding the RX_PKT message
1988 * @si: the gather list of packet fragments
1990 * Process an ingress ethernet packet and deliver it to the stack.
1992 int t4_ethrx_handler(struct sge_rspq
*q
, const __be64
*rsp
,
1993 const struct pkt_gl
*si
)
1996 struct sk_buff
*skb
;
1997 const struct cpl_rx_pkt
*pkt
;
1998 struct sge_eth_rxq
*rxq
= container_of(q
, struct sge_eth_rxq
, rspq
);
1999 struct sge
*s
= &q
->adap
->sge
;
2000 int cpl_trace_pkt
= is_t4(q
->adap
->params
.chip
) ?
2001 CPL_TRACE_PKT
: CPL_TRACE_PKT_T5
;
2002 struct port_info
*pi
;
2004 if (unlikely(*(u8
*)rsp
== cpl_trace_pkt
))
2005 return handle_trace_pkt(q
->adap
, si
);
2007 pkt
= (const struct cpl_rx_pkt
*)rsp
;
2008 csum_ok
= pkt
->csum_calc
&& !pkt
->err_vec
&&
2009 (q
->netdev
->features
& NETIF_F_RXCSUM
);
2010 if ((pkt
->l2info
& htonl(RXF_TCP_F
)) &&
2011 !(cxgb_poll_busy_polling(q
)) &&
2012 (q
->netdev
->features
& NETIF_F_GRO
) && csum_ok
&& !pkt
->ip_frag
) {
2013 do_gro(rxq
, si
, pkt
);
2017 skb
= cxgb4_pktgl_to_skb(si
, RX_PKT_SKB_LEN
, RX_PULL_LEN
);
2018 if (unlikely(!skb
)) {
2020 rxq
->stats
.rx_drops
++;
2024 __skb_pull(skb
, s
->pktshift
); /* remove ethernet header padding */
2025 skb
->protocol
= eth_type_trans(skb
, q
->netdev
);
2026 skb_record_rx_queue(skb
, q
->idx
);
2027 if (skb
->dev
->features
& NETIF_F_RXHASH
)
2028 skb_set_hash(skb
, (__force u32
)pkt
->rsshdr
.hash_val
,
2033 pi
= netdev_priv(skb
->dev
);
2035 cxgb4_sgetim_to_hwtstamp(q
->adap
, skb_hwtstamps(skb
),
2037 if (csum_ok
&& (pkt
->l2info
& htonl(RXF_UDP_F
| RXF_TCP_F
))) {
2038 if (!pkt
->ip_frag
) {
2039 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
2040 rxq
->stats
.rx_cso
++;
2041 } else if (pkt
->l2info
& htonl(RXF_IP_F
)) {
2042 __sum16 c
= (__force __sum16
)pkt
->csum
;
2043 skb
->csum
= csum_unfold(c
);
2044 skb
->ip_summed
= CHECKSUM_COMPLETE
;
2045 rxq
->stats
.rx_cso
++;
2048 skb_checksum_none_assert(skb
);
2049 #ifdef CONFIG_CHELSIO_T4_FCOE
2050 #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
2051 RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
2053 if (!(pkt
->l2info
& cpu_to_be32(CPL_RX_PKT_FLAGS
))) {
2054 if ((pkt
->l2info
& cpu_to_be32(RXF_FCOE_F
)) &&
2055 (pi
->fcoe
.flags
& CXGB_FCOE_ENABLED
)) {
2056 if (!(pkt
->err_vec
& cpu_to_be16(RXERR_CSUM_F
)))
2057 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
2061 #undef CPL_RX_PKT_FLAGS
2062 #endif /* CONFIG_CHELSIO_T4_FCOE */
2065 if (unlikely(pkt
->vlan_ex
)) {
2066 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
), ntohs(pkt
->vlan
));
2067 rxq
->stats
.vlan_ex
++;
2069 skb_mark_napi_id(skb
, &q
->napi
);
2070 netif_receive_skb(skb
);
/**
 * restore_rx_bufs - put back a packet's Rx buffers
 * @si: the packet gather list
 * @q: the SGE free list
 * @frags: number of FL buffers to restore
 *
 * Puts back on an FL the Rx buffers associated with @si.  The buffers
 * have already been unmapped and are left unmapped, we mark them so to
 * prevent further unmapping attempts.
 *
 * This function undoes a series of @unmap_rx_buf calls when we find out
 * that the current packet can't be processed right away after all and we
 * need to come back to it later.  This is a very rare event and there's
 * no effort to make this particularly efficient.
 */
2089 static void restore_rx_bufs(const struct pkt_gl
*si
, struct sge_fl
*q
,
2092 struct rx_sw_desc
*d
;
2096 q
->cidx
= q
->size
- 1;
2099 d
= &q
->sdesc
[q
->cidx
];
2100 d
->page
= si
->frags
[frags
].page
;
2101 d
->dma_addr
|= RX_UNMAPPED_BUF
;
/**
 *	is_new_response - check if a response is newly written
 *	@r: the response descriptor
 *	@q: the response queue
 *
 *	Returns true if a response descriptor contains a yet unprocessed
 *	response.
 */
static inline bool is_new_response(const struct rsp_ctrl *r,
				   const struct sge_rspq *q)
{
	return (r->type_gen >> RSPD_GEN_S) == q->gen;
}
/**
 *	rspq_next - advance to the next entry in a response queue
 *	@q: the response queue
 *
 *	Updates the state of a response queue to advance it to the next entry.
 */
static inline void rspq_next(struct sge_rspq *q)
{
	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
	if (unlikely(++q->cidx == q->size)) {
		q->cidx = 0;
		q->gen ^= 1;
		q->cur_desc = q->desc;
	}
}
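/* Illustrative note (not from the original sources): the generation bit is how
 * is_new_response() above tells fresh descriptors from stale ones left over
 * from the previous pass through the ring.  The SGE stamps each response it
 * posts with the generation it is currently using, while rspq_next() flips
 * q->gen every time the consumer index wraps, so entries still carrying the
 * previous lap's generation compare unequal and are ignored until hardware
 * overwrites them.
 */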
/**
 *	process_responses - process responses from an SGE response queue
 *	@q: the ingress queue to process
 *	@budget: how many responses can be processed in this round
 *
 *	Process responses from an SGE response queue up to the supplied budget.
 *	Responses include received packets as well as control messages from FW
 *	or HW.
 *
 *	Additionally choose the interrupt holdoff time for the next interrupt
 *	on this queue.  If the system is under memory shortage use a fairly
 *	long delay to help recovery.
 */
static int process_responses(struct sge_rspq *q, int budget)
{
	int ret, rsp_type;
	int budget_left = budget;
	const struct rsp_ctrl *rc;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
	struct adapter *adapter = q->adap;
	struct sge *s = &adapter->sge;

	while (likely(budget_left)) {
		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
		if (!is_new_response(rc, q)) {
			if (q->flush_handler)
				q->flush_handler(q);
			break;
		}

		/* ensure the response is fully written before reading it */
		dma_rmb();
		rsp_type = RSPD_TYPE_G(rc->type_gen);
		if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
			struct page_frag *fp;
			struct pkt_gl si;
			const struct rx_sw_desc *rsd;
			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;

			if (len & RSPD_NEWBUF_F) {
				if (likely(q->offset > 0)) {
					free_rx_bufs(q->adap, &rxq->fl, 1);
					q->offset = 0;
				}
				len = RSPD_LEN_G(len);
			}
			si.tot_len = len;

			/* gather packet fragments */
			for (frags = 0, fp = si.frags; ; frags++, fp++) {
				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
				bufsz = get_buf_size(adapter, rsd);
				fp->page = rsd->page;
				fp->offset = q->offset;
				fp->size = min(bufsz, len);
				len -= fp->size;
				if (!len)
					break;
				unmap_rx_buf(q->adap, &rxq->fl);
			}

			si.sgetstamp = SGE_TIMESTAMP_G(
					be64_to_cpu(rc->last_flit));
			/* Last buffer remains mapped so explicitly make it
			 * coherent for CPU access.
			 */
			dma_sync_single_for_cpu(q->adap->pdev_dev,
						get_buf_addr(rsd),
						fp->size, DMA_FROM_DEVICE);

			si.va = page_address(si.frags[0].page) +
				si.frags[0].offset;
			prefetch(si.va);

			si.nfrags = frags + 1;
			ret = q->handler(q, q->cur_desc, &si);
			if (likely(ret == 0))
				q->offset += ALIGN(fp->size, s->fl_align);
			else
				restore_rx_bufs(&si, &rxq->fl, frags);
		} else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
			ret = q->handler(q, q->cur_desc, NULL);
		} else {
			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
		}

		if (unlikely(ret)) {
			/* couldn't process descriptor, back off for recovery */
			q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
			break;
		}

		rspq_next(q);
		budget_left--;
	}

	if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
		__refill_fl(q->adap, &rxq->fl);
	return budget - budget_left;
}
#ifdef CONFIG_NET_RX_BUSY_POLL
int cxgb_busy_poll(struct napi_struct *napi)
{
	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
	unsigned int params, work_done;
	u32 val;

	if (!cxgb_poll_lock_poll(q))
		return LL_FLUSH_BUSY;

	work_done = process_responses(q, 4);
	params = QINTR_TIMER_IDX_V(TIMERREG_COUNTER0_X) | QINTR_CNT_EN_V(1);
	q->next_intr_params = params;
	val = CIDXINC_V(work_done) | SEINTARM_V(params);
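	/* Illustrative note (not from the original sources): the GTS update
	 * written below packs three fields into one word: CIDXINC (how many
	 * responses were just consumed, advancing the hardware's view of the
	 * consumer index), SEINTARM (the holdoff timer/counter selection that
	 * re-arms the next interrupt) and INGRESSQID (which ingress queue the
	 * update applies to).
	 */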
	/* If we don't have access to the new User GTS (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(!q->bar2_addr))
		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
			     val | INGRESSQID_V((u32)q->cntxt_id));
	else {
		writel(val | INGRESSQID_V(q->bar2_qid),
		       q->bar2_addr + SGE_UDB_GTS);
		wmb();
	}

	cxgb_poll_unlock_poll(q);
	return work_done;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
/**
 *	napi_rx_handler - the NAPI handler for Rx processing
 *	@napi: the napi instance
 *	@budget: how many packets we can process in this round
 *
 *	Handler for new data events when using NAPI.  This does not need any
 *	locking or protection from interrupts as data interrupts are off at
 *	this point and other adapter interrupts do not interfere (the latter
 *	is not a concern at all with MSI-X as non-data interrupts then have
 *	a separate handler).
 */
static int napi_rx_handler(struct napi_struct *napi, int budget)
{
	unsigned int params;
	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
	int work_done;
	u32 val;

	if (!cxgb_poll_lock_napi(q))
		return budget;

	work_done = process_responses(q, budget);
	if (likely(work_done < budget)) {
		int timer_index;

		napi_complete_done(napi, work_done);
		timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);

		if (q->adaptive_rx) {
			if (work_done > max(timer_pkt_quota[timer_index],
					    MIN_NAPI_WORK))
				timer_index = (timer_index + 1);
			else
				timer_index = timer_index - 1;

			timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
			q->next_intr_params =
					QINTR_TIMER_IDX_V(timer_index) |
					QINTR_CNT_EN_V(0);
			params = q->next_intr_params;
		} else {
			params = q->next_intr_params;
			q->next_intr_params = q->intr_params;
		}
	} else
		params = QINTR_TIMER_IDX_V(7);

	val = CIDXINC_V(work_done) | SEINTARM_V(params);

	/* If we don't have access to the new User GTS (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(q->bar2_addr == NULL)) {
		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
			     val | INGRESSQID_V((u32)q->cntxt_id));
	} else {
		writel(val | INGRESSQID_V(q->bar2_qid),
		       q->bar2_addr + SGE_UDB_GTS);
		wmb();
	}
	cxgb_poll_unlock_napi(q);
	return work_done;
}
/*
 * The MSI-X interrupt handler for an SGE response queue.
 */
irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
{
	struct sge_rspq *q = cookie;

	napi_schedule(&q->napi);
	return IRQ_HANDLED;
}
/*
 * Process the indirect interrupt entries in the interrupt queue and kick off
 * NAPI for each queue that has generated an entry.
 */
static unsigned int process_intrq(struct adapter *adap)
{
	unsigned int credits;
	const struct rsp_ctrl *rc;
	struct sge_rspq *q = &adap->sge.intrq;
	u32 val;

	spin_lock(&adap->sge.intrq_lock);
	for (credits = 0; ; credits++) {
		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
		if (!is_new_response(rc, q))
			break;

		dma_rmb();
		if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
			unsigned int qid = ntohl(rc->pldbuflen_qid);

			qid -= adap->sge.ingr_start;
			napi_schedule(&adap->sge.ingr_map[qid]->napi);
		}

		rspq_next(q);
	}

	val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);

	/* If we don't have access to the new User GTS (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(q->bar2_addr == NULL)) {
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     val | INGRESSQID_V(q->cntxt_id));
	} else {
		writel(val | INGRESSQID_V(q->bar2_qid),
		       q->bar2_addr + SGE_UDB_GTS);
		wmb();
	}
	spin_unlock(&adap->sge.intrq_lock);
	return credits;
}
/*
 * The MSI interrupt handler, which handles data events from SGE response queues
 * as well as error and other async events as they all use the same MSI vector.
 */
static irqreturn_t t4_intr_msi(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	if (adap->flags & MASTER_PF)
		t4_slow_intr_handler(adap);
	process_intrq(adap);
	return IRQ_HANDLED;
}
/*
 * Interrupt handler for legacy INTx interrupts.
 * Handles data events from SGE response queues as well as error and other
 * async events as they all use the same interrupt line.
 */
static irqreturn_t t4_intr_intx(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
	if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) |
	    process_intrq(adap))
		return IRQ_HANDLED;
	return IRQ_NONE;		/* probably shared interrupt */
}
/**
 *	t4_intr_handler - select the top-level interrupt handler
 *	@adap: the adapter
 *
 *	Selects the top-level interrupt handler based on the type of interrupts
 *	(MSI-X, MSI, or INTx).
 */
irq_handler_t t4_intr_handler(struct adapter *adap)
{
	if (adap->flags & USING_MSIX)
		return t4_sge_intr_msix;
	if (adap->flags & USING_MSI)
		return t4_intr_msi;
	return t4_intr_intx;
}
static void sge_rx_timer_cb(unsigned long data)
{
	unsigned long m;
	unsigned int i;
	struct adapter *adap = (struct adapter *)data;
	struct sge *s = &adap->sge;

	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
		for (m = s->starving_fl[i]; m; m &= m - 1) {
			struct sge_eth_rxq *rxq;
			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
			struct sge_fl *fl = s->egr_map[id];

			clear_bit(id, s->starving_fl);
			smp_mb__after_atomic();

			if (fl_starving(adap, fl)) {
				rxq = container_of(fl, struct sge_eth_rxq, fl);
				if (napi_reschedule(&rxq->rspq.napi))
					fl->starving++;
				else
					set_bit(id, s->starving_fl);
			}
		}

	/* The remainder of the SGE RX Timer Callback routine is dedicated to
	 * global Master PF activities like checking for chip ingress stalls,
	 * etc.
	 */
	if (!(adap->flags & MASTER_PF))
		goto done;

	t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);

done:
	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}
static void sge_tx_timer_cb(unsigned long data)
{
	unsigned long m;
	unsigned int i, budget;
	struct adapter *adap = (struct adapter *)data;
	struct sge *s = &adap->sge;

	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
		for (m = s->txq_maperr[i]; m; m &= m - 1) {
			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
			struct sge_ofld_txq *txq = s->egr_map[id];

			clear_bit(id, s->txq_maperr);
			tasklet_schedule(&txq->qresume_tsk);
		}

	budget = MAX_TIMER_TX_RECLAIM;
	i = s->ethtxq_rover;
	do {
		struct sge_eth_txq *q = &s->ethtxq[i];

		if (q->q.in_use &&
		    time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
		    __netif_tx_trylock(q->txq)) {
			int avail = reclaimable(&q->q);

			if (avail) {
				if (avail > budget)
					avail = budget;

				free_tx_desc(adap, &q->q, avail, true);
				q->q.in_use -= avail;
				budget -= avail;
			}
			__netif_tx_unlock(q->txq);
		}

		if (++i >= s->ethqsets)
			i = 0;
	} while (budget && i != s->ethtxq_rover);
	s->ethtxq_rover = i;
	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
}
/**
 *	bar2_address - return the BAR2 address for an SGE Queue's Registers
 *	@adapter: the adapter
 *	@qid: the SGE Queue ID
 *	@qtype: the SGE Queue Type (Egress or Ingress)
 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 *	Returns the BAR2 address for the SGE Queue Registers associated with
 *	@qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
 *	returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
 *	Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
 *	Registers are supported (e.g. the Write Combining Doorbell Buffer).
 */
static void __iomem *bar2_address(struct adapter *adapter,
				  unsigned int qid,
				  enum t4_bar2_qtype qtype,
				  unsigned int *pbar2_qid)
{
	u64 bar2_qoffset;
	int ret;

	ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
				&bar2_qoffset, pbar2_qid);
	if (ret)
		return NULL;

	return adapter->bar2 + bar2_qoffset;
}
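/* Illustrative usage note (not from the original sources): callers below stash
 * the value returned by bar2_address() in their queue state (for example
 * iq->bar2_addr together with iq->bar2_qid) and later ring the doorbell with
 *
 *	writel(val | INGRESSQID_V(q->bar2_qid), q->bar2_addr + SGE_UDB_GTS);
 *
 * falling back to a write of MYPF_REG(SGE_PF_GTS_A) when bar2_addr is NULL,
 * i.e. when BAR2 SGE registers aren't available.
 */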
/* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
 */
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
		     struct net_device *dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t hnd,
		     rspq_flush_handler_t flush_hnd, int cong)
{
	int ret, flsz = 0;
	struct fw_iq_cmd c;
	struct sge *s = &adap->sge;
	struct port_info *pi = netdev_priv(dev);

	/* Size needs to be multiple of 16, including status entry. */
	iq->size = roundup(iq->size, 16);

	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
			      &iq->phys_addr, NULL, 0,
			      dev_to_node(adap->pdev_dev));
	if (!iq->desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
			    FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
				 FW_LEN16(c));
	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
		FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
		FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
		FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
		FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
							 -intr_idx - 1));
	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
		FW_IQ_CMD_IQGTSMODE_F |
		FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
		FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
	c.iqsize = htons(iq->size);
	c.iqaddr = cpu_to_be64(iq->phys_addr);
	if (cong >= 0)
		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
	if (fl) {
		enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);

		/* Allocate the ring for the hardware free list (with space
		 * for its status page) along with the associated software
		 * descriptor ring.  The free list size needs to be a multiple
		 * of the Egress Queue Unit and at least 2 Egress Units larger
		 * than the SGE's Egress Congestion Threshold
		 * (fl_starve_thres - 1).
		 */
		if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
			fl->size = s->fl_starve_thres - 1 + 2 * 8;
		fl->size = roundup(fl->size, 8);
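		/* Worked example (illustrative numbers, not from the original
		 * sources): an Egress Queue Unit holds 8 Free List pointers,
		 * so with a hypothetical fl_starve_thres of 677 the clamp
		 * above yields 677 - 1 + 2 * 8 = 692 entries, which roundup()
		 * pads to 696 so the ring is a whole number of Egress Queue
		 * Units.
		 */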
		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
				      sizeof(struct rx_sw_desc), &fl->addr,
				      &fl->sdesc, s->stat_len,
				      dev_to_node(adap->pdev_dev));
		if (!fl->desc)
			goto fl_nomem;

		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
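		/* Illustrative arithmetic (assuming the driver's usual 8-byte
		 * Free List pointers and 64-byte struct tx_desc): a 696-entry
		 * free list spans 696 / 8 = 87 Egress Queue Units, and a
		 * 64-byte status page adds one more, so flsz would be 88.
		 */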
		c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
					     FW_IQ_CMD_FL0FETCHRO_F |
					     FW_IQ_CMD_FL0DATARO_F |
					     FW_IQ_CMD_FL0PADEN_F);
		if (cong >= 0)
			c.iqns_to_fl0congen |=
				htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
				      FW_IQ_CMD_FL0CONGCIF_F |
				      FW_IQ_CMD_FL0CONGEN_F);
		/* In T6, for egress queue type FL there is internal overhead
		 * of 16B for header going into FLM module.  Hence the maximum
		 * allowed burst size is 448 bytes.  For T4/T5, the hardware
		 * doesn't coalesce fetch requests if more than 64 bytes of
		 * Free List pointers are provided, so we use a 128-byte Fetch
		 * Burst Minimum there (T6 implements coalescing so we can use
		 * the smaller 64-byte value there).
		 */
		c.fl0dcaen_to_fl0cidxfthresh =
			htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
						   FETCHBURSTMIN_128B_X :
						   FETCHBURSTMIN_64B_X) |
			      FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
						   FETCHBURSTMAX_512B_X :
						   FETCHBURSTMAX_256B_X));
		c.fl0size = htons(flsz);
		c.fl0addr = cpu_to_be64(fl->addr);
	}
	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
	if (ret)
		goto err;

	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
	iq->cur_desc = iq->desc;
	iq->cidx = 0;
	iq->gen = 1;
	iq->next_intr_params = iq->intr_params;
	iq->cntxt_id = ntohs(c.iqid);
	iq->abs_id = ntohs(c.physiqid);
	iq->bar2_addr = bar2_address(adap,
				     iq->cntxt_id,
				     T4_BAR2_QTYPE_INGRESS,
				     &iq->bar2_qid);
	iq->size--;			/* subtract status entry */
	iq->netdev = dev;
	iq->handler = hnd;
	iq->flush_handler = flush_hnd;

	memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
	skb_queue_head_init(&iq->lro_mgr.lroq);

	/* set offset to -1 to distinguish ingress queues without FL */
	iq->offset = fl ? 0 : -1;

	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
	if (fl) {
		fl->cntxt_id = ntohs(c.fl0id);
		fl->avail = fl->pend_cred = 0;
		fl->pidx = fl->cidx = 0;
		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;

		/* Note, we must initialize the BAR2 Free List User Doorbell
		 * information before refilling the Free List!
		 */
		fl->bar2_addr = bar2_address(adap,
					     fl->cntxt_id,
					     T4_BAR2_QTYPE_EGRESS,
					     &fl->bar2_qid);
		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
	}
	/* For T5 and later we attempt to set up the Congestion Manager values
	 * of the new RX Ethernet Queue.  This should really be handled by
	 * firmware because it's more complex than any host driver wants to
	 * get involved with and it's different per chip and this is almost
	 * certainly wrong.  Firmware would be wrong as well, but it would be
	 * a lot easier to fix in one place ...  For now we do something very
	 * simple (and hopefully less wrong).
	 */
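	/* Illustrative example (not from the original sources): with a
	 * congestion channel map of cong = 0x5 (channels 0 and 2) and a
	 * hypothetical cng_ch_bits_log of 2, the loop below computes
	 * ch_map = (1 << (0 << 2)) | (1 << (2 << 2)) = 0x101, i.e. one bit
	 * per congested channel, spaced 1 << cng_ch_bits_log positions apart
	 * in the CONMCTXT_CNGCHMAP field.
	 */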
	if (!is_t4(adap->params.chip) && cong >= 0) {
		u32 param, val, ch_map = 0;
		int i;
		u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;

		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
			 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
		if (cong == 0) {
			val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
		} else {
			val =
			    CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
			for (i = 0; i < 4; i++) {
				if (cong & (1 << i))
					ch_map |= 1 << (i << cng_ch_bits_log);
			}
			val |= CONMCTXT_CNGCHMAP_V(ch_map);
		}
		ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
				    &param, &val);
		if (ret)
			dev_warn(adap->pdev_dev, "Failed to set Congestion"
				 " Manager Context for Ingress Queue %d: %d\n",
				 iq->cntxt_id, -ret);
	}

	return 0;
fl_nomem:
	ret = -ENOMEM;
err:
	if (iq->desc) {
		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
				  iq->desc, iq->phys_addr);
		iq->desc = NULL;
	}
	if (fl && fl->desc) {
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
				  fl->desc, fl->addr);
		fl->desc = NULL;
	}
	return ret;
}
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
	q->cntxt_id = id;
	q->bar2_addr = bar2_address(adap,
				    q->cntxt_id,
				    T4_BAR2_QTYPE_EGRESS,
				    &q->bar2_qid);
	q->in_use = 0;
	q->cidx = q->pidx = 0;
	q->stops = q->restarts = 0;
	q->stat = (void *)&q->desc[q->size];
	spin_lock_init(&q->db_lock);
	adap->sge.egr_map[id - adap->sge.egr_start] = q;
}
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct net_device *dev, struct netdev_queue *netdevq,
			 unsigned int iqid)
{
	int ret, nentries;
	struct fw_eq_eth_cmd c;
	struct sge *s = &adap->sge;
	struct port_info *pi = netdev_priv(dev);

	/* Add status entries */
	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
				 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
				 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
				 netdev_queue_numa_node_read(netdevq));
	if (!txq->q.desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
			    FW_EQ_ETH_CMD_PFN_V(adap->pf) |
			    FW_EQ_ETH_CMD_VFN_V(0));
	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
				 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
	c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
			   FW_EQ_ETH_CMD_VIID_V(pi->viid));
	c.fetchszm_to_iqid =
		htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
		      FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
		      FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
	c.dcaen_to_eqsize =
		htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
		      FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
		      FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
		      FW_EQ_ETH_CMD_EQSIZE_V(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
	if (ret) {
		kfree(txq->q.sdesc);
		txq->q.sdesc = NULL;
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
	txq->txq = netdevq;
	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
	txq->mapping_err = 0;
	return 0;
}
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
			  struct net_device *dev, unsigned int iqid,
			  unsigned int cmplqid)
{
	int ret, nentries;
	struct fw_eq_ctrl_cmd c;
	struct sge *s = &adap->sge;
	struct port_info *pi = netdev_priv(dev);

	/* Add status entries */
	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
				 NULL, 0, dev_to_node(adap->pdev_dev));
	if (!txq->q.desc)
		return -ENOMEM;

	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
			    FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
			    FW_EQ_CTRL_CMD_VFN_V(0));
	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
				 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
	c.physeqid_pkd = htonl(0);
	c.fetchszm_to_iqid =
		htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
		      FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
		      FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
	c.dcaen_to_eqsize =
		htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
		      FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
		      FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
		      FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
	if (ret) {
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
	txq->adap = adap;
	skb_queue_head_init(&txq->sendq);
	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
	txq->full = 0;
	return 0;
}
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
			  struct net_device *dev, unsigned int iqid)
{
	int ret, nentries;
	struct fw_eq_ofld_cmd c;
	struct sge *s = &adap->sge;
	struct port_info *pi = netdev_priv(dev);

	/* Add status entries */
	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
				 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
				 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
				 NUMA_NO_NODE);
	if (!txq->q.desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
			    FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
			    FW_EQ_OFLD_CMD_VFN_V(0));
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
				 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
	c.fetchszm_to_iqid =
		htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
		      FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
		      FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
	c.dcaen_to_eqsize =
		htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
		      FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
		      FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
		      FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
	if (ret) {
		kfree(txq->q.sdesc);
		txq->q.sdesc = NULL;
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
	txq->adap = adap;
	skb_queue_head_init(&txq->sendq);
	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
	txq->full = 0;
	txq->mapping_err = 0;
	return 0;
}
static void free_txq(struct adapter *adap, struct sge_txq *q)
{
	struct sge *s = &adap->sge;

	dma_free_coherent(adap->pdev_dev,
			  q->size * sizeof(struct tx_desc) + s->stat_len,
			  q->desc, q->phys_addr);
	q->cntxt_id = 0;
	q->sdesc = NULL;
	q->desc = NULL;
}
static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
			 struct sge_fl *fl)
{
	struct sge *s = &adap->sge;
	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;

	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
		   rq->cntxt_id, fl_id, 0xffff);
	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
			  rq->desc, rq->phys_addr);
	napi_hash_del(&rq->napi);
	netif_napi_del(&rq->napi);
	rq->netdev = NULL;
	rq->cntxt_id = rq->abs_id = 0;
	rq->desc = NULL;

	if (fl) {
		free_rx_bufs(adap, fl, fl->avail);
		dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
				  fl->desc, fl->addr);
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		fl->cntxt_id = 0;
		fl->desc = NULL;
	}
}
/**
 *	t4_free_ofld_rxqs - free a block of consecutive Rx queues
 *	@adap: the adapter
 *	@n: number of queues
 *	@q: pointer to first queue
 *
 *	Release the resources of a consecutive block of offload Rx queues.
 */
void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++)
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
}
/**
 *	t4_free_sge_resources - free SGE resources
 *	@adap: the adapter
 *
 *	Frees resources used by the SGE queue sets.
 */
void t4_free_sge_resources(struct adapter *adap)
{
	int i;
	struct sge_eth_rxq *eq;
	struct sge_eth_txq *etq;

	/* stop all Rx queues in order to start them draining */
	for (i = 0; i < adap->sge.ethqsets; i++) {
		eq = &adap->sge.ethrxq[i];
		if (eq->rspq.desc)
			t4_iq_stop(adap, adap->mbox, adap->pf, 0,
				   FW_IQ_TYPE_FL_INT_CAP,
				   eq->rspq.cntxt_id,
				   eq->fl.size ? eq->fl.cntxt_id : 0xffff,
				   0xffff);
	}

	/* clean up Ethernet Tx/Rx queues */
	for (i = 0; i < adap->sge.ethqsets; i++) {
		eq = &adap->sge.ethrxq[i];
		if (eq->rspq.desc)
			free_rspq_fl(adap, &eq->rspq,
				     eq->fl.size ? &eq->fl : NULL);

		etq = &adap->sge.ethtxq[i];
		if (etq->q.desc) {
			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
				       etq->q.cntxt_id);
			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
			kfree(etq->q.sdesc);
			free_txq(adap, &etq->q);
		}
	}

	/* clean up RDMA and iSCSI Rx queues */
	t4_free_ofld_rxqs(adap, adap->sge.iscsiqsets, adap->sge.iscsirxq);
	t4_free_ofld_rxqs(adap, adap->sge.niscsitq, adap->sge.iscsitrxq);
	t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
	t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);

	/* clean up offload Tx queues */
	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
		struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];

		if (q->q.desc) {
			tasklet_kill(&q->qresume_tsk);
			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
					q->q.cntxt_id);
			free_tx_desc(adap, &q->q, q->q.in_use, false);
			kfree(q->q.sdesc);
			__skb_queue_purge(&q->sendq);
			free_txq(adap, &q->q);
		}
	}

	/* clean up control Tx queues */
	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];

		if (cq->q.desc) {
			tasklet_kill(&cq->qresume_tsk);
			t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
					cq->q.cntxt_id);
			__skb_queue_purge(&cq->sendq);
			free_txq(adap, &cq->q);
		}
	}

	if (adap->sge.fw_evtq.desc)
		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);

	if (adap->sge.intrq.desc)
		free_rspq_fl(adap, &adap->sge.intrq, NULL);

	/* clear the reverse egress queue map */
	memset(adap->sge.egr_map, 0,
	       adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
}
void t4_sge_start(struct adapter *adap)
{
	adap->sge.ethtxq_rover = 0;
	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
}
/**
 *	t4_sge_stop - disable SGE operation
 *	@adap: the adapter
 *
 *	Stop tasklets and timers associated with the DMA engine.  Note that
 *	this is effective only if measures have been taken to disable any HW
 *	events that may restart them.
 */
void t4_sge_stop(struct adapter *adap)
{
	int i;
	struct sge *s = &adap->sge;

	if (in_interrupt())  /* actions below require waiting */
		return;

	if (s->rx_timer.function)
		del_timer_sync(&s->rx_timer);
	if (s->tx_timer.function)
		del_timer_sync(&s->tx_timer);

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
		struct sge_ofld_txq *q = &s->ofldtxq[i];

		if (q->q.desc)
			tasklet_kill(&q->qresume_tsk);
	}
	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
		struct sge_ctrl_txq *cq = &s->ctrlq[i];

		if (cq->q.desc)
			tasklet_kill(&cq->qresume_tsk);
	}
}
/**
 *	t4_sge_init_soft - grab core SGE values needed by SGE code
 *	@adap: the adapter
 *
 *	We need to grab the SGE operating parameters that we need to have
 *	in order to do our job and make sure we can live with them.
 */
static int t4_sge_init_soft(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
	u32 ingress_rx_threshold;

	/*
	 * Verify that CPL messages are going to the Ingress Queue for
	 * process_responses() and that only packet data is going to the
	 * Free Lists.
	 */
	if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
	    RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
		dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
		return -EINVAL;
	}

	/*
	 * Validate the Host Buffer Register Array indices that we want to
	 * use ...
	 *
	 * XXX Note that we should really read through the Host Buffer Size
	 * XXX register array and find the indices of the Buffer Sizes which
	 * XXX meet our needs!
	 */
	#define READ_FL_BUF(x) \
		t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))

	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);

	/* We only bother using the Large Page logic if the Large Page Buffer
	 * is larger than our Page Size Buffer.
	 */
	if (fl_large_pg <= fl_small_pg)
		fl_large_pg = 0;

	#undef READ_FL_BUF

	/* The Page Size Buffer must be exactly equal to our Page Size and the
	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
	 */
	if (fl_small_pg != PAGE_SIZE ||
	    (fl_large_pg & (fl_large_pg-1)) != 0) {
		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
			fl_small_pg, fl_large_pg);
		return -EINVAL;
	}
	if (fl_large_pg)
		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;

	if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
	    fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
		dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
			fl_small_mtu, fl_large_mtu);
		return -EINVAL;
	}

	/*
	 * Retrieve our RX interrupt holdoff timer values and counter
	 * threshold values from the SGE parameters.
	 */
	timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
	timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
	timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
	s->timer_val[0] = core_ticks_to_us(adap,
		TIMERVALUE0_G(timer_value_0_and_1));
	s->timer_val[1] = core_ticks_to_us(adap,
		TIMERVALUE1_G(timer_value_0_and_1));
	s->timer_val[2] = core_ticks_to_us(adap,
		TIMERVALUE2_G(timer_value_2_and_3));
	s->timer_val[3] = core_ticks_to_us(adap,
		TIMERVALUE3_G(timer_value_2_and_3));
	s->timer_val[4] = core_ticks_to_us(adap,
		TIMERVALUE4_G(timer_value_4_and_5));
	s->timer_val[5] = core_ticks_to_us(adap,
		TIMERVALUE5_G(timer_value_4_and_5));

	ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
	s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
	s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
	s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
	s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);

	return 0;
}
/**
 *	t4_sge_init - initialize SGE
 *	@adap: the adapter
 *
 *	Perform low-level SGE code initialization needed every time after a
 *	chip reset.
 */
int t4_sge_init(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	u32 sge_control, sge_conm_ctrl;
	int ret, egress_threshold;

	/*
	 * Ingress Padding Boundary and Egress Status Page Size are set up by
	 * t4_fixup_host_params().
	 */
	sge_control = t4_read_reg(adap, SGE_CONTROL_A);
	s->pktshift = PKTSHIFT_G(sge_control);
	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;

	s->fl_align = t4_fl_pkt_align(adap);
	ret = t4_sge_init_soft(adap);
	if (ret < 0)
		return ret;
	/*
	 * A FL with <= fl_starve_thres buffers is starving and a periodic
	 * timer will attempt to refill it.  This needs to be larger than the
	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
	 * stuck waiting for new packets while the SGE is waiting for us to
	 * give it more Free List entries.  (Note that the SGE's Egress
	 * Congestion Threshold is in units of 2 Free List pointers.)  For T4,
	 * there was only a single field to control this.  For T5 there's the
	 * original field which now only applies to Unpacked Mode Free List
	 * buffers and a new field which only applies to Packed Mode Free List
	 * buffers.
	 */
	sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T4:
		egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
		break;
	case CHELSIO_T5:
		egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
		break;
	case CHELSIO_T6:
		egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
		break;
	default:
		dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
			CHELSIO_CHIP_VERSION(adap->params.chip));
		return -EINVAL;
	}
	s->fl_starve_thres = 2*egress_threshold + 1;
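	/* Illustrative note (not from the original sources): the Egress
	 * Congestion Threshold read above is in units of 2 Free List
	 * pointers, so a hypothetical egress_threshold of 338 gives
	 * fl_starve_thres = 2 * 338 + 1 = 677 buffers - one more than the
	 * congestion threshold expressed in individual Free List entries -
	 * ensuring a refill is attempted before the SGE stalls waiting for
	 * pointers.
	 */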
	t4_idma_monitor_init(adap, &s->idma_monitor);

	/* Set up timers used for recurring callbacks to process RX and TX
	 * administrative tasks.
	 */
	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);

	spin_lock_init(&s->intrq_lock);