/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/xfrm.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/busy_poll.h>
#ifdef CONFIG_CHELSIO_T4_FCOE
#include <scsi/fc/fc_fcoe.h>
#endif /* CONFIG_CHELSIO_T4_FCOE */
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_ptp.h"
#include "cxgb4_uld.h"
#include "cxgb4_tc_mqprio.h"
/*
 * Rx buffer size.  We use largish buffers if possible but settle for single
 * pages under memory shortage.
 */
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif
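/* Worked example of the order selection above (assuming a typical 4 KB
 * page, i.e. PAGE_SHIFT == 12): FL_PG_ORDER = 16 - 12 = 4, so a "large"
 * Free List buffer is 2^4 pages = 64 KB; on a 64 KB-page system the order
 * collapses to 0 and a single page is used instead.
 */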
/* RX_PULL_LEN should be <= RX_COPY_THRES */
#define RX_COPY_THRES    256
#define RX_PULL_LEN      128

/*
 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
 */
#define RX_PKT_SKB_LEN   512
/*
 * Max number of Tx descriptors we clean up at a time.  Should be modest as
 * freeing skbs isn't cheap and it happens while holding locks.  We just need
 * to free packets faster than they arrive, we eventually catch up and keep
 * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.  It should
 * also match the CIDX Flush Threshold.
 */
#define MAX_TX_RECLAIM 32

/*
 * Max number of Rx buffers we replenish at a time.  Again keep this modest,
 * allocating buffers isn't cheap either.
 */
#define MAX_RX_REFILL 16U

/*
 * Period of the Rx queue check timer.  This timer is infrequent as it has
 * something to do only when the system experiences severe memory shortage.
 */
#define RX_QCHECK_PERIOD (HZ / 2)

/*
 * Period of the Tx queue check timer.
 */
#define TX_QCHECK_PERIOD (HZ / 2)

/*
 * Max number of Tx descriptors to be reclaimed by the Tx timer.
 */
#define MAX_TIMER_TX_RECLAIM 100

/*
 * Timer index used when backing off due to memory shortage.
 */
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * Suspension threshold for non-Ethernet Tx queues.  We require enough room
 * for a full sized WR.
 */
#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
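/* Illustrative numbers for the threshold above (the concrete values are
 * assumptions, not taken from this file): with a 512-byte SGE_MAX_WR_LEN
 * and 64-byte Tx descriptors, a non-Ethernet Tx queue is suspended once
 * fewer than 512 / 64 = 8 descriptors remain, i.e. once a maximum-sized
 * Work Request would no longer fit.
 */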
/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256

/*
 * Max size of a WR sent through a control Tx queue.
 */
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN

struct rx_sw_desc {                /* SW state per Rx descriptor */
	struct page *page;
	dma_addr_t dma_addr;
};
/*
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
 * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
 * We could easily support more but there doesn't seem to be much need for
 * that ...
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000

static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
					  unsigned int mtu)
{
	struct sge *s = &adapter->sge;

	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
}

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
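/* Worked example of the sizing above (the pktshift and fl_align values are
 * typical settings chosen purely for illustration): with a 2-byte packet
 * shift and 64-byte Free List alignment,
 * FL_MTU_SMALL_BUFSIZE = ALIGN(2 + 14 + 4 + 1500, 64) = 1536 bytes and
 * FL_MTU_LARGE_BUFSIZE = ALIGN(2 + 14 + 4 + 9000, 64) = 9024 bytes.
 */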
/*
 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array.  We also use bit 4, when the buffer has been unmapped
 * for DMA, but this is of course never sent to the hardware and is only used
 * to prevent double unmappings.  All of the above requires that the Free List
 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte or a power of 2 greater in alignment.  Since the SGE's minimal
 * Free List Buffer alignment is 32 bytes, this works out for us ...
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File.  We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* buffer large (FL_PG_ORDER) page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};

static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
#define MIN_NAPI_WORK  1
static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
}

static inline bool is_buf_mapped(const struct rx_sw_desc *d)
{
	return !(d->dma_addr & RX_UNMAPPED_BUF);
}

/**
 *	txq_avail - return the number of available slots in a Tx queue
 *	@q: the Tx queue
 *
 *	Returns the number of descriptors in a Tx queue available to write new
 *	packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}
/**
 *	fl_cap - return the capacity of a free-buffer list
 *	@fl: the FL
 *
 *	Returns the capacity of a free-buffer list.  The capacity is less than
 *	the size because one descriptor needs to be left unpopulated, otherwise
 *	HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}
/**
 *	fl_starving - return whether a Free List is starving.
 *	@adapter: pointer to the adapter
 *	@fl: the Free List
 *
 *	Tests specified Free List to see whether the number of buffers
 *	available to the hardware has fallen below our "starvation"
 *	threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
		  dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL(cxgb4_map_skb);
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
		      const dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++)
		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
/**
 *	deferred_unmap_destructor - unmap a packet when it is freed
 *	@skb: the packet
 *
 *	This is the packet destructor used for Tx packets that need to remain
 *	mapped until they are freed rather than until their Tx descriptors are
 *	freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
#endif
/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
		  unsigned int n, bool unmap)
{
	unsigned int cidx = q->cidx;
	struct tx_sw_desc *d;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {                       /* an SGL is present */
			if (unmap && d->addr[0]) {
				unmap_skb(adap->pdev_dev, d->skb, d->addr);
				memset(d->addr, 0, sizeof(d->addr));
			}
			dev_consume_skb_any(d->skb);
			d->skb = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}
/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));

	hw_cidx -= q->cidx;
	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}
/**
 *	reclaim_completed_tx - reclaims completed TX Descriptors
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@maxreclaim: the maximum number of TX Descriptors to reclaim or -1
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx Descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  If @maxreclaim == -1,
 *	then we'll use a default maximum.  Called with the TX Queue locked.
 */
static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
				       int maxreclaim, bool unmap)
{
	int reclaim = reclaimable(q);

	if (reclaim) {
		/* Limit the amount of clean up work we do at a time to keep
		 * the Tx lock hold time O(1).
		 */
		if (maxreclaim < 0)
			maxreclaim = MAX_TX_RECLAIM;
		if (reclaim > maxreclaim)
			reclaim = maxreclaim;

		free_tx_desc(adap, q, reclaim, unmap);
		q->in_use -= reclaim;
	}

	return reclaim;
}

/**
 *	cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue locked.
 */
void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
				bool unmap)
{
	(void)reclaim_completed_tx(adap, q, -1, unmap);
}
EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
static inline int get_buf_size(struct adapter *adapter,
			       const struct rx_sw_desc *d)
{
	struct sge *s = &adapter->sge;
	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
	int buf_size;

	switch (rx_buf_size_idx) {
	case RX_SMALL_PG_BUF:
		buf_size = PAGE_SIZE;
		break;

	case RX_LARGE_PG_BUF:
		buf_size = PAGE_SIZE << s->fl_pg_order;
		break;

	case RX_SMALL_MTU_BUF:
		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
		break;

	case RX_LARGE_MTU_BUF:
		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
		break;

	default:
		BUG();
	}

	return buf_size;
}
/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list to free buffers from
 *	@n: how many buffers to free
 *
 *	Release the next @n buffers on an SGE free-buffer Rx queue.  The
 *	buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
{
	while (n--) {
		struct rx_sw_desc *d = &q->sdesc[q->cidx];

		if (is_buf_mapped(d))
			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
				       get_buf_size(adap, d),
				       PCI_DMA_FROMDEVICE);
		put_page(d->page);
		d->page = NULL;
		if (++q->cidx == q->size)
			q->cidx = 0;
		q->avail--;
	}
}

/**
 *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list
 *
 *	Unmap the current buffer on an SGE free-buffer Rx queue.   The
 *	buffer must be made inaccessible to HW before calling this function.
 *
 *	This is similar to @free_rx_bufs above but does not free the buffer.
 *	Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
{
	struct rx_sw_desc *d = &q->sdesc[q->cidx];

	if (is_buf_mapped(d))
		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
			       get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
	d->page = NULL;
	if (++q->cidx == q->size)
		q->cidx = 0;
	q->avail--;
}
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= 8) {
		u32 val = adap->params.arch.sge_fl_db;

		if (is_t4(adap->params.chip))
			val |= PIDX_V(q->pend_cred / 8);
		else
			val |= PIDX_T5_V(q->pend_cred / 8);

		/* Make sure all memory writes to the Free List queue are
		 * committed before we tell the hardware about them.
		 */
		wmb();

		/* If we don't have access to the new User Doorbell (T5+), use
		 * the old doorbell mechanism; otherwise use the new BAR2
		 * mechanism.
		 */
		if (unlikely(q->bar2_addr == NULL)) {
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     val | QID_V(q->cntxt_id));
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);

			/* This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
		q->pend_cred &= 7;
	}
}
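/* Example of the credit push above (numbers are illustrative only): if 37
 * buffers have been added since the last doorbell, pend_cred / 8 = 4 full
 * 8-buffer units are advertised to the hardware, and the remaining 5
 * buffers stay pending until the next refill brings the total back up to
 * at least 8.
 */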
static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
				  dma_addr_t mapping)
{
	sd->page = pg;
	sd->dma_addr = mapping;      /* includes size low bits */
}
/**
 *	refill_fl - refill an SGE Rx buffer ring
 *	@adap: the adapter
 *	@q: the ring to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for the allocations
 *
 *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must assure that
 *	@n does not exceed the queue's capacity.  If afterwards the queue is
 *	found critically low mark it as starving in the bitmap of starving FLs.
 *
 *	Returns the number of buffers allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
			      gfp_t gfp)
{
	struct sge *s = &adap->sge;
	struct page *pg;
	dma_addr_t mapping;
	unsigned int cred = q->avail;
	__be64 *d = &q->desc[q->pidx];
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	int node;

#ifdef CONFIG_DEBUG_FS
	if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
		goto out;
#endif

	gfp |= __GFP_NOWARN;
	node = dev_to_node(adap->pdev_dev);

	if (s->fl_pg_order == 0)
		goto alloc_small_pages;

	/*
	 * Prefer large buffers
	 */
	while (n) {
		pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
		if (unlikely(!pg)) {
			q->large_alloc_failed++;
			break;       /* fall back to single pages */
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0,
				       PAGE_SIZE << s->fl_pg_order,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			__free_pages(pg, s->fl_pg_order);
			q->mapping_err++;
			goto out;   /* do not try small pages for this error */
		}
		mapping |= RX_LARGE_PG_BUF;
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
		n--;
	}

alloc_small_pages:
	while (n--) {
		pg = alloc_pages_node(node, gfp, 0);
		if (unlikely(!pg)) {
			q->alloc_failed++;
			break;
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			__free_pages(pg, 0);
			q->mapping_err++;
			goto out;
		}
		*d++ = cpu_to_be64(mapping);
		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
	}

out:	cred = q->avail - cred;
	q->pend_cred += cred;
	ring_fl_db(adap, q);

	if (unlikely(fl_starving(adap, q))) {
		smp_wmb();
		set_bit(q->cntxt_id - adap->sge.egr_start,
			adap->sge.starving_fl);
	}

	return cred;
}
static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
		  GFP_ATOMIC);
}
/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@dev: the PCI device's core device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *	@stat_size: extra space in HW ring for status information
 *	@node: preferred node for memory allocations
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the bus address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size, int node)
{
	size_t len = nelem * elem_size + stat_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kcalloc_node(sw_size, nelem, GFP_KERNEL, node);

		if (!s) {
			dma_free_coherent(dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	return p;
}
/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
	 * repeated sequences of { Length[i], Length[i+1], Address[i],
	 * Address[i+1] } (this ensures that all addresses are on 64-bit
	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
	 * Address[N+1] is omitted.
	 *
	 * The following calculation incorporates all of the above.  It's
	 * somewhat hard to follow but, briefly: the "+2" accounts for the
	 * first two flits which include the DSGL header, Length0 and
	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
	 * flits for every pair of the remaining N) +1 if (n-1) is odd; and
	 * finally the "+((n-1)&1)" adds the one remaining flit needed if
	 * (n-1) is odd.
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
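/* Worked example of the flit count above: for n = 5 SGL entries, after the
 * n-- we compute (3 * 4) / 2 + (4 & 1) + 2 = 6 + 0 + 2 = 8 flits, i.e. 64
 * bytes of descriptor space: 2 flits for the header/Length0/Address0 plus
 * two 3-flit pairs for the remaining four entries.
 */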
/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Returns the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}
/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *	@chip_ver: chip version
 *
 *	Returns whether an Ethernet packet is small enough to fit as
 *	immediate data. Return value corresponds to headroom required.
 */
static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
{
	int hdrlen = 0;

	if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
	    chip_ver > CHELSIO_T5) {
		hdrlen = sizeof(struct cpl_tx_tnl_lso);
		hdrlen += sizeof(struct cpl_tx_pkt_core);
	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		return 0;
	} else {
		hdrlen = skb_shinfo(skb)->gso_size ?
			 sizeof(struct cpl_tx_pkt_lso_core) : 0;
		hdrlen += sizeof(struct cpl_tx_pkt);
	}
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}
/**
 *	calc_tx_flits - calculate the number of flits for a packet Tx WR
 *	@skb: the packet
 *	@chip_ver: chip version
 *
 *	Returns the number of flits needed for a Tx WR for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
					 unsigned int chip_ver)
{
	unsigned int flits;
	int hdrlen = is_eth_imm(skb, chip_ver);

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (hdrlen)
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size) {
		if (skb->encapsulation && chip_ver > CHELSIO_T5) {
			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
				 sizeof(struct cpl_tx_tnl_lso);
		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
			u32 pkt_hdrlen;

			pkt_hdrlen = eth_get_headlen(skb->dev, skb->data,
						     skb_headlen(skb));
			hdrlen = sizeof(struct fw_eth_tx_eo_wr) +
				 round_up(pkt_hdrlen, 16);
		} else {
			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
				 sizeof(struct cpl_tx_pkt_lso_core);
		}

		hdrlen += sizeof(struct cpl_tx_pkt_core);
		flits += (hdrlen / sizeof(__be64));
	} else {
		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	}
	return flits;
}
/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *	@chip_ver: chip version
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
					 unsigned int chip_ver)
{
	return flits_to_desc(calc_tx_flits(skb, chip_ver));
}
/**
 *	cxgb4_write_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@q: the Tx queue we are writing into
 *	@sgl: starting location for writing the SGL
 *	@end: points right after the end of the SGL
 *	@start: start offset into skb main-body data to include in the SGL
 *	@addr: the list of bus addresses for the SGL elements
 *
 *	Generates a gather list for the buffers that make up a packet.
 *	The caller must provide adequate space for the SGL that will be written.
 *	The SGL includes all of the packet's page fragments and the data in its
 *	main body except for the first @start bytes.  @sgl must be 16-byte
 *	aligned and within a Tx descriptor with available space.  @end points
 *	right after the end of the SGL but does not account for any potential
 *	wrap around, i.e., @end > @sgl.
 */
void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
		     struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		     const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int nfrags = si->nr_frags;
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

	len = skb_headlen(skb) - start;
	if (likely(len)) {
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		nfrags++;
	} else {
		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
		sgl->addr0 = cpu_to_be64(addr[1]);
	}

	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
			      ULPTX_NSGE_V(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
		to->addr[0] = cpu_to_be64(addr[i]);
		to->addr[1] = cpu_to_be64(addr[++i]);
	}
	if (nfrags) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(0);
		to->addr[0] = cpu_to_be64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)q->stat)) {
		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)q->stat;
		memcpy(q->desc, (u8 *)buf + part0, part1);
		end = (void *)q->desc + part1;
	}
	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
		*end = 0;
}
EXPORT_SYMBOL(cxgb4_write_sgl);
/* This function copies 64 byte coalesced work request to
 * memory mapped BAR2 space. For coalesced WR SGE fetches
 * data from the FIFO instead of from Host.
 */
static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
{
	int count = 8;

	while (count) {
		writeq(*src, dst);
		src++;
		dst++;
		count--;
	}
}
/**
 *	cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *	@n: number of new descriptors to give to HW
 *
 *	Ring the doorbell for a Tx queue.
 */
inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
	/* Make sure that all writes to the TX Descriptors are committed
	 * before we tell the hardware about them.
	 */
	wmb();

	/* If we don't have access to the new User Doorbell (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(q->bar2_addr == NULL)) {
		u32 val = PIDX_V(n);
		unsigned long flags;

		/* For T4 we need to participate in the Doorbell Recovery
		 * mechanism.
		 */
		spin_lock_irqsave(&q->db_lock, flags);
		if (!q->db_disabled)
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     QID_V(q->cntxt_id) | val);
		else
			q->db_pidx_inc += n;
		q->db_pidx = q->pidx;
		spin_unlock_irqrestore(&q->db_lock, flags);
	} else {
		u32 val = PIDX_T5_V(n);

		/* T4 and later chips share the same PIDX field offset within
		 * the doorbell, but T5 and later shrank the field in order to
		 * gain a bit for Doorbell Priority.  The field was absurdly
		 * large in the first place (14 bits) so we just use the T5
		 * and later limits and warn if a Queue ID is too large.
		 */
		WARN_ON(val & DBPRIO_F);

		/* If we're only writing a single TX Descriptor and we can use
		 * Inferred QID registers, we can use the Write Combining
		 * Gather Buffer; otherwise we use the simple doorbell.
		 */
		if (n == 1 && q->bar2_qid == 0) {
			int index = (q->pidx
				     ? (q->pidx - 1)
				     : (q->size - 1));
			u64 *wr = (u64 *)&q->desc[index];

			cxgb_pio_copy((u64 __iomem *)
				      (q->bar2_addr + SGE_UDB_WCDOORBELL),
				      wr);
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);
		}

		/* This Write Memory Barrier will force the write to the User
		 * Doorbell area to be flushed.  This is needed to prevent
		 * writes on different CPUs for the same queue from hitting
		 * the adapter out of order.  This is required when some Work
		 * Requests take the Write Combine Gather Buffer path (user
		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
		 * take the traditional path where we simply increment the
		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
		 * hardware DMA read the actual Work Request.
		 */
		wmb();
	}
}
EXPORT_SYMBOL(cxgb4_ring_tx_db);
/**
 *	cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
 *	@skb: the packet
 *	@q: the Tx queue where the packet will be inlined
 *	@pos: starting position in the Tx queue where to inline the packet
 *
 *	Inline a packet's contents directly into Tx descriptors, starting at
 *	the given position within the Tx DMA ring.
 *	Most of the complexity of this operation is dealing with wrap arounds
 *	in the middle of the packet we want to inline.
 */
void cxgb4_inline_tx_skb(const struct sk_buff *skb,
			 const struct sge_txq *q, void *pos)
{
	int left = (void *)q->stat - pos;
	u64 *p;

	if (likely(skb->len <= left)) {
		if (likely(!skb->data_len))
			skb_copy_from_linear_data(skb, pos, skb->len);
		else
			skb_copy_bits(skb, 0, pos, skb->len);
		pos += skb->len;
	} else {
		skb_copy_bits(skb, 0, pos, left);
		skb_copy_bits(skb, left, q->desc, skb->len - left);
		pos = (void *)q->desc + (skb->len - left);
	}
	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8)
		*p = 0;
}
EXPORT_SYMBOL(cxgb4_inline_tx_skb);
static void *inline_tx_skb_header(const struct sk_buff *skb,
				  const struct sge_txq *q, void *pos,
				  int length)
{
	int left = (void *)q->stat - pos;
	u64 *p;

	if (likely(length <= left)) {
		memcpy(pos, skb->data, length);
		pos += length;
	} else {
		memcpy(pos, skb->data, left);
		memcpy(q->desc, skb->data + left, length - left);
		pos = (void *)q->desc + (length - left);
	}
	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8) {
		*p = 0;
		return p + 1;
	}
	return p;
}
/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
	int csum_type;
	bool inner_hdr_csum = false;
	u16 proto, ver;

	if (skb->encapsulation &&
	    (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5))
		inner_hdr_csum = true;

	if (inner_hdr_csum) {
		ver = inner_ip_hdr(skb)->version;
		proto = (ver == 4) ? inner_ip_hdr(skb)->protocol :
			inner_ipv6_hdr(skb)->nexthdr;
	} else {
		ver = ip_hdr(skb)->version;
		proto = (ver == 4) ? ip_hdr(skb)->protocol :
			ipv6_hdr(skb)->nexthdr;
	}

	if (ver == 4) {
		if (proto == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP;
		else if (proto == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP;
		else {
nocsum:			/*
			 * unknown protocol, disable HW csum
			 * and hope a bad packet is detected
			 */
			return TXPKT_L4CSUM_DIS_F;
		}
	} else {
		/*
		 * this doesn't work with extension headers
		 */
		if (proto == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP6;
		else if (proto == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP6;
		else
			goto nocsum;
	}

	if (likely(csum_type >= TX_CSUM_TCPIP)) {
		int eth_hdr_len, l4_len;
		u64 hdr_len;

		if (inner_hdr_csum) {
			/* This allows checksum offload for all encapsulated
			 * packets like GRE etc..
			 */
			l4_len = skb_inner_network_header_len(skb);
			eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN;
		} else {
			l4_len = skb_network_header_len(skb);
			eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
		}
		hdr_len = TXPKT_IPHDR_LEN_V(l4_len);

		if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
			hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		else
			hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
	} else {
		int start = skb_transport_offset(skb);

		return TXPKT_CSUM_TYPE_V(csum_type) |
			TXPKT_CSUM_START_V(start) |
			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
	}
}
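/* Example of the control word built above (field values are illustrative
 * only): for a plain TCP/IPv4 packet with an untagged 14-byte Ethernet
 * header and a 20-byte IP header, the function selects TX_CSUM_TCPIP,
 * encodes 0 "extra" Ethernet header bytes and a 20-byte IP header length,
 * which is all the hardware needs in order to insert the TCP checksum.
 */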
static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}
1140 cxgb_fcoe_offload(struct sk_buff
*skb
, struct adapter
*adap
,
1141 const struct port_info
*pi
, u64
*cntrl
)
1143 const struct cxgb_fcoe
*fcoe
= &pi
->fcoe
;
1145 if (!(fcoe
->flags
& CXGB_FCOE_ENABLED
))
1148 if (skb
->protocol
!= htons(ETH_P_FCOE
))
1151 skb_reset_mac_header(skb
);
1152 skb
->mac_len
= sizeof(struct ethhdr
);
1154 skb_set_network_header(skb
, skb
->mac_len
);
1155 skb_set_transport_header(skb
, skb
->mac_len
+ sizeof(struct fcoe_hdr
));
1157 if (!cxgb_fcoe_sof_eof_supported(adap
, skb
))
1160 /* FC CRC offload */
1161 *cntrl
= TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE
) |
1162 TXPKT_L4CSUM_DIS_F
| TXPKT_IPCSUM_DIS_F
|
1163 TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START
) |
1164 TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END
) |
1165 TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END
);
1168 #endif /* CONFIG_CHELSIO_T4_FCOE */
/* Returns tunnel type if hardware supports offloading of the same.
 * It is called only for T5 and onwards.
 */
enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
{
	u8 l4_hdr = 0;
	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
	struct port_info *pi = netdev_priv(skb->dev);
	struct adapter *adapter = pi->adapter;

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB))
		return tnl_type;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return tnl_type;
	}

	switch (l4_hdr) {
	case IPPROTO_UDP:
		if (adapter->vxlan_port == udp_hdr(skb)->dest)
			tnl_type = TX_TNL_TYPE_VXLAN;
		else if (adapter->geneve_port == udp_hdr(skb)->dest)
			tnl_type = TX_TNL_TYPE_GENEVE;
		break;
	default:
		return tnl_type;
	}

	return tnl_type;
}
static inline void t6_fill_tnl_lso(struct sk_buff *skb,
				   struct cpl_tx_tnl_lso *tnl_lso,
				   enum cpl_tx_tnl_lso_type tnl_type)
{
	u32 val;
	int in_eth_xtra_len;
	int l3hdr_len = skb_network_header_len(skb);
	int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
	const struct skb_shared_info *ssi = skb_shinfo(skb);
	bool v6 = (ip_hdr(skb)->version == 6);

	val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
	      CPL_TX_TNL_LSO_FIRST_F |
	      CPL_TX_TNL_LSO_LAST_F |
	      (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
	      CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
	      CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
	      (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
	      CPL_TX_TNL_LSO_IPLENSETOUT_F |
	      (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
	tnl_lso->op_to_IpIdSplitOut = htonl(val);

	tnl_lso->IpIdOffsetOut = 0;

	/* Get the tunnel header length */
	val = skb_inner_mac_header(skb) - skb_mac_header(skb);
	in_eth_xtra_len = skb_inner_network_header(skb) -
			  skb_inner_mac_header(skb) - ETH_HLEN;

	switch (tnl_type) {
	case TX_TNL_TYPE_VXLAN:
	case TX_TNL_TYPE_GENEVE:
		tnl_lso->UdpLenSetOut_to_TnlHdrLen =
			htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
			      CPL_TX_TNL_LSO_UDPLENSETOUT_F);
		break;
	default:
		tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
		break;
	}

	tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
		 htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
		       CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));

	tnl_lso->r1 = 0;

	val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
	      CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
	      CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
	      CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
	tnl_lso->Flow_to_TcpHdrLen = htonl(val);

	tnl_lso->IpIdOffset = htons(0);

	tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
	tnl_lso->TCPSeqOffset = htonl(0);
	tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
}
static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
				 struct cpl_tx_pkt_lso_core *lso)
{
	int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
	int l3hdr_len = skb_network_header_len(skb);
	const struct skb_shared_info *ssi;
	bool ipv6 = false;

	ssi = skb_shinfo(skb);
	if (ssi->gso_type & SKB_GSO_TCPV6)
		ipv6 = true;

	lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
			      LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
			      LSO_IPV6_V(ipv6) |
			      LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
			      LSO_IPHDR_LEN_V(l3hdr_len / 4) |
			      LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
	lso->ipid_ofst = htons(0);
	lso->mss = htons(ssi->gso_size);
	lso->seqno_offset = htonl(0);
	if (is_t4(adap->params.chip))
		lso->len = htonl(skb->len);
	else
		lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
	return (void *)(lso + 1);
}
/**
 *	t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
 *	@adap: the adapter
 *	@eq: the Ethernet TX Queue
 *	@maxreclaim: the maximum number of TX Descriptors to reclaim or -1
 *
 *	We're typically called here to update the state of an Ethernet TX
 *	Queue with respect to the hardware's progress in consuming the TX
 *	Work Requests that we've put on that Egress Queue.  This happens
 *	when we get Egress Queue Update messages and also prophylactically
 *	in regular timer-based Ethernet TX Queue maintenance.
 */
int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
				 int maxreclaim)
{
	unsigned int reclaimed, hw_cidx;
	struct sge_txq *q = &eq->q;
	int hw_in_use;

	if (!q->in_use || !__netif_tx_trylock(eq->txq))
		return 0;

	/* Reclaim pending completed TX Descriptors. */
	reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);

	hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
	hw_in_use = q->pidx - hw_cidx;
	if (hw_in_use < 0)
		hw_in_use += q->size;

	/* If the TX Queue is currently stopped and there's now more than half
	 * the queue available, restart it.  Otherwise bail out since the rest
	 * of what we want do here is with the possibility of shipping any
	 * currently buffered Coalesced TX Work Request.
	 */
	if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
		netif_tx_wake_queue(eq->txq);
		eq->q.restarts++;
	}

	__netif_tx_unlock(eq->txq);
	return reclaimed;
}
static inline int cxgb4_validate_skb(struct sk_buff *skb,
				     struct net_device *dev,
				     u32 min_pkt_len)
{
	u32 max_pkt_len;

	/* The chip min packet length is 10 octets but some firmware
	 * commands have a minimum packet length requirement. So, play
	 * safe and reject anything shorter than @min_pkt_len.
	 */
	if (unlikely(skb->len < min_pkt_len))
		return -EINVAL;

	/* Discard the packet if the length is greater than mtu */
	max_pkt_len = ETH_HLEN + dev->mtu;

	if (skb_vlan_tagged(skb))
		max_pkt_len += VLAN_HLEN;

	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
		return -EINVAL;

	return 0;
}
static void *write_eo_udp_wr(struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
			     u32 hdr_len)
{
	wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
	wr->u.udpseg.ethlen = skb_network_offset(skb);
	wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
	wr->u.udpseg.udplen = sizeof(struct udphdr);
	wr->u.udpseg.rtplen = 0;
	wr->u.udpseg.r4 = 0;
	if (skb_shinfo(skb)->gso_size)
		wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
	else
		wr->u.udpseg.mss = cpu_to_be16(skb->len - hdr_len);
	wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
	wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len);

	return (void *)(wr + 1);
}
/**
 *	cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
 */
static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
	bool ptp_enabled = is_ptp_enabled(skb, dev);
	unsigned int last_desc, flits, ndesc;
	u32 wr_mid, ctrl0, op, sgl_off = 0;
	const struct skb_shared_info *ssi;
	int len, qidx, credits, ret, left;
	struct tx_sw_desc *sgl_sdesc;
	struct fw_eth_tx_eo_wr *eowr;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	const struct port_info *pi;
	bool immediate = false;
	u64 cntrl, *end, *sgl;
	struct sge_eth_txq *q;
	unsigned int chip_ver;
	struct adapter *adap;

	ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
	if (ret)
		goto out_free;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	ssi = skb_shinfo(skb);
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	if (xfrm_offload(skb) && !ssi->gso_size)
		return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
#endif /* CHELSIO_IPSEC_INLINE */

#ifdef CONFIG_CHELSIO_TLS_DEVICE
	if (skb->decrypted)
		return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
#endif /* CHELSIO_TLS_DEVICE */

	qidx = skb_get_queue_mapping(skb);
	if (ptp_enabled) {
		if (!(adap->ptp_tx_skb)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			adap->ptp_tx_skb = skb_get(skb);
		} else {
			goto out_free;
		}
		q = &adap->sge.ptptxq;
	} else {
		q = &adap->sge.ethtxq[qidx + pi->first_qset];
	}
	skb_tx_timestamp(skb);

	reclaim_completed_tx(adap, &q->q, -1, true);
	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;

#ifdef CONFIG_CHELSIO_T4_FCOE
	ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
	if (unlikely(ret == -EOPNOTSUPP))
		goto out_free;
#endif /* CONFIG_CHELSIO_T4_FCOE */

	chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
	flits = calc_tx_flits(skb, chip_ver);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, qidx);
		return NETDEV_TX_BUSY;
	}

	if (is_eth_imm(skb, chip_ver))
		immediate = true;

	if (skb->encapsulation && chip_ver > CHELSIO_T5)
		tnl_type = cxgb_encap_offload_supported(skb);

	last_desc = q->q.pidx + ndesc - 1;
	if (last_desc >= q->q.size)
		last_desc -= q->q.size;
	sgl_sdesc = &q->q.sdesc[last_desc];

	if (!immediate &&
	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
		q->mapping_err++;
		goto out_free;
	}

	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		/* After we're done injecting the Work Request for this
		 * packet, we'll be below our "stop threshold" so stop the TX
		 * Queue now and schedule a request for an SGE Egress Queue
		 * Update message. The queue will get started later on when
		 * the firmware processes this Work Request and sends us an
		 * Egress Queue Status Update message indicating that space
		 * has opened up.
		 */
		eth_txq_stop(q);
		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}

	wr = (void *)&q->q.desc[q->q.pidx];
	eowr = (void *)&q->q.desc[q->q.pidx];
	wr->equiq_to_len16 = htonl(wr_mid);
	wr->r3 = cpu_to_be64(0);
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		end = (u64 *)eowr + flits;
	else
		end = (u64 *)wr + flits;

	len = immediate ? skb->len : 0;
	len += sizeof(*cpl);
	if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) {
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
		struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);

		if (tnl_type)
			len += sizeof(*tnl_lso);
		else
			len += sizeof(*lso);

		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
				       FW_WR_IMMDLEN_V(len));
		if (tnl_type) {
			struct iphdr *iph = ip_hdr(skb);

			t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
			cpl = (void *)(tnl_lso + 1);
			/* Driver is expected to compute partial checksum that
			 * does not include the IP Total Length.
			 */
			if (iph->version == 4) {
				iph->check = 0;
				iph->tot_len = 0;
				iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl);
			}
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				cntrl = hwcsum(adap->params.chip, skb);
		} else {
			cpl = write_tso_wr(adap, skb, lso);
			cntrl = hwcsum(adap->params.chip, skb);
		}
		sgl = (u64 *)(cpl + 1); /* sgl start here */
		q->tso++;
		q->tx_cso += ssi->gso_segs;
	} else if (ssi->gso_size) {
		u64 *start;
		u32 hdrlen;

		hdrlen = eth_get_headlen(dev, skb->data, skb_headlen(skb));
		len += hdrlen;
		wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
					     FW_ETH_TX_EO_WR_IMMDLEN_V(len));
		cpl = write_eo_udp_wr(skb, eowr, hdrlen);
		cntrl = hwcsum(adap->params.chip, skb);

		start = (u64 *)(cpl + 1);
		sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start,
						  hdrlen);
		if (unlikely(start > sgl)) {
			left = (u8 *)end - (u8 *)q->q.stat;
			end = (void *)q->q.desc + left;
		}
		sgl_off = hdrlen;
		q->uso++;
		q->tx_cso += ssi->gso_segs;
	} else {
		if (ptp_enabled)
			op = FW_PTP_TX_PKT_WR;
		else
			op = FW_ETH_TX_PKT_WR;
		wr->op_immdlen = htonl(FW_WR_OP_V(op) |
				       FW_WR_IMMDLEN_V(len));
		cpl = (void *)(wr + 1);
		sgl = (u64 *)(cpl + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(adap->params.chip, skb) |
				TXPKT_IPCSUM_DIS_F;
			q->tx_cso++;
		}
	}

	if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
		/* If current position is already at the end of the
		 * txq, reset the current to point to start of the queue
		 * and update the end ptr as well.
		 */
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
		sgl = (void *)q->q.desc;
	}

	if (skb_vlan_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
#ifdef CONFIG_CHELSIO_T4_FCOE
		if (skb->protocol == htons(ETH_P_FCOE))
			cntrl |= TXPKT_VLAN_V(
				 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
#endif /* CONFIG_CHELSIO_T4_FCOE */
	}

	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
		TXPKT_PF_V(adap->pf);
	if (ptp_enabled)
		ctrl0 |= TXPKT_TSTAMP_F;
#ifdef CONFIG_CHELSIO_T4_DCB
	if (is_t4(adap->params.chip))
		ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
	else
		ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
#endif /* CONFIG_CHELSIO_T4_DCB */
	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	if (immediate) {
		cxgb4_inline_tx_skb(skb, &q->q, sgl);
		dev_consume_skb_any(skb);
	} else {
		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off,
				sgl_sdesc->addr);
		skb_orphan(skb);
		sgl_sdesc->skb = skb;
	}

	txq_advance(&q->q, ndesc);

	cxgb4_ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;

out_free:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
enum {
	/* Egress Queue sizes, producer and consumer indices are all in units
	 * of Egress Context Units bytes.  Note that as far as the hardware is
	 * concerned, the free list is an Egress Queue (the host produces free
	 * buffers which the hardware consumes) and free list entries are
	 * 64-bit PCI DMA addresses.
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),

	T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			       sizeof(struct cpl_tx_pkt_lso_core) +
			       sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
};
/**
 *	t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *
 *	Returns whether an Ethernet packet is small enough to fit completely as
 *	immediate data.
 */
static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
{
	/* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
	 * which does not accommodate immediate data.  We could dike out all
	 * of the support code for immediate data but that would tie our hands
	 * too much if we ever want to enhance the firmware.  It would also
	 * create more differences between the PF and VF Drivers.
	 */
	return false;
}
/**
 *	t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
 *	@skb: the packet
 *
 *	Returns the number of flits needed for a TX Work Request for the
 *	given Ethernet packet, including the needed WR and CPL headers.
 */
static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (t4vf_is_eth_imm(skb))
		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
				    sizeof(__be64));

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size)
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	else
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	return flits;
}
/**
 *	cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
 */
static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	unsigned int last_desc, flits, ndesc;
	const struct skb_shared_info *ssi;
	struct fw_eth_tx_pkt_vm_wr *wr;
	struct tx_sw_desc *sgl_sdesc;
	struct cpl_tx_pkt_core *cpl;
	const struct port_info *pi;
	struct sge_eth_txq *txq;
	struct adapter *adapter;
	int qidx, credits, ret;
	size_t fw_hdr_copy_len;
	u64 cntrl, *end;
	u32 wr_mid;

	/* The chip minimum packet length is 10 octets but the firmware
	 * command that we are using requires that we copy the Ethernet header
	 * (including the VLAN tag) into the header so we reject anything
	 * smaller than that ...
	 */
	fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
			  sizeof(wr->ethtype) + sizeof(wr->vlantci);
	ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len);
	if (ret)
		goto out_free;

	/* Figure out which TX Queue we're going to use. */
	pi = netdev_priv(dev);
	adapter = pi->adapter;
	qidx = skb_get_queue_mapping(skb);
	WARN_ON(qidx >= pi->nqsets);
	txq = &adapter->sge.ethtxq[pi->first_qset + qidx];

	/* Take this opportunity to reclaim any TX Descriptors whose DMA
	 * transfers have completed.
	 */
	reclaim_completed_tx(adapter, &txq->q, -1, true);

	/* Calculate the number of flits and TX Descriptors we're going to
	 * need along with how many TX Descriptors will be left over after
	 * we inject our Work Request.
	 */
	flits = t4vf_calc_tx_flits(skb);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&txq->q) - ndesc;

	if (unlikely(credits < 0)) {
		/* Not enough room for this packet's Work Request.  Stop the
		 * TX Queue and return a "busy" condition.  The queue will get
		 * started later on when the firmware informs us that space
		 * has opened up.
		 */
		eth_txq_stop(txq);
		dev_err(adapter->pdev_dev,
			"%s: TX ring %u full while queue awake!\n",
			dev->name, qidx);
		return NETDEV_TX_BUSY;
	}

	last_desc = txq->q.pidx + ndesc - 1;
	if (last_desc >= txq->q.size)
		last_desc -= txq->q.size;
	sgl_sdesc = &txq->q.sdesc[last_desc];

	if (!t4vf_is_eth_imm(skb) &&
	    unlikely(cxgb4_map_skb(adapter->pdev_dev, skb,
				   sgl_sdesc->addr) < 0)) {
		/* We need to map the skb into PCI DMA space (because it can't
		 * be in-lined directly into the Work Request) and the mapping
		 * operation failed.  Record the error and drop the packet.
		 */
		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
		txq->mapping_err++;
		goto out_free;
	}

	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		/* After we're done injecting the Work Request for this
		 * packet, we'll be below our "stop threshold" so stop the TX
		 * Queue now and schedule a request for an SGE Egress Queue
		 * Update message.  The queue will get started later on when
		 * the firmware processes this Work Request and sends us an
		 * Egress Queue Status Update message indicating that space
		 * has opened up.
		 */
		eth_txq_stop(txq);
		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}

	/* Start filling in our Work Request.  Note that we do _not_ handle
	 * the WR Header wrapping around the TX Descriptor Ring.  If our
	 * maximum header size ever exceeds one TX Descriptor, we'll need to
	 * do something else here.
	 */
	WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
	wr = (void *)&txq->q.desc[txq->q.pidx];
	wr->equiq_to_len16 = cpu_to_be32(wr_mid);
	wr->r3[0] = cpu_to_be32(0);
	wr->r3[1] = cpu_to_be32(0);
	skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
	end = (u64 *)wr + flits;

	/* If this is a Large Send Offload packet we'll put in an LSO CPL
	 * message with an encapsulated TX Packet CPL message.  Otherwise we
	 * just use a TX Packet CPL message.
	 */
	ssi = skb_shinfo(skb);
	if (ssi->gso_size) {
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
		int l3hdr_len = skb_network_header_len(skb);
		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;

		wr->op_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
				    FW_WR_IMMDLEN_V(sizeof(*lso) +
						    sizeof(*cpl)));
		 /* Fill in the LSO CPL message. */
		lso->lso_ctrl =
			cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
				    LSO_FIRST_SLICE_F |
				    LSO_LAST_SLICE_F |
				    LSO_IPV6_V(v6) |
				    LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
				    LSO_IPHDR_LEN_V(l3hdr_len / 4) |
				    LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
		lso->ipid_ofst = cpu_to_be16(0);
		lso->mss = cpu_to_be16(ssi->gso_size);
		lso->seqno_offset = cpu_to_be32(0);
		if (is_t4(adapter->params.chip))
			lso->len = cpu_to_be32(skb->len);
		else
			lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));

		/* Set up TX Packet CPL pointer, control word and perform
		 * accounting.
		 */
		cpl = (void *)(lso + 1);

		if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
			cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
		else
			cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);

		cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
					   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
			 TXPKT_IPHDR_LEN_V(l3hdr_len);
		txq->tso++;
		txq->tx_cso += ssi->gso_segs;
	} else {
		int len;

		len = (t4vf_is_eth_imm(skb)
		       ? skb->len + sizeof(*cpl)
		       : sizeof(*cpl));
		wr->op_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
				    FW_WR_IMMDLEN_V(len));

		/* Set up TX Packet CPL pointer, control word and perform
		 * accounting.
		 */
		cpl = (void *)(wr + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(adapter->params.chip, skb) |
				TXPKT_IPCSUM_DIS_F;
			txq->tx_cso++;
		} else {
			cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
		}
	}

	/* If there's a VLAN tag present, add that to the list of things to
	 * do in this Work Request.
	 */
	if (skb_vlan_tag_present(skb)) {
		txq->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
	}

	 /* Fill in the TX Packet CPL message header. */
	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
				 TXPKT_INTF_V(pi->port_id) |
				 TXPKT_PF_V(0));
	cpl->pack = cpu_to_be16(0);
	cpl->len = cpu_to_be16(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	/* Fill in the body of the TX Packet CPL message with either in-lined
	 * data or a Scatter/Gather List.
	 */
	if (t4vf_is_eth_imm(skb)) {
		/* In-line the packet's data and free the skb since we don't
		 * need it any longer.
		 */
		cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
		dev_consume_skb_any(skb);
	} else {
		/* Write the skb's Scatter/Gather list into the TX Packet CPL
		 * message and retain a pointer to the skb so we can free it
		 * later when its DMA completes.  (We store the skb pointer
		 * in the Software Descriptor corresponding to the last TX
		 * Descriptor used by the Work Request.)
		 *
		 * The retained skb will be freed when the corresponding TX
		 * Descriptors are reclaimed after their DMAs complete.
		 * However, this could take quite a while since, in general,
		 * the hardware is set up to be lazy about sending DMA
		 * completion notifications to us and we mostly perform TX
		 * reclaims in the transmit routine.
		 *
		 * This is good for performance but means that we rely on new
		 * TX packets arriving to run the destructors of completed
		 * packets, which open up space in their sockets' send queues.
		 * Sometimes we do not get such new packets causing TX to
		 * stall.  A single UDP transmitter is a good example of this
		 * situation.  We have a clean up timer that periodically
		 * reclaims completed packets but it doesn't run often enough
		 * (nor do we want it to) to prevent lengthy stalls.  A
		 * solution to this problem is to run the destructor early,
		 * after the packet is queued but before it's DMAd.  A con is
		 * that we lie to socket memory accounting, but the amount of
		 * extra memory is reasonable (limited by the number of TX
		 * descriptors), the packets do actually get freed quickly by
		 * new packets almost always, and for protocols like TCP that
		 * wait for acks to really free up the data the extra memory
		 * is even less.  On the positive side we run the destructors
		 * on the sending CPU rather than on a potentially different
		 * completing CPU, usually a good thing.
		 *
		 * Run the destructor before telling the DMA engine about the
		 * packet to make sure it doesn't complete and get freed
		 * prematurely.
		 */
		struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
		struct sge_txq *tq = &txq->q;

		/* If the Work Request header was an exact multiple of our TX
		 * Descriptor length, then it's possible that the starting SGL
		 * pointer lines up exactly with the end of our TX Descriptor
		 * ring.  If that's the case, wrap around to the beginning
		 * here ...
		 */
		if (unlikely((void *)sgl == (void *)tq->stat)) {
			sgl = (void *)tq->desc;
			end = (void *)((void *)tq->desc +
				       ((void *)end - (void *)tq->stat));
		}

		cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr);
		skb_orphan(skb);
		sgl_sdesc->skb = skb;
	}

	/* Advance our internal TX Queue state, tell the hardware about
	 * the new TX descriptors and return success.
	 */
	txq_advance(&txq->q, ndesc);

	cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
	return NETDEV_TX_OK;

out_free:
	/* An error of some sort happened.  Free the TX skb and tell the
	 * OS that we've "dealt" with the packet ...
	 */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
/**
 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 * @q: the SGE control Tx queue
 *
 * This is a variant of cxgb4_reclaim_completed_tx() that is used
 * for Tx queues that send only immediate data (presently just
 * the control queues) and thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
	int reclaim = hw_cidx - q->cidx;

	if (reclaim < 0)
		reclaim += q->size;

	q->in_use -= reclaim;
	q->cidx = hw_cidx;
}
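/* Descriptive note (added): the ETHOFLD software (EOSW) Tx queue helpers
 * below treat the descriptor array as a ring.  Indices advance modulo the
 * number of descriptors (eosw_txq->ndesc); pidx/last_pidx/cidx/last_cidx
 * track how far the queue has been filled, handed to hardware, completed,
 * and freed, respectively.
 */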
static inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max)
{
	u32 val = *idx + n;

	if (val >= max)
		val -= max;

	*idx = val;
}

void cxgb4_eosw_txq_free_desc(struct adapter *adap,
			      struct sge_eosw_txq *eosw_txq, u32 ndesc)
{
	struct tx_sw_desc *d;

	d = &eosw_txq->desc[eosw_txq->last_cidx];
	while (ndesc--) {
		if (d->skb) {
			if (d->addr[0]) {
				unmap_skb(adap->pdev_dev, d->skb, d->addr);
				memset(d->addr, 0, sizeof(d->addr));
			}
			dev_consume_skb_any(d->skb);
			d->skb = NULL;
		}
		eosw_txq_advance_index(&eosw_txq->last_cidx, 1,
				       eosw_txq->ndesc);
		d = &eosw_txq->desc[eosw_txq->last_cidx];
	}
}

static inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n)
{
	eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc);
	eosw_txq->inuse += n;
}

static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq,
				   struct sk_buff *skb)
{
	if (eosw_txq->inuse == eosw_txq->ndesc)
		return -ENOMEM;

	eosw_txq->desc[eosw_txq->pidx].skb = skb;
	return 0;
}

static inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq)
{
	return eosw_txq->desc[eosw_txq->last_pidx].skb;
}
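/* Descriptive note (added): Tx descriptors are made up of 8-byte flits.
 * ethofld_calc_tx_flits() below counts the flits needed for the EO Work
 * Request, the CPLs, the inlined packet headers and the SGL covering the
 * remaining payload.
 */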
static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
				       struct sk_buff *skb, u32 hdr_len)
{
	u8 flits, nsgl = 0;
	u32 wrlen;

	wrlen = sizeof(struct fw_eth_tx_eo_wr) + sizeof(struct cpl_tx_pkt_core);
	if (skb_shinfo(skb)->gso_size &&
	    !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
		wrlen += sizeof(struct cpl_tx_pkt_lso_core);

	wrlen += roundup(hdr_len, 16);

	/* Packet headers + WR + CPLs */
	flits = DIV_ROUND_UP(wrlen, 8);

	if (skb_shinfo(skb)->nr_frags > 0) {
		if (skb_headlen(skb) - hdr_len)
			nsgl = sgl_len(skb_shinfo(skb)->nr_frags + 1);
		else
			nsgl = sgl_len(skb_shinfo(skb)->nr_frags);
	} else if (skb->len - hdr_len) {
		nsgl = sgl_len(1);
	}

	return flits + nsgl;
}
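/* Descriptive note (added): write_eo_wr() fills in the FW_ETH_TX_EO_WR
 * header for a segmentation-offload packet, requesting a completion often
 * enough that no more than roughly half of the queue's WR credits are ever
 * outstanding, and returns the location where the CPL_TX_PKT_CORE that
 * follows the WR (and optional LSO CPL) should be written.
 */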
static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq,
			 struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
			 u32 hdr_len, u32 wrlen)
{
	const struct skb_shared_info *ssi = skb_shinfo(skb);
	struct cpl_tx_pkt_core *cpl;
	u32 immd_len, wrlen16;
	bool compl = false;
	u8 ver, proto;

	ver = ip_hdr(skb)->version;
	proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : ip_hdr(skb)->protocol;

	wrlen16 = DIV_ROUND_UP(wrlen, 16);
	immd_len = sizeof(struct cpl_tx_pkt_core);
	if (skb_shinfo(skb)->gso_size &&
	    !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
		immd_len += sizeof(struct cpl_tx_pkt_lso_core);
	immd_len += hdr_len;

	if (!eosw_txq->ncompl ||
	    (eosw_txq->last_compl + wrlen16) >=
	    (adap->params.ofldq_wr_cred / 2)) {
		compl = true;
		eosw_txq->ncompl++;
		eosw_txq->last_compl = 0;
	}

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
				     FW_ETH_TX_EO_WR_IMMDLEN_V(immd_len) |
				     FW_WR_COMPL_V(compl));
	wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) |
					 FW_WR_FLOWID_V(eosw_txq->hwtid));
	wr->r3 = 0;
	if (proto == IPPROTO_UDP) {
		cpl = write_eo_udp_wr(skb, wr, hdr_len);
	} else {
		wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
		wr->u.tcpseg.ethlen = skb_network_offset(skb);
		wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
		wr->u.tcpseg.tcplen = tcp_hdrlen(skb);
		wr->u.tcpseg.tsclk_tsoff = 0;
		wr->u.tcpseg.r4 = 0;
		wr->u.tcpseg.r5 = 0;
		wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len);

		if (ssi->gso_size) {
			struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);

			wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size);
			cpl = write_tso_wr(adap, skb, lso);
		} else {
			wr->u.tcpseg.mss = cpu_to_be16(0xffff);
			cpl = (void *)(wr + 1);
		}
	}

	eosw_txq->cred -= wrlen16;
	eosw_txq->last_compl += wrlen16;
	return cpl;
}
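/* Descriptive note (added): ethofld_hard_xmit() takes the skb at
 * eosw_txq->last_pidx off the software queue and writes it, as an EO Work
 * Request plus SGL, into the hardware ETHOFLD Tx queue bound to this flow,
 * provided enough hardware descriptors and WR credits are available.
 */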
2147 static int ethofld_hard_xmit(struct net_device
*dev
,
2148 struct sge_eosw_txq
*eosw_txq
)
2150 struct port_info
*pi
= netdev2pinfo(dev
);
2151 struct adapter
*adap
= netdev2adap(dev
);
2152 u32 wrlen
, wrlen16
, hdr_len
, data_len
;
2153 enum sge_eosw_state next_state
;
2154 u64 cntrl
, *start
, *end
, *sgl
;
2155 struct sge_eohw_txq
*eohw_txq
;
2156 struct cpl_tx_pkt_core
*cpl
;
2157 struct fw_eth_tx_eo_wr
*wr
;
2158 bool skip_eotx_wr
= false;
2159 struct tx_sw_desc
*d
;
2160 struct sk_buff
*skb
;
2164 eohw_txq
= &adap
->sge
.eohw_txq
[eosw_txq
->hwqid
];
2165 spin_lock(&eohw_txq
->lock
);
2166 reclaim_completed_tx_imm(&eohw_txq
->q
);
2168 d
= &eosw_txq
->desc
[eosw_txq
->last_pidx
];
2170 skb_tx_timestamp(skb
);
2172 wr
= (struct fw_eth_tx_eo_wr
*)&eohw_txq
->q
.desc
[eohw_txq
->q
.pidx
];
2173 if (unlikely(eosw_txq
->state
!= CXGB4_EO_STATE_ACTIVE
&&
2174 eosw_txq
->last_pidx
== eosw_txq
->flowc_idx
)) {
2177 flits
= DIV_ROUND_UP(hdr_len
, 8);
2178 if (eosw_txq
->state
== CXGB4_EO_STATE_FLOWC_OPEN_SEND
)
2179 next_state
= CXGB4_EO_STATE_FLOWC_OPEN_REPLY
;
2181 next_state
= CXGB4_EO_STATE_FLOWC_CLOSE_REPLY
;
2182 skip_eotx_wr
= true;
2184 hdr_len
= eth_get_headlen(dev
, skb
->data
, skb_headlen(skb
));
2185 data_len
= skb
->len
- hdr_len
;
2186 flits
= ethofld_calc_tx_flits(adap
, skb
, hdr_len
);
2188 ndesc
= flits_to_desc(flits
);
2190 wrlen16
= DIV_ROUND_UP(wrlen
, 16);
2192 left
= txq_avail(&eohw_txq
->q
) - ndesc
;
2194 /* If there are no descriptors left in hardware queues or no
2195 * CPL credits left in software queues, then wait for them
2196 * to come back and retry again. Note that we always request
2197 * for credits update via interrupt for every half credits
2198 * consumed. So, the interrupt will eventually restore the
2199 * credits and invoke the Tx path again.
2201 if (unlikely(left
< 0 || wrlen16
> eosw_txq
->cred
)) {
2206 if (unlikely(skip_eotx_wr
)) {
2208 eosw_txq
->state
= next_state
;
2209 eosw_txq
->cred
-= wrlen16
;
2211 eosw_txq
->last_compl
= 0;
2212 goto write_wr_headers
;
2215 cpl
= write_eo_wr(adap
, eosw_txq
, skb
, wr
, hdr_len
, wrlen
);
2216 cntrl
= hwcsum(adap
->params
.chip
, skb
);
2217 if (skb_vlan_tag_present(skb
))
2218 cntrl
|= TXPKT_VLAN_VLD_F
| TXPKT_VLAN_V(skb_vlan_tag_get(skb
));
2220 cpl
->ctrl0
= cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT
) |
2221 TXPKT_INTF_V(pi
->tx_chan
) |
2222 TXPKT_PF_V(adap
->pf
));
2224 cpl
->len
= cpu_to_be16(skb
->len
);
2225 cpl
->ctrl1
= cpu_to_be64(cntrl
);
2227 start
= (u64
*)(cpl
+ 1);
2230 sgl
= (u64
*)inline_tx_skb_header(skb
, &eohw_txq
->q
, (void *)start
,
2233 ret
= cxgb4_map_skb(adap
->pdev_dev
, skb
, d
->addr
);
2234 if (unlikely(ret
)) {
2235 memset(d
->addr
, 0, sizeof(d
->addr
));
2236 eohw_txq
->mapping_err
++;
2240 end
= (u64
*)wr
+ flits
;
2241 if (unlikely(start
> sgl
)) {
2242 left
= (u8
*)end
- (u8
*)eohw_txq
->q
.stat
;
2243 end
= (void *)eohw_txq
->q
.desc
+ left
;
2246 if (unlikely((u8
*)sgl
>= (u8
*)eohw_txq
->q
.stat
)) {
2247 /* If current position is already at the end of the
2248 * txq, reset the current to point to start of the queue
2249 * and update the end ptr as well.
2251 left
= (u8
*)end
- (u8
*)eohw_txq
->q
.stat
;
2253 end
= (void *)eohw_txq
->q
.desc
+ left
;
2254 sgl
= (void *)eohw_txq
->q
.desc
;
2257 cxgb4_write_sgl(skb
, &eohw_txq
->q
, (void *)sgl
, end
, hdr_len
,
2261 if (skb_shinfo(skb
)->gso_size
) {
2262 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_UDP_L4
)
2266 eohw_txq
->tx_cso
+= skb_shinfo(skb
)->gso_segs
;
2267 } else if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
2271 if (skb_vlan_tag_present(skb
))
2272 eohw_txq
->vlan_ins
++;
2274 txq_advance(&eohw_txq
->q
, ndesc
);
2275 cxgb4_ring_tx_db(adap
, &eohw_txq
->q
, ndesc
);
2276 eosw_txq_advance_index(&eosw_txq
->last_pidx
, 1, eosw_txq
->ndesc
);
2279 spin_unlock(&eohw_txq
->lock
);
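/* Descriptive note (added): ethofld_xmit() walks the skbs queued between
 * last_pidx and pidx on the software queue and pushes each one to the
 * hardware via ethofld_hard_xmit(), stopping early if the hardware queue
 * or the WR credits run out.
 */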
static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
{
	struct sk_buff *skb;
	int pktcount, ret;

	switch (eosw_txq->state) {
	case CXGB4_EO_STATE_ACTIVE:
	case CXGB4_EO_STATE_FLOWC_OPEN_SEND:
	case CXGB4_EO_STATE_FLOWC_CLOSE_SEND:
		pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
		if (pktcount < 0)
			pktcount += eosw_txq->ndesc;
		break;
	case CXGB4_EO_STATE_FLOWC_OPEN_REPLY:
	case CXGB4_EO_STATE_FLOWC_CLOSE_REPLY:
	case CXGB4_EO_STATE_CLOSED:
	default:
		return;
	}

	while (pktcount--) {
		skb = eosw_txq_peek(eosw_txq);
		if (!skb) {
			eosw_txq_advance_index(&eosw_txq->last_pidx, 1,
					       eosw_txq->ndesc);
			continue;
		}

		ret = ethofld_hard_xmit(dev, eosw_txq);
		if (ret)
			break;
	}
}
static netdev_tx_t cxgb4_ethofld_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eosw_txq *eosw_txq;
	u32 qid;
	int ret;

	ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
	if (ret)
		goto out_free;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	qid = skb_get_queue_mapping(skb) - pi->nqsets;
	eosw_txq = &tc_port_mqprio->eosw_txq[qid];
	spin_lock_bh(&eosw_txq->lock);
	if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
		goto out_unlock;

	ret = eosw_txq_enqueue(eosw_txq, skb);
	if (ret)
		goto out_unlock;

	/* SKB is queued for processing until credits are available.
	 * So, call the destructor now and we'll free the skb later
	 * after it has been successfully transmitted.
	 */
	skb_orphan(skb);

	eosw_txq_advance(eosw_txq, 1);
	ethofld_xmit(dev, eosw_txq);
	spin_unlock_bh(&eosw_txq->lock);
	return NETDEV_TX_OK;

out_unlock:
	spin_unlock_bh(&eosw_txq->lock);
out_free:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
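/* Descriptive note (added): t4_start_xmit() is the driver's ndo_start_xmit
 * entry point.  It steers each skb to the right Tx path: the VF/VM work
 * request format when the port is in TX_VM mode, the ETHOFLD path for
 * TC-MQPRIO queues, the PTP queue (under ptp_lock) for PTP packets, or the
 * normal Ethernet Tx path.
 */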
netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	u16 qid = skb_get_queue_mapping(skb);

	if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
		return cxgb4_vf_eth_xmit(skb, dev);

	if (unlikely(qid >= pi->nqsets))
		return cxgb4_ethofld_xmit(skb, dev);

	if (is_ptp_enabled(skb, dev)) {
		struct adapter *adap = netdev2adap(dev);
		netdev_tx_t ret;

		spin_lock(&adap->ptp_lock);
		ret = cxgb4_eth_xmit(skb, dev);
		spin_unlock(&adap->ptp_lock);
		return ret;
	}

	return cxgb4_eth_xmit(skb, dev);
}
static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq)
{
	int pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
	int pidx = eosw_txq->pidx;
	struct sk_buff *skb;

	if (!pktcount)
		return;

	if (pktcount < 0)
		pktcount += eosw_txq->ndesc;

	while (pktcount--) {
		pidx--;
		if (pidx < 0)
			pidx += eosw_txq->ndesc;

		skb = eosw_txq->desc[pidx].skb;
		if (skb) {
			dev_consume_skb_any(skb);
			eosw_txq->desc[pidx].skb = NULL;
			eosw_txq->inuse--;
		}
	}

	eosw_txq->pidx = eosw_txq->last_pidx + 1;
}
2413 * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
2415 * @eotid: ETHOFLD tid to bind/unbind
2416 * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
2418 * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class.
2419 * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from
2422 int cxgb4_ethofld_send_flowc(struct net_device
*dev
, u32 eotid
, u32 tc
)
2424 struct port_info
*pi
= netdev2pinfo(dev
);
2425 struct adapter
*adap
= netdev2adap(dev
);
2426 enum sge_eosw_state next_state
;
2427 struct sge_eosw_txq
*eosw_txq
;
2428 u32 len
, len16
, nparams
= 6;
2429 struct fw_flowc_wr
*flowc
;
2430 struct eotid_entry
*entry
;
2431 struct sge_ofld_rxq
*rxq
;
2432 struct sk_buff
*skb
;
2435 len
= sizeof(*flowc
) + sizeof(struct fw_flowc_mnemval
) * nparams
;
2436 len16
= DIV_ROUND_UP(len
, 16);
2438 entry
= cxgb4_lookup_eotid(&adap
->tids
, eotid
);
2442 eosw_txq
= (struct sge_eosw_txq
*)entry
->data
;
2446 skb
= alloc_skb(len
, GFP_KERNEL
);
2450 spin_lock_bh(&eosw_txq
->lock
);
2451 if (tc
!= FW_SCHED_CLS_NONE
) {
2452 if (eosw_txq
->state
!= CXGB4_EO_STATE_CLOSED
)
2455 next_state
= CXGB4_EO_STATE_FLOWC_OPEN_SEND
;
2457 if (eosw_txq
->state
!= CXGB4_EO_STATE_ACTIVE
)
2460 next_state
= CXGB4_EO_STATE_FLOWC_CLOSE_SEND
;
2463 flowc
= __skb_put(skb
, len
);
2464 memset(flowc
, 0, len
);
2466 rxq
= &adap
->sge
.eohw_rxq
[eosw_txq
->hwqid
];
2467 flowc
->flowid_len16
= cpu_to_be32(FW_WR_LEN16_V(len16
) |
2468 FW_WR_FLOWID_V(eosw_txq
->hwtid
));
2469 flowc
->op_to_nparams
= cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR
) |
2470 FW_FLOWC_WR_NPARAMS_V(nparams
) |
2472 flowc
->mnemval
[0].mnemonic
= FW_FLOWC_MNEM_PFNVFN
;
2473 flowc
->mnemval
[0].val
= cpu_to_be32(FW_PFVF_CMD_PFN_V(adap
->pf
));
2474 flowc
->mnemval
[1].mnemonic
= FW_FLOWC_MNEM_CH
;
2475 flowc
->mnemval
[1].val
= cpu_to_be32(pi
->tx_chan
);
2476 flowc
->mnemval
[2].mnemonic
= FW_FLOWC_MNEM_PORT
;
2477 flowc
->mnemval
[2].val
= cpu_to_be32(pi
->tx_chan
);
2478 flowc
->mnemval
[3].mnemonic
= FW_FLOWC_MNEM_IQID
;
2479 flowc
->mnemval
[3].val
= cpu_to_be32(rxq
->rspq
.abs_id
);
2480 flowc
->mnemval
[4].mnemonic
= FW_FLOWC_MNEM_SCHEDCLASS
;
2481 flowc
->mnemval
[4].val
= cpu_to_be32(tc
);
2482 flowc
->mnemval
[5].mnemonic
= FW_FLOWC_MNEM_EOSTATE
;
2483 flowc
->mnemval
[5].val
= cpu_to_be32(tc
== FW_SCHED_CLS_NONE
?
2484 FW_FLOWC_MNEM_EOSTATE_CLOSING
:
2485 FW_FLOWC_MNEM_EOSTATE_ESTABLISHED
);
2487 /* Free up any pending skbs to ensure there's room for
2488 * termination FLOWC.
2490 if (tc
== FW_SCHED_CLS_NONE
)
2491 eosw_txq_flush_pending_skbs(eosw_txq
);
2493 ret
= eosw_txq_enqueue(eosw_txq
, skb
);
2495 dev_consume_skb_any(skb
);
2499 eosw_txq
->state
= next_state
;
2500 eosw_txq
->flowc_idx
= eosw_txq
->pidx
;
2501 eosw_txq_advance(eosw_txq
, 1);
2502 ethofld_xmit(dev
, eosw_txq
);
2505 spin_unlock_bh(&eosw_txq
->lock
);
/**
 * is_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as a WR with immediate data.
 */
static inline int is_imm(const struct sk_buff *skb)
{
	return skb->len <= MAX_CTRL_WR_LEN;
}

/**
 * ctrlq_check_stop - check if a control queue is full and should stop
 * @q: the queue
 * @wr: most recent WR written to the queue
 *
 * Check if a control queue has become full and should be stopped.
 * We clean up control queue descriptors very lazily, only when we are out.
 * If the queue is still full after reclaiming any completed descriptors
 * we suspend it and have the last WR wake it up.
 */
static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
{
	reclaim_completed_tx_imm(&q->q);
	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
		wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
		q->q.stops++;
		q->full = 1;
	}
}
/**
 * ctrl_xmit - send a packet through an SGE control Tx queue
 * @q: the control queue
 * @skb: the packet
 *
 * Send a packet through an SGE control Tx queue.  Packets sent through
 * a control queue must fit entirely as immediate data.
 */
static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
{
	unsigned int ndesc;
	struct fw_wr_hdr *wr;

	if (unlikely(!is_imm(skb))) {
		WARN_ON(1);
		dev_kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
	spin_lock(&q->sendq.lock);

	if (unlikely(q->full)) {
		skb->priority = ndesc;			/* save for restart */
		__skb_queue_tail(&q->sendq, skb);
		spin_unlock(&q->sendq.lock);
		return NET_XMIT_CN;
	}

	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
	cxgb4_inline_tx_skb(skb, &q->q, wr);

	txq_advance(&q->q, ndesc);
	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
		ctrlq_check_stop(q, wr);

	cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
	spin_unlock(&q->sendq.lock);

	kfree_skb(skb);
	return NET_XMIT_SUCCESS;
}
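/* Worked example (added): a control WR is always carried as immediate
 * data, so the descriptor count above is simply
 * DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)).  With the usual 64-byte
 * Tx descriptor, a 96-byte control WR therefore occupies two descriptors.
 */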
2584 * restart_ctrlq - restart a suspended control queue
2585 * @data: the control queue to restart
2587 * Resumes transmission on a suspended Tx control queue.
2589 static void restart_ctrlq(unsigned long data
)
2591 struct sk_buff
*skb
;
2592 unsigned int written
= 0;
2593 struct sge_ctrl_txq
*q
= (struct sge_ctrl_txq
*)data
;
2595 spin_lock(&q
->sendq
.lock
);
2596 reclaim_completed_tx_imm(&q
->q
);
2597 BUG_ON(txq_avail(&q
->q
) < TXQ_STOP_THRES
); /* q should be empty */
2599 while ((skb
= __skb_dequeue(&q
->sendq
)) != NULL
) {
2600 struct fw_wr_hdr
*wr
;
2601 unsigned int ndesc
= skb
->priority
; /* previously saved */
2604 /* Write descriptors and free skbs outside the lock to limit
2605 * wait times. q->full is still set so new skbs will be queued.
2607 wr
= (struct fw_wr_hdr
*)&q
->q
.desc
[q
->q
.pidx
];
2608 txq_advance(&q
->q
, ndesc
);
2609 spin_unlock(&q
->sendq
.lock
);
2611 cxgb4_inline_tx_skb(skb
, &q
->q
, wr
);
2614 if (unlikely(txq_avail(&q
->q
) < TXQ_STOP_THRES
)) {
2615 unsigned long old
= q
->q
.stops
;
2617 ctrlq_check_stop(q
, wr
);
2618 if (q
->q
.stops
!= old
) { /* suspended anew */
2619 spin_lock(&q
->sendq
.lock
);
2624 cxgb4_ring_tx_db(q
->adap
, &q
->q
, written
);
2627 spin_lock(&q
->sendq
.lock
);
2632 cxgb4_ring_tx_db(q
->adap
, &q
->q
, written
);
2633 spin_unlock(&q
->sendq
.lock
);
/**
 * t4_mgmt_tx - send a management message
 * @adap: the adapter
 * @skb: the packet containing the management message
 *
 * Send a management message through control queue 0.
 */
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
	local_bh_enable();
	return net_xmit_eval(ret);
}
/**
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data.  We currently use the same limit as for Ethernet packets.
 */
static inline int is_ofld_imm(const struct sk_buff *skb)
{
	struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
	unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));

	if (opcode == FW_CRYPTO_LOOKASIDE_WR)
		return skb->len <= SGE_MAX_WR_LEN;
	else
		return skb->len <= MAX_IMM_TX_PKT_LEN;
}

/**
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);

	flits = skb_transport_offset(skb) / 8U;	/* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}
/**
 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
 * @q: the queue to stop
 *
 * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
 * inability to map packets.  A periodic timer attempts to restart
 * queues so marked.
 */
static void txq_stop_maperr(struct sge_uld_txq *q)
{
	q->mapping_err++;
	q->q.stops++;
	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
		q->adap->sge.txq_maperr);
}

/**
 * ofldtxq_stop - stop an offload Tx queue that has become full
 * @q: the queue to stop
 * @wr: the Work Request causing the queue to become full
 *
 * Stops an offload Tx queue that has become full and modifies the packet
 * being written to request a wakeup.
 */
static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
{
	wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
	q->q.stops++;
	q->full = 1;
}
2725 * service_ofldq - service/restart a suspended offload queue
2726 * @q: the offload queue
2728 * Services an offload Tx queue by moving packets from its Pending Send
2729 * Queue to the Hardware TX ring. The function starts and ends with the
2730 * Send Queue locked, but drops the lock while putting the skb at the
2731 * head of the Send Queue onto the Hardware TX Ring. Dropping the lock
2732 * allows more skbs to be added to the Send Queue by other threads.
2733 * The packet being processed at the head of the Pending Send Queue is
2734 * left on the queue in case we experience DMA Mapping errors, etc.
2735 * and need to give up and restart later.
2737 * service_ofldq() can be thought of as a task which opportunistically
2738 * uses other threads execution contexts. We use the Offload Queue
2739 * boolean "service_ofldq_running" to make sure that only one instance
2740 * is ever running at a time ...
2742 static void service_ofldq(struct sge_uld_txq
*q
)
2743 __must_hold(&q
->sendq
.lock
)
2745 u64
*pos
, *before
, *end
;
2747 struct sk_buff
*skb
;
2748 struct sge_txq
*txq
;
2750 unsigned int written
= 0;
2751 unsigned int flits
, ndesc
;
2753 /* If another thread is currently in service_ofldq() processing the
2754 * Pending Send Queue then there's nothing to do. Otherwise, flag
2755 * that we're doing the work and continue. Examining/modifying
2756 * the Offload Queue boolean "service_ofldq_running" must be done
2757 * while holding the Pending Send Queue Lock.
2759 if (q
->service_ofldq_running
)
2761 q
->service_ofldq_running
= true;
2763 while ((skb
= skb_peek(&q
->sendq
)) != NULL
&& !q
->full
) {
2764 /* We drop the lock while we're working with the skb at the
2765 * head of the Pending Send Queue. This allows more skbs to
2766 * be added to the Pending Send Queue while we're working on
2767 * this one. We don't need to lock to guard the TX Ring
2768 * updates because only one thread of execution is ever
2769 * allowed into service_ofldq() at a time.
2771 spin_unlock(&q
->sendq
.lock
);
2773 cxgb4_reclaim_completed_tx(q
->adap
, &q
->q
, false);
2775 flits
= skb
->priority
; /* previously saved */
2776 ndesc
= flits_to_desc(flits
);
2777 credits
= txq_avail(&q
->q
) - ndesc
;
2778 BUG_ON(credits
< 0);
2779 if (unlikely(credits
< TXQ_STOP_THRES
))
2780 ofldtxq_stop(q
, (struct fw_wr_hdr
*)skb
->data
);
2782 pos
= (u64
*)&q
->q
.desc
[q
->q
.pidx
];
2783 if (is_ofld_imm(skb
))
2784 cxgb4_inline_tx_skb(skb
, &q
->q
, pos
);
2785 else if (cxgb4_map_skb(q
->adap
->pdev_dev
, skb
,
2786 (dma_addr_t
*)skb
->head
)) {
2788 spin_lock(&q
->sendq
.lock
);
2791 int last_desc
, hdr_len
= skb_transport_offset(skb
);
2793 /* The WR headers may not fit within one descriptor.
2794 * So we need to deal with wrap-around here.
2796 before
= (u64
*)pos
;
2797 end
= (u64
*)pos
+ flits
;
2799 pos
= (void *)inline_tx_skb_header(skb
, &q
->q
,
2802 if (before
> (u64
*)pos
) {
2803 left
= (u8
*)end
- (u8
*)txq
->stat
;
2804 end
= (void *)txq
->desc
+ left
;
2807 /* If current position is already at the end of the
2808 * ofld queue, reset the current to point to
2809 * start of the queue and update the end ptr as well.
2811 if (pos
== (u64
*)txq
->stat
) {
2812 left
= (u8
*)end
- (u8
*)txq
->stat
;
2813 end
= (void *)txq
->desc
+ left
;
2814 pos
= (void *)txq
->desc
;
2817 cxgb4_write_sgl(skb
, &q
->q
, (void *)pos
,
2819 (dma_addr_t
*)skb
->head
);
2820 #ifdef CONFIG_NEED_DMA_MAP_STATE
2821 skb
->dev
= q
->adap
->port
[0];
2822 skb
->destructor
= deferred_unmap_destructor
;
2824 last_desc
= q
->q
.pidx
+ ndesc
- 1;
2825 if (last_desc
>= q
->q
.size
)
2826 last_desc
-= q
->q
.size
;
2827 q
->q
.sdesc
[last_desc
].skb
= skb
;
2830 txq_advance(&q
->q
, ndesc
);
2832 if (unlikely(written
> 32)) {
2833 cxgb4_ring_tx_db(q
->adap
, &q
->q
, written
);
2837 /* Reacquire the Pending Send Queue Lock so we can unlink the
2838 * skb we've just successfully transferred to the TX Ring and
2839 * loop for the next skb which may be at the head of the
2840 * Pending Send Queue.
2842 spin_lock(&q
->sendq
.lock
);
2843 __skb_unlink(skb
, &q
->sendq
);
2844 if (is_ofld_imm(skb
))
2847 if (likely(written
))
2848 cxgb4_ring_tx_db(q
->adap
, &q
->q
, written
);
2850 /*Indicate that no thread is processing the Pending Send Queue
2853 q
->service_ofldq_running
= false;
/**
 * ofld_xmit - send a packet through an offload queue
 * @q: the Tx offload queue
 * @skb: the packet
 *
 * Send an offload packet through an SGE offload queue.
 */
static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
{
	skb->priority = calc_tx_flits_ofld(skb);	/* save for restart */
	spin_lock(&q->sendq.lock);

	/* Queue the new skb onto the Offload Queue's Pending Send Queue.  If
	 * that results in this new skb being the only one on the queue, start
	 * servicing it.  If there are other skbs already on the list, then
	 * either the queue is currently being processed or it's been stopped
	 * for some reason and it'll be restarted at a later time.  Restart
	 * paths are triggered by events like experiencing a DMA Mapping Error
	 * or filling the Hardware TX Ring.
	 */
	__skb_queue_tail(&q->sendq, skb);
	if (q->sendq.qlen == 1)
		service_ofldq(q);

	spin_unlock(&q->sendq.lock);
	return NET_XMIT_SUCCESS;
}

/**
 * restart_ofldq - restart a suspended offload queue
 * @data: the offload queue to restart
 *
 * Resumes transmission on a suspended Tx offload queue.
 */
static void restart_ofldq(unsigned long data)
{
	struct sge_uld_txq *q = (struct sge_uld_txq *)data;

	spin_lock(&q->sendq.lock);
	q->full = 0;	/* the queue actually is completely empty now */
	service_ofldq(q);
	spin_unlock(&q->sendq.lock);
}
/**
 * skb_txq - return the Tx queue an offload packet should use
 * @skb: the packet
 *
 * Returns the Tx queue an offload packet should use as indicated by bits
 * 1-15 in the packet's queue_mapping.
 */
static inline unsigned int skb_txq(const struct sk_buff *skb)
{
	return skb->queue_mapping >> 1;
}

/**
 * is_ctrl_pkt - return whether an offload packet is a control packet
 * @skb: the packet
 *
 * Returns whether an offload packet should use an OFLD or a CTRL
 * Tx queue as indicated by bit 0 in the packet's queue_mapping.
 */
static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
{
	return skb->queue_mapping & 1;
}
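/* Descriptive note (added): ULDs encode the target queue in
 * skb->queue_mapping before handing the skb to uld_send(): bit 0 selects
 * CTRL vs. OFLD and bits 1-15 carry the queue index, i.e. roughly
 * queue_mapping = (qidx << 1) | is_ctrl (typically set via the
 * set_wr_txq() helper).
 */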
static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
			   unsigned int tx_uld_type)
{
	struct sge_uld_txq_info *txq_info;
	struct sge_uld_txq *txq;
	unsigned int idx = skb_txq(skb);

	if (unlikely(is_ctrl_pkt(skb))) {
		/* Single ctrl queue is a requirement for LE workaround path */
		if (adap->tids.nsftids)
			idx = 0;
		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
	}

	txq_info = adap->sge.uld_txq_info[tx_uld_type];
	if (unlikely(!txq_info)) {
		WARN_ON(true);
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	txq = &txq_info->uldtxq[idx];
	return ofld_xmit(txq, skb);
}
/**
 * t4_ofld_send - send an offload packet
 * @adap: the adapter
 * @skb: the packet
 *
 * Sends an offload packet.  We use the packet queue_mapping to select the
 * appropriate Tx queue as follows: bit 0 indicates whether the packet
 * should be sent as regular or control, bits 1-15 select the queue.
 */
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = uld_send(adap, skb, CXGB4_TX_OFLD);
	local_bh_enable();
	return ret;
}

/**
 * cxgb4_ofld_send - send an offload packet
 * @dev: the net device
 * @skb: the packet
 *
 * Sends an offload packet.  This is an exported version of @t4_ofld_send,
 * intended for ULDs.
 */
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
{
	return t4_ofld_send(netdev2adap(dev), skb);
}
EXPORT_SYMBOL(cxgb4_ofld_send);
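/* Descriptive note (added): inline_tx_header() copies a fully formed Work
 * Request into the Tx ring starting at @pos, wrapping past the status page
 * back to the start of the ring if necessary, and pads the result to a
 * multiple of 16 bytes.
 */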
static void *inline_tx_header(const void *src,
			      const struct sge_txq *q,
			      void *pos, int length)
{
	int left = (void *)q->stat - pos;
	u64 *p;

	if (likely(length <= left)) {
		memcpy(pos, src, length);
		pos += length;
	} else {
		memcpy(pos, src, left);
		memcpy(q->desc, src + left, length - left);
		pos = (void *)q->desc + (length - left);
	}
	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8) {
		*p = 0;
		return p + 1;
	}
	return p;
}
3006 * ofld_xmit_direct - copy a WR into offload queue
3007 * @q: the Tx offload queue
3008 * @src: location of WR
3011 * Copy an immediate WR into an uncontended SGE offload queue.
3013 static int ofld_xmit_direct(struct sge_uld_txq
*q
, const void *src
,
3020 /* Use the lower limit as the cut-off */
3021 if (len
> MAX_IMM_OFLD_TX_DATA_WR_LEN
) {
3023 return NET_XMIT_DROP
;
3026 /* Don't return NET_XMIT_CN here as the current
3027 * implementation doesn't queue the request
3028 * using an skb when the following conditions not met
3030 if (!spin_trylock(&q
->sendq
.lock
))
3031 return NET_XMIT_DROP
;
3033 if (q
->full
|| !skb_queue_empty(&q
->sendq
) ||
3034 q
->service_ofldq_running
) {
3035 spin_unlock(&q
->sendq
.lock
);
3036 return NET_XMIT_DROP
;
3038 ndesc
= flits_to_desc(DIV_ROUND_UP(len
, 8));
3039 credits
= txq_avail(&q
->q
) - ndesc
;
3040 pos
= (u64
*)&q
->q
.desc
[q
->q
.pidx
];
3042 /* ofldtxq_stop modifies WR header in-situ */
3043 inline_tx_header(src
, &q
->q
, pos
, len
);
3044 if (unlikely(credits
< TXQ_STOP_THRES
))
3045 ofldtxq_stop(q
, (struct fw_wr_hdr
*)pos
);
3046 txq_advance(&q
->q
, ndesc
);
3047 cxgb4_ring_tx_db(q
->adap
, &q
->q
, ndesc
);
3049 spin_unlock(&q
->sendq
.lock
);
3050 return NET_XMIT_SUCCESS
;
3053 int cxgb4_immdata_send(struct net_device
*dev
, unsigned int idx
,
3054 const void *src
, unsigned int len
)
3056 struct sge_uld_txq_info
*txq_info
;
3057 struct sge_uld_txq
*txq
;
3058 struct adapter
*adap
;
3061 adap
= netdev2adap(dev
);
3064 txq_info
= adap
->sge
.uld_txq_info
[CXGB4_TX_OFLD
];
3065 if (unlikely(!txq_info
)) {
3068 return NET_XMIT_DROP
;
3070 txq
= &txq_info
->uldtxq
[idx
];
3072 ret
= ofld_xmit_direct(txq
, src
, len
);
3074 return net_xmit_eval(ret
);
3076 EXPORT_SYMBOL(cxgb4_immdata_send
);
3079 * t4_crypto_send - send crypto packet
3080 * @adap: the adapter
3083 * Sends crypto packet. We use the packet queue_mapping to select the
3084 * appropriate Tx queue as follows: bit 0 indicates whether the packet
3085 * should be sent as regular or control, bits 1-15 select the queue.
3087 static int t4_crypto_send(struct adapter
*adap
, struct sk_buff
*skb
)
3092 ret
= uld_send(adap
, skb
, CXGB4_TX_CRYPTO
);
3098 * cxgb4_crypto_send - send crypto packet
3099 * @dev: the net device
3102 * Sends crypto packet. This is an exported version of @t4_crypto_send,
3103 * intended for ULDs.
3105 int cxgb4_crypto_send(struct net_device
*dev
, struct sk_buff
*skb
)
3107 return t4_crypto_send(netdev2adap(dev
), skb
);
3109 EXPORT_SYMBOL(cxgb4_crypto_send
);
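/* Descriptive note (added): copy_frags() attaches the pages of an ingress
 * packet gather list to an skb as page fragments.  The last page in the
 * list still belongs to the free list, so an extra reference is taken on
 * it.
 */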
3111 static inline void copy_frags(struct sk_buff
*skb
,
3112 const struct pkt_gl
*gl
, unsigned int offset
)
3116 /* usually there's just one frag */
3117 __skb_fill_page_desc(skb
, 0, gl
->frags
[0].page
,
3118 gl
->frags
[0].offset
+ offset
,
3119 gl
->frags
[0].size
- offset
);
3120 skb_shinfo(skb
)->nr_frags
= gl
->nfrags
;
3121 for (i
= 1; i
< gl
->nfrags
; i
++)
3122 __skb_fill_page_desc(skb
, i
, gl
->frags
[i
].page
,
3123 gl
->frags
[i
].offset
,
3126 /* get a reference to the last page, we don't own it */
3127 get_page(gl
->frags
[gl
->nfrags
- 1].page
);
3131 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
3132 * @gl: the gather list
3133 * @skb_len: size of sk_buff main body if it carries fragments
3134 * @pull_len: amount of data to move to the sk_buff's main body
3136 * Builds an sk_buff from the given packet gather list. Returns the
3137 * sk_buff or %NULL if sk_buff allocation failed.
3139 struct sk_buff
*cxgb4_pktgl_to_skb(const struct pkt_gl
*gl
,
3140 unsigned int skb_len
, unsigned int pull_len
)
3142 struct sk_buff
*skb
;
3145 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
3146 * size, which is expected since buffers are at least PAGE_SIZEd.
3147 * In this case packets up to RX_COPY_THRES have only one fragment.
3149 if (gl
->tot_len
<= RX_COPY_THRES
) {
3150 skb
= dev_alloc_skb(gl
->tot_len
);
3153 __skb_put(skb
, gl
->tot_len
);
3154 skb_copy_to_linear_data(skb
, gl
->va
, gl
->tot_len
);
3156 skb
= dev_alloc_skb(skb_len
);
3159 __skb_put(skb
, pull_len
);
3160 skb_copy_to_linear_data(skb
, gl
->va
, pull_len
);
3162 copy_frags(skb
, gl
, pull_len
);
3163 skb
->len
= gl
->tot_len
;
3164 skb
->data_len
= skb
->len
- pull_len
;
3165 skb
->truesize
+= skb
->data_len
;
3169 EXPORT_SYMBOL(cxgb4_pktgl_to_skb
);
3172 * t4_pktgl_free - free a packet gather list
3173 * @gl: the gather list
3175 * Releases the pages of a packet gather list. We do not own the last
3176 * page on the list and do not free it.
3178 static void t4_pktgl_free(const struct pkt_gl
*gl
)
3181 const struct page_frag
*p
;
3183 for (p
= gl
->frags
, n
= gl
->nfrags
- 1; n
--; p
++)
3188 * Process an MPS trace packet. Give it an unused protocol number so it won't
3189 * be delivered to anyone and send it to the stack for capture.
3191 static noinline
int handle_trace_pkt(struct adapter
*adap
,
3192 const struct pkt_gl
*gl
)
3194 struct sk_buff
*skb
;
3196 skb
= cxgb4_pktgl_to_skb(gl
, RX_PULL_LEN
, RX_PULL_LEN
);
3197 if (unlikely(!skb
)) {
3202 if (is_t4(adap
->params
.chip
))
3203 __skb_pull(skb
, sizeof(struct cpl_trace_pkt
));
3205 __skb_pull(skb
, sizeof(struct cpl_t5_trace_pkt
));
3207 skb_reset_mac_header(skb
);
3208 skb
->protocol
= htons(0xffff);
3209 skb
->dev
= adap
->port
[0];
3210 netif_receive_skb(skb
);
3215 * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
3216 * @adap: the adapter
3217 * @hwtstamps: time stamp structure to update
3218 * @sgetstamp: 60bit iqe timestamp
3220 * Every ingress queue entry has the 60-bit timestamp, convert that timestamp
3221 * which is in Core Clock ticks into ktime_t and assign it
3223 static void cxgb4_sgetim_to_hwtstamp(struct adapter
*adap
,
3224 struct skb_shared_hwtstamps
*hwtstamps
,
3228 u64 tmp
= (sgetstamp
* 1000 * 1000 + adap
->params
.vpd
.cclk
/ 2);
3230 ns
= div_u64(tmp
, adap
->params
.vpd
.cclk
);
3232 memset(hwtstamps
, 0, sizeof(*hwtstamps
));
3233 hwtstamps
->hwtstamp
= ns_to_ktime(ns
);
3236 static void do_gro(struct sge_eth_rxq
*rxq
, const struct pkt_gl
*gl
,
3237 const struct cpl_rx_pkt
*pkt
, unsigned long tnl_hdr_len
)
3239 struct adapter
*adapter
= rxq
->rspq
.adap
;
3240 struct sge
*s
= &adapter
->sge
;
3241 struct port_info
*pi
;
3243 struct sk_buff
*skb
;
3245 skb
= napi_get_frags(&rxq
->rspq
.napi
);
3246 if (unlikely(!skb
)) {
3248 rxq
->stats
.rx_drops
++;
3252 copy_frags(skb
, gl
, s
->pktshift
);
3254 skb
->csum_level
= 1;
3255 skb
->len
= gl
->tot_len
- s
->pktshift
;
3256 skb
->data_len
= skb
->len
;
3257 skb
->truesize
+= skb
->data_len
;
3258 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
3259 skb_record_rx_queue(skb
, rxq
->rspq
.idx
);
3260 pi
= netdev_priv(skb
->dev
);
3262 cxgb4_sgetim_to_hwtstamp(adapter
, skb_hwtstamps(skb
),
3264 if (rxq
->rspq
.netdev
->features
& NETIF_F_RXHASH
)
3265 skb_set_hash(skb
, (__force u32
)pkt
->rsshdr
.hash_val
,
3268 if (unlikely(pkt
->vlan_ex
)) {
3269 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
), ntohs(pkt
->vlan
));
3270 rxq
->stats
.vlan_ex
++;
3272 ret
= napi_gro_frags(&rxq
->rspq
.napi
);
3273 if (ret
== GRO_HELD
)
3274 rxq
->stats
.lro_pkts
++;
3275 else if (ret
== GRO_MERGED
|| ret
== GRO_MERGED_FREE
)
3276 rxq
->stats
.lro_merged
++;
3278 rxq
->stats
.rx_cso
++;
3288 * t4_systim_to_hwstamp - read hardware time stamp
3289 * @adapter: the adapter
3292 * Read Time Stamp from MPS packet and insert in skb which
3293 * is forwarded to PTP application
3295 static noinline
int t4_systim_to_hwstamp(struct adapter
*adapter
,
3296 struct sk_buff
*skb
)
3298 struct skb_shared_hwtstamps
*hwtstamps
;
3299 struct cpl_rx_mps_pkt
*cpl
= NULL
;
3300 unsigned char *data
;
3303 cpl
= (struct cpl_rx_mps_pkt
*)skb
->data
;
3304 if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl
->op_to_r1_hi
)) &
3305 X_CPL_RX_MPS_PKT_TYPE_PTP
))
3306 return RX_PTP_PKT_ERR
;
3308 data
= skb
->data
+ sizeof(*cpl
);
3309 skb_pull(skb
, 2 * sizeof(u64
) + sizeof(struct cpl_rx_mps_pkt
));
3310 offset
= ETH_HLEN
+ IPV4_HLEN(skb
->data
) + UDP_HLEN
;
3311 if (skb
->len
< offset
+ OFF_PTP_SEQUENCE_ID
+ sizeof(short))
3312 return RX_PTP_PKT_ERR
;
3314 hwtstamps
= skb_hwtstamps(skb
);
3315 memset(hwtstamps
, 0, sizeof(*hwtstamps
));
3316 hwtstamps
->hwtstamp
= ns_to_ktime(get_unaligned_be64(data
));
3318 return RX_PTP_PKT_SUC
;
3322 * t4_rx_hststamp - Recv PTP Event Message
3323 * @adapter: the adapter
3324 * @rsp: the response queue descriptor holding the RX_PKT message
3325 * @rxq: the response queue holding the RX_PKT message
3328 * PTP enabled and MPS packet, read HW timestamp
3330 static int t4_rx_hststamp(struct adapter
*adapter
, const __be64
*rsp
,
3331 struct sge_eth_rxq
*rxq
, struct sk_buff
*skb
)
3335 if (unlikely((*(u8
*)rsp
== CPL_RX_MPS_PKT
) &&
3336 !is_t4(adapter
->params
.chip
))) {
3337 ret
= t4_systim_to_hwstamp(adapter
, skb
);
3338 if (ret
== RX_PTP_PKT_ERR
) {
3340 rxq
->stats
.rx_drops
++;
3344 return RX_NON_PTP_PKT
;
3348 * t4_tx_hststamp - Loopback PTP Transmit Event Message
3349 * @adapter: the adapter
3351 * @dev: the ingress net device
3353 * Read hardware timestamp for the loopback PTP Tx event message
3355 static int t4_tx_hststamp(struct adapter
*adapter
, struct sk_buff
*skb
,
3356 struct net_device
*dev
)
3358 struct port_info
*pi
= netdev_priv(dev
);
3360 if (!is_t4(adapter
->params
.chip
) && adapter
->ptp_tx_skb
) {
3361 cxgb4_ptp_read_hwstamp(adapter
, pi
);
3369 * t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages
3370 * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue
3371 * @rsp: Response Entry pointer into Response Queue
3372 * @gl: Gather List pointer
3374 * For adapters which support the SGE Doorbell Queue Timer facility,
3375 * we configure the Ethernet TX Queues to send CIDX Updates to the
3376 * Associated Ethernet RX Response Queue with CPL_SGE_EGR_UPDATE
3377 * messages. This adds a small load to PCIe Link RX bandwidth and,
3378 * potentially, higher CPU Interrupt load, but allows us to respond
3379 * much more quickly to the CIDX Updates. This is important for
3380 * Upper Layer Software which isn't willing to have a large amount
3381 * of TX Data outstanding before receiving DMA Completions.
3383 static void t4_tx_completion_handler(struct sge_rspq
*rspq
,
3385 const struct pkt_gl
*gl
)
3387 u8 opcode
= ((const struct rss_header
*)rsp
)->opcode
;
3388 struct port_info
*pi
= netdev_priv(rspq
->netdev
);
3389 struct adapter
*adapter
= rspq
->adap
;
3390 struct sge
*s
= &adapter
->sge
;
3391 struct sge_eth_txq
*txq
;
3393 /* skip RSS header */
3396 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
3398 if (unlikely(opcode
== CPL_FW4_MSG
&&
3399 ((const struct cpl_fw4_msg
*)rsp
)->type
==
3402 opcode
= ((const struct rss_header
*)rsp
)->opcode
;
3406 if (unlikely(opcode
!= CPL_SGE_EGR_UPDATE
)) {
3407 pr_info("%s: unexpected FW4/CPL %#x on Rx queue\n",
3412 txq
= &s
->ethtxq
[pi
->first_qset
+ rspq
->idx
];
3413 t4_sge_eth_txq_egress_update(adapter
, txq
, -1);
3417 * t4_ethrx_handler - process an ingress ethernet packet
3418 * @q: the response queue that received the packet
3419 * @rsp: the response queue descriptor holding the RX_PKT message
3420 * @si: the gather list of packet fragments
3422 * Process an ingress ethernet packet and deliver it to the stack.
3424 int t4_ethrx_handler(struct sge_rspq
*q
, const __be64
*rsp
,
3425 const struct pkt_gl
*si
)
3428 struct sk_buff
*skb
;
3429 const struct cpl_rx_pkt
*pkt
;
3430 struct sge_eth_rxq
*rxq
= container_of(q
, struct sge_eth_rxq
, rspq
);
3431 struct adapter
*adapter
= q
->adap
;
3432 struct sge
*s
= &q
->adap
->sge
;
3433 int cpl_trace_pkt
= is_t4(q
->adap
->params
.chip
) ?
3434 CPL_TRACE_PKT
: CPL_TRACE_PKT_T5
;
3435 u16 err_vec
, tnl_hdr_len
= 0;
3436 struct port_info
*pi
;
3439 /* If we're looking at TX Queue CIDX Update, handle that separately
3442 if (unlikely((*(u8
*)rsp
== CPL_FW4_MSG
) ||
3443 (*(u8
*)rsp
== CPL_SGE_EGR_UPDATE
))) {
3444 t4_tx_completion_handler(q
, rsp
, si
);
3448 if (unlikely(*(u8
*)rsp
== cpl_trace_pkt
))
3449 return handle_trace_pkt(q
->adap
, si
);
3451 pkt
= (const struct cpl_rx_pkt
*)rsp
;
3452 /* Compressed error vector is enabled for T6 only */
3453 if (q
->adap
->params
.tp
.rx_pkt_encap
) {
3454 err_vec
= T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt
->err_vec
));
3455 tnl_hdr_len
= T6_RX_TNLHDR_LEN_G(ntohs(pkt
->err_vec
));
3457 err_vec
= be16_to_cpu(pkt
->err_vec
);
3460 csum_ok
= pkt
->csum_calc
&& !err_vec
&&
3461 (q
->netdev
->features
& NETIF_F_RXCSUM
);
3464 rxq
->stats
.bad_rx_pkts
++;
3466 if (((pkt
->l2info
& htonl(RXF_TCP_F
)) ||
3468 (q
->netdev
->features
& NETIF_F_GRO
) && csum_ok
&& !pkt
->ip_frag
) {
3469 do_gro(rxq
, si
, pkt
, tnl_hdr_len
);
3473 skb
= cxgb4_pktgl_to_skb(si
, RX_PKT_SKB_LEN
, RX_PULL_LEN
);
3474 if (unlikely(!skb
)) {
3476 rxq
->stats
.rx_drops
++;
3479 pi
= netdev_priv(q
->netdev
);
3481 /* Handle PTP Event Rx packet */
3482 if (unlikely(pi
->ptp_enable
)) {
3483 ret
= t4_rx_hststamp(adapter
, rsp
, rxq
, skb
);
3484 if (ret
== RX_PTP_PKT_ERR
)
3488 __skb_pull(skb
, s
->pktshift
); /* remove ethernet header pad */
3490 /* Handle the PTP Event Tx Loopback packet */
3491 if (unlikely(pi
->ptp_enable
&& !ret
&&
3492 (pkt
->l2info
& htonl(RXF_UDP_F
)) &&
3493 cxgb4_ptp_is_ptp_rx(skb
))) {
3494 if (!t4_tx_hststamp(adapter
, skb
, q
->netdev
))
3498 skb
->protocol
= eth_type_trans(skb
, q
->netdev
);
3499 skb_record_rx_queue(skb
, q
->idx
);
3500 if (skb
->dev
->features
& NETIF_F_RXHASH
)
3501 skb_set_hash(skb
, (__force u32
)pkt
->rsshdr
.hash_val
,
3507 cxgb4_sgetim_to_hwtstamp(q
->adap
, skb_hwtstamps(skb
),
3509 if (csum_ok
&& (pkt
->l2info
& htonl(RXF_UDP_F
| RXF_TCP_F
))) {
3510 if (!pkt
->ip_frag
) {
3511 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
3512 rxq
->stats
.rx_cso
++;
3513 } else if (pkt
->l2info
& htonl(RXF_IP_F
)) {
3514 __sum16 c
= (__force __sum16
)pkt
->csum
;
3515 skb
->csum
= csum_unfold(c
);
3518 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
3519 skb
->csum_level
= 1;
3521 skb
->ip_summed
= CHECKSUM_COMPLETE
;
3523 rxq
->stats
.rx_cso
++;
3526 skb_checksum_none_assert(skb
);
3527 #ifdef CONFIG_CHELSIO_T4_FCOE
3528 #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
3529 RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
3531 if (!(pkt
->l2info
& cpu_to_be32(CPL_RX_PKT_FLAGS
))) {
3532 if ((pkt
->l2info
& cpu_to_be32(RXF_FCOE_F
)) &&
3533 (pi
->fcoe
.flags
& CXGB_FCOE_ENABLED
)) {
3534 if (q
->adap
->params
.tp
.rx_pkt_encap
)
3536 T6_COMPR_RXERR_SUM_F
;
3538 csum_ok
= err_vec
& RXERR_CSUM_F
;
3540 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
3544 #undef CPL_RX_PKT_FLAGS
3545 #endif /* CONFIG_CHELSIO_T4_FCOE */
3548 if (unlikely(pkt
->vlan_ex
)) {
3549 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
), ntohs(pkt
->vlan
));
3550 rxq
->stats
.vlan_ex
++;
3552 skb_mark_napi_id(skb
, &q
->napi
);
3553 netif_receive_skb(skb
);
3558 * restore_rx_bufs - put back a packet's Rx buffers
3559 * @si: the packet gather list
3560 * @q: the SGE free list
3561 * @frags: number of FL buffers to restore
3563 * Puts back on an FL the Rx buffers associated with @si. The buffers
3564 * have already been unmapped and are left unmapped, we mark them so to
3565 * prevent further unmapping attempts.
3567 * This function undoes a series of @unmap_rx_buf calls when we find out
3568 * that the current packet can't be processed right away afterall and we
3569 * need to come back to it later. This is a very rare event and there's
3570 * no effort to make this particularly efficient.
3572 static void restore_rx_bufs(const struct pkt_gl
*si
, struct sge_fl
*q
,
3575 struct rx_sw_desc
*d
;
3579 q
->cidx
= q
->size
- 1;
3582 d
= &q
->sdesc
[q
->cidx
];
3583 d
->page
= si
->frags
[frags
].page
;
3584 d
->dma_addr
|= RX_UNMAPPED_BUF
;
/**
 * is_new_response - check if a response is newly written
 * @r: the response descriptor
 * @q: the response queue
 *
 * Returns true if a response descriptor contains a yet unprocessed
 * response.
 */
static inline bool is_new_response(const struct rsp_ctrl *r,
				   const struct sge_rspq *q)
{
	return (r->type_gen >> RSPD_GEN_S) == q->gen;
}

/**
 * rspq_next - advance to the next entry in a response queue
 * @q: the response queue
 *
 * Updates the state of a response queue to advance it to the next entry.
 */
static inline void rspq_next(struct sge_rspq *q)
{
	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
	if (unlikely(++q->cidx == q->size)) {
		q->cidx = 0;
		q->gen ^= 1;
		q->cur_desc = q->desc;
	}
}
3620 * process_responses - process responses from an SGE response queue
3621 * @q: the ingress queue to process
3622 * @budget: how many responses can be processed in this round
3624 * Process responses from an SGE response queue up to the supplied budget.
3625 * Responses include received packets as well as control messages from FW
3628 * Additionally choose the interrupt holdoff time for the next interrupt
3629 * on this queue. If the system is under memory shortage use a fairly
3630 * long delay to help recovery.
3632 static int process_responses(struct sge_rspq
*q
, int budget
)
3635 int budget_left
= budget
;
3636 const struct rsp_ctrl
*rc
;
3637 struct sge_eth_rxq
*rxq
= container_of(q
, struct sge_eth_rxq
, rspq
);
3638 struct adapter
*adapter
= q
->adap
;
3639 struct sge
*s
= &adapter
->sge
;
3641 while (likely(budget_left
)) {
3642 rc
= (void *)q
->cur_desc
+ (q
->iqe_len
- sizeof(*rc
));
3643 if (!is_new_response(rc
, q
)) {
3644 if (q
->flush_handler
)
3645 q
->flush_handler(q
);
3650 rsp_type
= RSPD_TYPE_G(rc
->type_gen
);
3651 if (likely(rsp_type
== RSPD_TYPE_FLBUF_X
)) {
3652 struct page_frag
*fp
;
3654 const struct rx_sw_desc
*rsd
;
3655 u32 len
= ntohl(rc
->pldbuflen_qid
), bufsz
, frags
;
3657 if (len
& RSPD_NEWBUF_F
) {
3658 if (likely(q
->offset
> 0)) {
3659 free_rx_bufs(q
->adap
, &rxq
->fl
, 1);
3662 len
= RSPD_LEN_G(len
);
3666 /* gather packet fragments */
3667 for (frags
= 0, fp
= si
.frags
; ; frags
++, fp
++) {
3668 rsd
= &rxq
->fl
.sdesc
[rxq
->fl
.cidx
];
3669 bufsz
= get_buf_size(adapter
, rsd
);
3670 fp
->page
= rsd
->page
;
3671 fp
->offset
= q
->offset
;
3672 fp
->size
= min(bufsz
, len
);
3676 unmap_rx_buf(q
->adap
, &rxq
->fl
);
3679 si
.sgetstamp
= SGE_TIMESTAMP_G(
3680 be64_to_cpu(rc
->last_flit
));
3682 * Last buffer remains mapped so explicitly make it
3683 * coherent for CPU access.
3685 dma_sync_single_for_cpu(q
->adap
->pdev_dev
,
3687 fp
->size
, DMA_FROM_DEVICE
);
3689 si
.va
= page_address(si
.frags
[0].page
) +
3693 si
.nfrags
= frags
+ 1;
3694 ret
= q
->handler(q
, q
->cur_desc
, &si
);
3695 if (likely(ret
== 0))
3696 q
->offset
+= ALIGN(fp
->size
, s
->fl_align
);
3698 restore_rx_bufs(&si
, &rxq
->fl
, frags
);
3699 } else if (likely(rsp_type
== RSPD_TYPE_CPL_X
)) {
3700 ret
= q
->handler(q
, q
->cur_desc
, NULL
);
3702 ret
= q
->handler(q
, (const __be64
*)rc
, CXGB4_MSG_AN
);
3705 if (unlikely(ret
)) {
3706 /* couldn't process descriptor, back off for recovery */
3707 q
->next_intr_params
= QINTR_TIMER_IDX_V(NOMEM_TMR_IDX
);
3715 if (q
->offset
>= 0 && fl_cap(&rxq
->fl
) - rxq
->fl
.avail
>= 16)
3716 __refill_fl(q
->adap
, &rxq
->fl
);
3717 return budget
- budget_left
;
3721 * napi_rx_handler - the NAPI handler for Rx processing
3722 * @napi: the napi instance
3723 * @budget: how many packets we can process in this round
3725 * Handler for new data events when using NAPI. This does not need any
3726 * locking or protection from interrupts as data interrupts are off at
3727 * this point and other adapter interrupts do not interfere (the latter
3728 * in not a concern at all with MSI-X as non-data interrupts then have
3729 * a separate handler).
3731 static int napi_rx_handler(struct napi_struct
*napi
, int budget
)
3733 unsigned int params
;
3734 struct sge_rspq
*q
= container_of(napi
, struct sge_rspq
, napi
);
3738 work_done
= process_responses(q
, budget
);
3739 if (likely(work_done
< budget
)) {
3742 napi_complete_done(napi
, work_done
);
3743 timer_index
= QINTR_TIMER_IDX_G(q
->next_intr_params
);
3745 if (q
->adaptive_rx
) {
3746 if (work_done
> max(timer_pkt_quota
[timer_index
],
3748 timer_index
= (timer_index
+ 1);
3750 timer_index
= timer_index
- 1;
3752 timer_index
= clamp(timer_index
, 0, SGE_TIMERREGS
- 1);
3753 q
->next_intr_params
=
3754 QINTR_TIMER_IDX_V(timer_index
) |
3756 params
= q
->next_intr_params
;
3758 params
= q
->next_intr_params
;
3759 q
->next_intr_params
= q
->intr_params
;
3762 params
= QINTR_TIMER_IDX_V(7);
3764 val
= CIDXINC_V(work_done
) | SEINTARM_V(params
);
3766 /* If we don't have access to the new User GTS (T5+), use the old
3767 * doorbell mechanism; otherwise use the new BAR2 mechanism.
3769 if (unlikely(q
->bar2_addr
== NULL
)) {
3770 t4_write_reg(q
->adap
, MYPF_REG(SGE_PF_GTS_A
),
3771 val
| INGRESSQID_V((u32
)q
->cntxt_id
));
3773 writel(val
| INGRESSQID_V(q
->bar2_qid
),
3774 q
->bar2_addr
+ SGE_UDB_GTS
);
3780 void cxgb4_ethofld_restart(unsigned long data
)
3782 struct sge_eosw_txq
*eosw_txq
= (struct sge_eosw_txq
*)data
;
3785 spin_lock(&eosw_txq
->lock
);
3786 pktcount
= eosw_txq
->cidx
- eosw_txq
->last_cidx
;
3788 pktcount
+= eosw_txq
->ndesc
;
3791 cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq
->netdev
),
3792 eosw_txq
, pktcount
);
3793 eosw_txq
->inuse
-= pktcount
;
3796 /* There may be some packets waiting for completions. So,
3797 * attempt to send these packets now.
3799 ethofld_xmit(eosw_txq
->netdev
, eosw_txq
);
3800 spin_unlock(&eosw_txq
->lock
);
3803 /* cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions
3804 * @q: the response queue that received the packet
3805 * @rsp: the response queue descriptor holding the CPL message
3806 * @si: the gather list of packet fragments
3808 * Process a ETHOFLD Tx completion. Increment the cidx here, but
3809 * free up the descriptors in a tasklet later.
3811 int cxgb4_ethofld_rx_handler(struct sge_rspq
*q
, const __be64
*rsp
,
3812 const struct pkt_gl
*si
)
3814 u8 opcode
= ((const struct rss_header
*)rsp
)->opcode
;
3816 /* skip RSS header */
3819 if (opcode
== CPL_FW4_ACK
) {
3820 const struct cpl_fw4_ack
*cpl
;
3821 struct sge_eosw_txq
*eosw_txq
;
3822 struct eotid_entry
*entry
;
3823 struct sk_buff
*skb
;
3828 cpl
= (const struct cpl_fw4_ack
*)rsp
;
3829 eotid
= CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl
))) -
3830 q
->adap
->tids
.eotid_base
;
3831 entry
= cxgb4_lookup_eotid(&q
->adap
->tids
, eotid
);
3835 eosw_txq
= (struct sge_eosw_txq
*)entry
->data
;
3839 spin_lock(&eosw_txq
->lock
);
3840 credits
= cpl
->credits
;
3841 while (credits
> 0) {
3842 skb
= eosw_txq
->desc
[eosw_txq
->cidx
].skb
;
3846 if (unlikely((eosw_txq
->state
==
3847 CXGB4_EO_STATE_FLOWC_OPEN_REPLY
||
3849 CXGB4_EO_STATE_FLOWC_CLOSE_REPLY
) &&
3850 eosw_txq
->cidx
== eosw_txq
->flowc_idx
)) {
3851 flits
= DIV_ROUND_UP(skb
->len
, 8);
3852 if (eosw_txq
->state
==
3853 CXGB4_EO_STATE_FLOWC_OPEN_REPLY
)
3854 eosw_txq
->state
= CXGB4_EO_STATE_ACTIVE
;
3856 eosw_txq
->state
= CXGB4_EO_STATE_CLOSED
;
3857 complete(&eosw_txq
->completion
);
3859 hdr_len
= eth_get_headlen(eosw_txq
->netdev
,
3862 flits
= ethofld_calc_tx_flits(q
->adap
, skb
,
3865 eosw_txq_advance_index(&eosw_txq
->cidx
, 1,
3867 wrlen16
= DIV_ROUND_UP(flits
* 8, 16);
3871 eosw_txq
->cred
+= cpl
->credits
;
3874 spin_unlock(&eosw_txq
->lock
);
3876 /* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx,
3877 * if there were packets waiting for completion.
3879 tasklet_schedule(&eosw_txq
->qresume_tsk
);
/*
 * The MSI-X interrupt handler for an SGE response queue.
 */
irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
{
	struct sge_rspq *q = cookie;

	napi_schedule(&q->napi);
	return IRQ_HANDLED;
}
3898 * Process the indirect interrupt entries in the interrupt queue and kick off
3899 * NAPI for each queue that has generated an entry.
3901 static unsigned int process_intrq(struct adapter
*adap
)
3903 unsigned int credits
;
3904 const struct rsp_ctrl
*rc
;
3905 struct sge_rspq
*q
= &adap
->sge
.intrq
;
3908 spin_lock(&adap
->sge
.intrq_lock
);
3909 for (credits
= 0; ; credits
++) {
3910 rc
= (void *)q
->cur_desc
+ (q
->iqe_len
- sizeof(*rc
));
3911 if (!is_new_response(rc
, q
))
3915 if (RSPD_TYPE_G(rc
->type_gen
) == RSPD_TYPE_INTR_X
) {
3916 unsigned int qid
= ntohl(rc
->pldbuflen_qid
);
3918 qid
-= adap
->sge
.ingr_start
;
3919 napi_schedule(&adap
->sge
.ingr_map
[qid
]->napi
);
3925 val
= CIDXINC_V(credits
) | SEINTARM_V(q
->intr_params
);
3927 /* If we don't have access to the new User GTS (T5+), use the old
3928 * doorbell mechanism; otherwise use the new BAR2 mechanism.
3930 if (unlikely(q
->bar2_addr
== NULL
)) {
3931 t4_write_reg(adap
, MYPF_REG(SGE_PF_GTS_A
),
3932 val
| INGRESSQID_V(q
->cntxt_id
));
3934 writel(val
| INGRESSQID_V(q
->bar2_qid
),
3935 q
->bar2_addr
+ SGE_UDB_GTS
);
3938 spin_unlock(&adap
->sge
.intrq_lock
);
/*
 * The MSI interrupt handler, which handles data events from SGE response
 * queues as well as error and other async events as they all use the same
 * MSI vector.
 */
static irqreturn_t t4_intr_msi(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	if (adap->flags & CXGB4_MASTER_PF)
		t4_slow_intr_handler(adap);
	process_intrq(adap);
	return IRQ_HANDLED;
}

/*
 * Interrupt handler for legacy INTx interrupts.
 * Handles data events from SGE response queues as well as error and other
 * async events as they all use the same interrupt line.
 */
static irqreturn_t t4_intr_intx(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
	if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) |
	    process_intrq(adap))
		return IRQ_HANDLED;
	return IRQ_NONE;	/* probably shared interrupt */
}

/**
 * t4_intr_handler - select the top-level interrupt handler
 * @adap: the adapter
 *
 * Selects the top-level interrupt handler based on the type of interrupts
 * (MSI-X, MSI, or INTx).
 */
irq_handler_t t4_intr_handler(struct adapter *adap)
{
	if (adap->flags & CXGB4_USING_MSIX)
		return t4_sge_intr_msix;
	if (adap->flags & CXGB4_USING_MSI)
		return t4_intr_msi;
	return t4_intr_intx;
}
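/* Descriptive note (added): sge_rx_timer_cb() runs periodically to kick
 * free lists that have gone "starving" (too few buffers to make progress)
 * by rescheduling their NAPI context, and, on the Master PF, to run the
 * SGE ingress DMA monitor.
 */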
3988 static void sge_rx_timer_cb(struct timer_list
*t
)
3992 struct adapter
*adap
= from_timer(adap
, t
, sge
.rx_timer
);
3993 struct sge
*s
= &adap
->sge
;
3995 for (i
= 0; i
< BITS_TO_LONGS(s
->egr_sz
); i
++)
3996 for (m
= s
->starving_fl
[i
]; m
; m
&= m
- 1) {
3997 struct sge_eth_rxq
*rxq
;
3998 unsigned int id
= __ffs(m
) + i
* BITS_PER_LONG
;
3999 struct sge_fl
*fl
= s
->egr_map
[id
];
4001 clear_bit(id
, s
->starving_fl
);
4002 smp_mb__after_atomic();
4004 if (fl_starving(adap
, fl
)) {
4005 rxq
= container_of(fl
, struct sge_eth_rxq
, fl
);
4006 if (napi_reschedule(&rxq
->rspq
.napi
))
4009 set_bit(id
, s
->starving_fl
);
4012 /* The remainder of the SGE RX Timer Callback routine is dedicated to
4013 * global Master PF activities like checking for chip ingress stalls,
4016 if (!(adap
->flags
& CXGB4_MASTER_PF
))
4019 t4_idma_monitor(adap
, &s
->idma_monitor
, HZ
, RX_QCHECK_PERIOD
);
4022 mod_timer(&s
->rx_timer
, jiffies
+ RX_QCHECK_PERIOD
);
4025 static void sge_tx_timer_cb(struct timer_list
*t
)
4027 struct adapter
*adap
= from_timer(adap
, t
, sge
.tx_timer
);
4028 struct sge
*s
= &adap
->sge
;
4029 unsigned long m
, period
;
4030 unsigned int i
, budget
;
4032 for (i
= 0; i
< BITS_TO_LONGS(s
->egr_sz
); i
++)
4033 for (m
= s
->txq_maperr
[i
]; m
; m
&= m
- 1) {
4034 unsigned long id
= __ffs(m
) + i
* BITS_PER_LONG
;
4035 struct sge_uld_txq
*txq
= s
->egr_map
[id
];
4037 clear_bit(id
, s
->txq_maperr
);
4038 tasklet_schedule(&txq
->qresume_tsk
);
4041 if (!is_t4(adap
->params
.chip
)) {
4042 struct sge_eth_txq
*q
= &s
->ptptxq
;
4045 spin_lock(&adap
->ptp_lock
);
4046 avail
= reclaimable(&q
->q
);
4049 free_tx_desc(adap
, &q
->q
, avail
, false);
4050 q
->q
.in_use
-= avail
;
4052 spin_unlock(&adap
->ptp_lock
);
4055 budget
= MAX_TIMER_TX_RECLAIM
;
4056 i
= s
->ethtxq_rover
;
4058 budget
-= t4_sge_eth_txq_egress_update(adap
, &s
->ethtxq
[i
],
4063 if (++i
>= s
->ethqsets
)
4065 } while (i
!= s
->ethtxq_rover
);
4066 s
->ethtxq_rover
= i
;
4069 /* If we found too many reclaimable packets schedule a timer
4070 * in the near future to continue where we left off.
4074 /* We reclaimed all reclaimable TX Descriptors, so reschedule
4075 * at the normal period.
4077 period
= TX_QCHECK_PERIOD
;
4080 mod_timer(&s
->tx_timer
, jiffies
+ period
);
4084 * bar2_address - return the BAR2 address for an SGE Queue's Registers
4085 * @adapter: the adapter
4086 * @qid: the SGE Queue ID
4087 * @qtype: the SGE Queue Type (Egress or Ingress)
4088 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
4090 * Returns the BAR2 address for the SGE Queue Registers associated with
4091 * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
4092 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
4093 * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
4094 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
4096 static void __iomem
*bar2_address(struct adapter
*adapter
,
4098 enum t4_bar2_qtype qtype
,
4099 unsigned int *pbar2_qid
)
4104 ret
= t4_bar2_sge_qregs(adapter
, qid
, qtype
, 0,
4105 &bar2_qoffset
, pbar2_qid
);
4109 return adapter
->bar2
+ bar2_qoffset
;
4112 /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
4113 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
4115 int t4_sge_alloc_rxq(struct adapter
*adap
, struct sge_rspq
*iq
, bool fwevtq
,
4116 struct net_device
*dev
, int intr_idx
,
4117 struct sge_fl
*fl
, rspq_handler_t hnd
,
4118 rspq_flush_handler_t flush_hnd
, int cong
)
4122 struct sge
*s
= &adap
->sge
;
4123 struct port_info
*pi
= netdev_priv(dev
);
4124 int relaxed
= !(adap
->flags
& CXGB4_ROOT_NO_RELAXED_ORDERING
);
4126 /* Size needs to be multiple of 16, including status entry. */
4127 iq
->size
= roundup(iq
->size
, 16);
4129 iq
->desc
= alloc_ring(adap
->pdev_dev
, iq
->size
, iq
->iqe_len
, 0,
4130 &iq
->phys_addr
, NULL
, 0,
4131 dev_to_node(adap
->pdev_dev
));
	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
			    FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
				 FW_LEN16(c));
	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
		FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
		FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
		FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
		FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
							 -intr_idx - 1));
	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
		FW_IQ_CMD_IQGTSMODE_F |
		FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
		FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
	c.iqsize = htons(iq->size);
	c.iqaddr = cpu_to_be64(iq->phys_addr);
	if (cong >= 0)
		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F |
				FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC
							: FW_IQ_IQTYPE_OFLD));
	if (fl) {
		unsigned int chip_ver =
			CHELSIO_CHIP_VERSION(adap->params.chip);

		/* Allocate the ring for the hardware free list (with space
		 * for its status page) along with the associated software
		 * descriptor ring.  The free list size needs to be a multiple
		 * of the Egress Queue Unit and at least 2 Egress Units larger
		 * than the SGE's Egress Congestion Threshold
		 * (fl_starve_thres - 1).
		 */
		if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
			fl->size = s->fl_starve_thres - 1 + 2 * 8;
		fl->size = roundup(fl->size, 8);
		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
				      sizeof(struct rx_sw_desc), &fl->addr,
				      &fl->sdesc, s->stat_len,
				      dev_to_node(adap->pdev_dev));
		if (!fl->desc)
			goto fl_nomem;

		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
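		/* Editorial example (assumed values, for illustration only):
		 * an Egress Queue Unit holds 8 Free List pointers, so with a
		 * hypothetical fl_starve_thres of 33 the minimum Free List
		 * size is 33 - 1 + 2 * 8 = 48 pointers (already a multiple of
		 * 8), and with a 64-byte status page flsz works out to
		 * 48 / 8 + 64 / 64 = 7 Egress Queue Units.
		 */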
		c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
					     FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
					     FW_IQ_CMD_FL0DATARO_V(relaxed) |
					     FW_IQ_CMD_FL0PADEN_F);
		if (cong >= 0)
			c.iqns_to_fl0congen |=
				htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
				      FW_IQ_CMD_FL0CONGCIF_F |
				      FW_IQ_CMD_FL0CONGEN_F);
		/* In T6, for egress queue type FL there is internal overhead
		 * of 16B for header going into FLM module.  Hence the maximum
		 * allowed burst size is 448 bytes.  For T4/T5, the hardware
		 * doesn't coalesce fetch requests if more than 64 bytes of
		 * Free List pointers are provided, so we use a 128-byte Fetch
		 * Burst Minimum there (T6 implements coalescing so we can use
		 * the smaller 64-byte value there).
		 */
		c.fl0dcaen_to_fl0cidxfthresh =
			htons(FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 ?
						   FETCHBURSTMIN_128B_X :
						   FETCHBURSTMIN_64B_T6_X) |
			      FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
						   FETCHBURSTMAX_512B_X :
						   FETCHBURSTMAX_256B_X));
		c.fl0size = htons(flsz);
		c.fl0addr = cpu_to_be64(fl->addr);
	}

	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
	if (ret)
		goto err;
	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
	iq->cur_desc = iq->desc;
	iq->cidx = 0;
	iq->gen = 1;
	iq->next_intr_params = iq->intr_params;
	iq->cntxt_id = ntohs(c.iqid);
	iq->abs_id = ntohs(c.physiqid);
	iq->bar2_addr = bar2_address(adap,
				     iq->cntxt_id,
				     T4_BAR2_QTYPE_INGRESS,
				     &iq->bar2_qid);
	iq->size--;			/* subtract status entry */
	iq->netdev = dev;
	iq->handler = hnd;
	iq->flush_handler = flush_hnd;

	memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
	skb_queue_head_init(&iq->lro_mgr.lroq);

	/* set offset to -1 to distinguish ingress queues without FL */
	iq->offset = fl ? 0 : -1;

	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;

	if (fl) {
		fl->cntxt_id = ntohs(c.fl0id);
		fl->avail = fl->pend_cred = 0;
		fl->pidx = fl->cidx = 0;
		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;

		/* Note, we must initialize the BAR2 Free List User Doorbell
		 * information before refilling the Free List!
		 */
		fl->bar2_addr = bar2_address(adap,
					     fl->cntxt_id,
					     T4_BAR2_QTYPE_EGRESS,
					     &fl->bar2_qid);
		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
	}
	/* For T5 and later we attempt to set up the Congestion Manager values
	 * of the new RX Ethernet Queue.  This should really be handled by
	 * firmware because it's more complex than any host driver wants to
	 * get involved with and it's different per chip and this is almost
	 * certainly wrong.  Firmware would be wrong as well, but it would be
	 * a lot easier to fix in one place ...  For now we do something very
	 * simple (and hopefully less wrong).
	 */
	if (!is_t4(adap->params.chip) && cong >= 0) {
		u32 param, val, ch_map = 0;
		int i;
		u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;

		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
			 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
		if (cong == 0) {
			val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
		} else {
			val =
			    CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
			for (i = 0; i < 4; i++) {
				if (cong & (1 << i))
					ch_map |= 1 << (i << cng_ch_bits_log);
			}
			val |= CONMCTXT_CNGCHMAP_V(ch_map);
		}
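		/* Editorial example (illustrative only): with a congestion
		 * channel map of cong = 0x5 (channels 0 and 2) and
		 * cng_ch_bits_log = 2, the loop above sets bits 0 and 8, so
		 * ch_map = 0x101 before it is packed into the CONM context
		 * via CONMCTXT_CNGCHMAP_V().
		 */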
		ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
				    &param, &val);
		if (ret)
			dev_warn(adap->pdev_dev, "Failed to set Congestion Manager Context for Ingress Queue %d: %d\n",
				 iq->cntxt_id, -ret);
	}

	return 0;

fl_nomem:
	ret = -ENOMEM;
err:
	if (iq->desc) {
		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
				  iq->desc, iq->phys_addr);
		iq->desc = NULL;
	}
	if (fl && fl->desc) {
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
				  fl->desc, fl->addr);
		fl->desc = NULL;
	}
	return ret;
}
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
	q->cntxt_id = id;
	q->bar2_addr = bar2_address(adap,
				    q->cntxt_id,
				    T4_BAR2_QTYPE_EGRESS,
				    &q->bar2_qid);
	q->in_use = 0;
	q->cidx = q->pidx = 0;
	q->stops = q->restarts = 0;
	q->stat = (void *)&q->desc[q->size];
	spin_lock_init(&q->db_lock);
	adap->sge.egr_map[id - adap->sge.egr_start] = q;
}
/**
 *	t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue
 *	@adap: the adapter
 *	@txq: the SGE Ethernet TX Queue to initialize
 *	@dev: the Linux Network Device
 *	@netdevq: the corresponding Linux TX Queue
 *	@iqid: the Ingress Queue to which to deliver CIDX Update messages
 *	@dbqt: whether this TX Queue will use the SGE Doorbell Queue Timers
 */
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct net_device *dev, struct netdev_queue *netdevq,
			 unsigned int iqid, u8 dbqt)
{
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
	struct port_info *pi = netdev_priv(dev);
	struct sge *s = &adap->sge;
	struct fw_eq_eth_cmd c;
	int ret, nentries;

	/* Add status entries */
	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
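	/* Editorial example (illustrative only): a TX descriptor is 64 bytes,
	 * so a 64-byte status page adds one ring entry and a 128-byte status
	 * page adds two, e.g. a 1024-entry TX Queue ends up with 1025 or
	 * 1026 hardware descriptors here.
	 */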
	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
				 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
				 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
				 netdev_queue_numa_node_read(netdevq));
	if (!txq->q.desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
			    FW_EQ_ETH_CMD_PFN_V(adap->pf) |
			    FW_EQ_ETH_CMD_VFN_V(0));
	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
				 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));

	/* For TX Ethernet Queues using the SGE Doorbell Queue Timer
	 * mechanism, we use Ingress Queue messages for Hardware Consumer
	 * Index Updates on the TX Queue.  Otherwise we have the Hardware
	 * write the CIDX Updates into the Status Page at the end of the
	 * TX Queue.
	 */
	c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
				     FW_EQ_ETH_CMD_VIID_V(pi->viid));

	c.fetchszm_to_iqid =
		htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
		      FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
		      FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));

	/* Note that the CIDX Flush Threshold should match MAX_TX_RECLAIM. */
	c.dcaen_to_eqsize =
		htonl(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
					    ? FETCHBURSTMIN_64B_X
					    : FETCHBURSTMIN_64B_T6_X) |
		      FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
		      FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
		      FW_EQ_ETH_CMD_EQSIZE_V(nentries));

	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	/* If we're using the SGE Doorbell Queue Timer mechanism, pass in the
	 * currently configured Timer Index.  This can be changed later via an
	 * ethtool -C tx-usecs {Timer Val} command.  Note that the SGE
	 * Doorbell Queue mode is currently automatically enabled in the
	 * Firmware by setting either AUTOEQUEQE or AUTOEQUIQE ...
	 */
	if (dbqt)
		c.timeren_timerix =
			cpu_to_be32(FW_EQ_ETH_CMD_TIMEREN_F |
				    FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix));

	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
	if (ret) {
		kfree(txq->q.sdesc);
		txq->q.sdesc = NULL;
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	txq->q.q_type = CXGB4_TXQ_ETH;
	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
	txq->txq = netdevq;
	txq->tso = 0;
	txq->uso = 0;
	txq->tx_cso = 0;
	txq->vlan_ins = 0;
	txq->mapping_err = 0;
	txq->dbqt = dbqt;

	return 0;
}
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
			  struct net_device *dev, unsigned int iqid,
			  unsigned int cmplqid)
{
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
	struct port_info *pi = netdev_priv(dev);
	struct sge *s = &adap->sge;
	struct fw_eq_ctrl_cmd c;
	int ret, nentries;

	/* Add status entries */
	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
				 NULL, 0, dev_to_node(adap->pdev_dev));
	if (!txq->q.desc)
		return -ENOMEM;

	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
			    FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
			    FW_EQ_CTRL_CMD_VFN_V(0));
	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
				 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
	c.physeqid_pkd = htonl(0);
	c.fetchszm_to_iqid =
		htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
		      FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
		      FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
	c.dcaen_to_eqsize =
		htonl(FW_EQ_CTRL_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
					     ? FETCHBURSTMIN_64B_X
					     : FETCHBURSTMIN_64B_T6_X) |
		      FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
		      FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
		      FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
	if (ret) {
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	txq->q.q_type = CXGB4_TXQ_CTRL;
	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
	txq->adap = adap;
	skb_queue_head_init(&txq->sendq);
	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
	txq->full = 0;
	return 0;
}
int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
			unsigned int cmplqid)
{
	u32 param, val;

	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
		 FW_PARAMS_PARAM_YZ_V(eqid));
	val = cmplqid;
	return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
}
static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q,
				 struct net_device *dev, u32 cmd, u32 iqid)
{
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
	struct port_info *pi = netdev_priv(dev);
	struct sge *s = &adap->sge;
	struct fw_eq_ofld_cmd c;
	u32 fb_min, nentries;
	int ret;

	/* Add status entries */
	nentries = q->size + s->stat_len / sizeof(struct tx_desc);
	q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc),
			     sizeof(struct tx_sw_desc), &q->phys_addr,
			     &q->sdesc, s->stat_len, NUMA_NO_NODE);
	if (!q->desc)
		return -ENOMEM;

	if (chip_ver <= CHELSIO_T5)
		fb_min = FETCHBURSTMIN_64B_X;
	else
		fb_min = FETCHBURSTMIN_64B_T6_X;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
			    FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
			    FW_EQ_OFLD_CMD_VFN_V(0));
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
				 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
	c.fetchszm_to_iqid =
		htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
		      FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
		      FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
	c.dcaen_to_eqsize =
		htonl(FW_EQ_OFLD_CMD_FBMIN_V(fb_min) |
		      FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
		      FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
		      FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
	c.eqaddr = cpu_to_be64(q->phys_addr);

	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
	if (ret) {
		kfree(q->sdesc);
		q->sdesc = NULL;
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  q->desc, q->phys_addr);
		q->desc = NULL;
		return ret;
	}

	init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
	return 0;
}
int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
			 struct net_device *dev, unsigned int iqid,
			 unsigned int uld_type)
{
	u32 cmd = FW_EQ_OFLD_CMD;
	int ret;

	if (unlikely(uld_type == CXGB4_TX_CRYPTO))
		cmd = FW_EQ_CTRL_CMD;

	ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid);
	if (ret)
		return ret;

	txq->q.q_type = CXGB4_TXQ_ULD;
	txq->adap = adap;
	skb_queue_head_init(&txq->sendq);
	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
	txq->full = 0;
	txq->mapping_err = 0;
	return 0;
}
int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
			     struct net_device *dev, u32 iqid)
{
	int ret;

	ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid);
	if (ret)
		return ret;

	txq->q.q_type = CXGB4_TXQ_ULD;
	spin_lock_init(&txq->lock);
	txq->adap = adap;
	txq->tso = 0;
	txq->uso = 0;
	txq->tx_cso = 0;
	txq->vlan_ins = 0;
	txq->mapping_err = 0;
	return 0;
}
void free_txq(struct adapter *adap, struct sge_txq *q)
{
	struct sge *s = &adap->sge;

	dma_free_coherent(adap->pdev_dev,
			  q->size * sizeof(struct tx_desc) + s->stat_len,
			  q->desc, q->phys_addr);
	q->cntxt_id = 0;
	q->sdesc = NULL;
	q->desc = NULL;
}
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
		  struct sge_fl *fl)
{
	struct sge *s = &adap->sge;
	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;

	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
		   rq->cntxt_id, fl_id, 0xffff);
	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
			  rq->desc, rq->phys_addr);
	netif_napi_del(&rq->napi);
	rq->netdev = NULL;
	rq->cntxt_id = rq->abs_id = 0;
	rq->desc = NULL;

	if (fl) {
		free_rx_bufs(adap, fl, fl->avail);
		dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
				  fl->desc, fl->addr);
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		fl->cntxt_id = 0;
		fl->desc = NULL;
	}
}
/**
 *	t4_free_ofld_rxqs - free a block of consecutive Rx queues
 *	@adap: the adapter
 *	@n: number of queues
 *	@q: pointer to first queue
 *
 *	Release the resources of a consecutive block of offload Rx queues.
 */
void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++)
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
}
void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
{
	if (txq->q.desc) {
		t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
				txq->q.cntxt_id);
		free_tx_desc(adap, &txq->q, txq->q.in_use, false);
		kfree(txq->q.sdesc);
		free_txq(adap, &txq->q);
	}
}
/**
 *	t4_free_sge_resources - free SGE resources
 *	@adap: the adapter
 *
 *	Frees resources used by the SGE queue sets.
 */
void t4_free_sge_resources(struct adapter *adap)
{
	int i;
	struct sge_eth_rxq *eq;
	struct sge_eth_txq *etq;

	/* stop all Rx queues in order to start them draining */
	for (i = 0; i < adap->sge.ethqsets; i++) {
		eq = &adap->sge.ethrxq[i];
		if (eq->rspq.desc)
			t4_iq_stop(adap, adap->mbox, adap->pf, 0,
				   FW_IQ_TYPE_FL_INT_CAP,
				   eq->rspq.cntxt_id,
				   eq->fl.size ? eq->fl.cntxt_id : 0xffff,
				   0xffff);
	}

	/* clean up Ethernet Tx/Rx queues */
	for (i = 0; i < adap->sge.ethqsets; i++) {
		eq = &adap->sge.ethrxq[i];
		if (eq->rspq.desc) {
			free_rspq_fl(adap, &eq->rspq,
				     eq->fl.size ? &eq->fl : NULL);
			if (eq->msix)
				cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx);
		}

		etq = &adap->sge.ethtxq[i];
		if (etq->q.desc) {
			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
				       etq->q.cntxt_id);
			__netif_tx_lock_bh(etq->txq);
			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
			__netif_tx_unlock_bh(etq->txq);
			kfree(etq->q.sdesc);
			free_txq(adap, &etq->q);
		}
	}

	/* clean up control Tx queues */
	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];

		if (cq->q.desc) {
			tasklet_kill(&cq->qresume_tsk);
			t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
					cq->q.cntxt_id);
			__skb_queue_purge(&cq->sendq);
			free_txq(adap, &cq->q);
		}
	}

	if (adap->sge.fw_evtq.desc) {
		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
		if (adap->sge.fwevtq_msix_idx >= 0)
			cxgb4_free_msix_idx_in_bmap(adap,
						    adap->sge.fwevtq_msix_idx);
	}

	if (adap->sge.nd_msix_idx >= 0)
		cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx);

	if (adap->sge.intrq.desc)
		free_rspq_fl(adap, &adap->sge.intrq, NULL);

	if (!is_t4(adap->params.chip)) {
		etq = &adap->sge.ptptxq;
		if (etq->q.desc) {
			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
				       etq->q.cntxt_id);
			spin_lock_bh(&adap->ptp_lock);
			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
			spin_unlock_bh(&adap->ptp_lock);
			kfree(etq->q.sdesc);
			free_txq(adap, &etq->q);
		}
	}

	/* clear the reverse egress queue map */
	memset(adap->sge.egr_map, 0,
	       adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
}
void t4_sge_start(struct adapter *adap)
{
	adap->sge.ethtxq_rover = 0;
	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
}
/**
 *	t4_sge_stop - disable SGE operation
 *	@adap: the adapter
 *
 *	Stop tasklets and timers associated with the DMA engine.  Note that
 *	this is effective only if measures have been taken to disable any HW
 *	events that may restart them.
 */
void t4_sge_stop(struct adapter *adap)
{
	int i;
	struct sge *s = &adap->sge;

	if (in_interrupt())  /* actions below require waiting */
		return;

	if (s->rx_timer.function)
		del_timer_sync(&s->rx_timer);
	if (s->tx_timer.function)
		del_timer_sync(&s->tx_timer);

	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info;

		txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
		if (txq_info) {
			struct sge_uld_txq *txq = txq_info->uldtxq;

			for_each_ofldtxq(&adap->sge, i) {
				if (txq->q.desc)
					tasklet_kill(&txq->qresume_tsk);
			}
		}
	}

	if (is_pci_uld(adap)) {
		struct sge_uld_txq_info *txq_info;

		txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
		if (txq_info) {
			struct sge_uld_txq *txq = txq_info->uldtxq;

			for_each_ofldtxq(&adap->sge, i) {
				if (txq->q.desc)
					tasklet_kill(&txq->qresume_tsk);
			}
		}
	}

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
		struct sge_ctrl_txq *cq = &s->ctrlq[i];

		if (cq->q.desc)
			tasklet_kill(&cq->qresume_tsk);
	}
}
/**
 *	t4_sge_init_soft - grab core SGE values needed by SGE code
 *	@adap: the adapter
 *
 *	We need to grab the SGE operating parameters that we need to have
 *	in order to do our job and make sure we can live with them.
 */
static int t4_sge_init_soft(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
	u32 ingress_rx_threshold;

	/*
	 * Verify that CPL messages are going to the Ingress Queue for
	 * process_responses() and that only packet data is going to the
	 * Free Lists.
	 */
	if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
	    RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
		dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
		return -EINVAL;
	}

	/*
	 * Validate the Host Buffer Register Array indices that we want to
	 * use ...
	 *
	 * XXX Note that we should really read through the Host Buffer Size
	 * XXX register array and find the indices of the Buffer Sizes which
	 * XXX meet our needs!
	 */
	#define READ_FL_BUF(x) \
		t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))

	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
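	/* Editorial note (assumed, typical values only): firmware commonly
	 * programs RX_SMALL_PG_BUF to PAGE_SIZE (4KB on x86_64) and
	 * RX_LARGE_PG_BUF to a 64KB aggregation buffer, in which case the
	 * checks below pass and fl_pg_order becomes ilog2(65536) - 12 = 4.
	 */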
	/* We only bother using the Large Page logic if the Large Page Buffer
	 * is larger than our Page Size Buffer.
	 */
	if (fl_large_pg <= fl_small_pg)
		fl_large_pg = 0;

	/* The Page Size Buffer must be exactly equal to our Page Size and the
	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
	 */
	if (fl_small_pg != PAGE_SIZE ||
	    (fl_large_pg & (fl_large_pg - 1)) != 0) {
		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
			fl_small_pg, fl_large_pg);
		return -EINVAL;
	}
	if (fl_large_pg)
		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;

	if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
	    fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
		dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
			fl_small_mtu, fl_large_mtu);
		return -EINVAL;
	}

	/*
	 * Retrieve our RX interrupt holdoff timer values and counter
	 * threshold values from the SGE parameters.
	 */
	timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
	timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
	timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
	s->timer_val[0] = core_ticks_to_us(adap,
					   TIMERVALUE0_G(timer_value_0_and_1));
	s->timer_val[1] = core_ticks_to_us(adap,
					   TIMERVALUE1_G(timer_value_0_and_1));
	s->timer_val[2] = core_ticks_to_us(adap,
					   TIMERVALUE2_G(timer_value_2_and_3));
	s->timer_val[3] = core_ticks_to_us(adap,
					   TIMERVALUE3_G(timer_value_2_and_3));
	s->timer_val[4] = core_ticks_to_us(adap,
					   TIMERVALUE4_G(timer_value_4_and_5));
	s->timer_val[5] = core_ticks_to_us(adap,
					   TIMERVALUE5_G(timer_value_4_and_5));

	ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
	s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
	s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
	s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
	s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);

	return 0;
}
/**
 *	t4_sge_init - initialize SGE
 *	@adap: the adapter
 *
 *	Perform low-level SGE code initialization needed every time after a
 *	chip reset.
 */
int t4_sge_init(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	u32 sge_control, sge_conm_ctrl;
	int ret, egress_threshold;

	/*
	 * Ingress Padding Boundary and Egress Status Page Size are set up by
	 * t4_fixup_host_params().
	 */
	sge_control = t4_read_reg(adap, SGE_CONTROL_A);
	s->pktshift = PKTSHIFT_G(sge_control);
	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;

	s->fl_align = t4_fl_pkt_align(adap);
	ret = t4_sge_init_soft(adap);
	if (ret < 0)
		return ret;

	/*
	 * A FL with <= fl_starve_thres buffers is starving and a periodic
	 * timer will attempt to refill it.  This needs to be larger than the
	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
	 * stuck waiting for new packets while the SGE is waiting for us to
	 * give it more Free List entries.  (Note that the SGE's Egress
	 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
	 * there was only a single field to control this.  For T5 there's the
	 * original field which now only applies to Unpacked Mode Free List
	 * buffers and a new field which only applies to Packed Mode Free List
	 * buffers.
	 */
	sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T4:
		egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
		break;
	case CHELSIO_T5:
		egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
		break;
	case CHELSIO_T6:
		egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
		break;
	default:
		dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
			CHELSIO_CHIP_VERSION(adap->params.chip));
		return -EINVAL;
	}
	s->fl_starve_thres = 2 * egress_threshold + 1;
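	/* Editorial example (illustrative only): the Egress Congestion
	 * Threshold is expressed in units of 2 Free List pointers, so a
	 * register value of 16 corresponds to 32 pointers and the starvation
	 * threshold above becomes 2 * 16 + 1 = 33 Free List buffers.
	 */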
	t4_idma_monitor_init(adap, &s->idma_monitor);

	/* Set up timers used for recurring callbacks to process RX and TX
	 * administrative tasks.
	 */
	timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
	timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);

	spin_lock_init(&s->intrq_lock);