/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"

#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_values.h"
#include "../cxgb4/t4fw_api.h"
#include "../cxgb4/t4_msg.h"

/*
 * Constants ...
 */
enum {
	/*
	 * Egress Queue sizes, producer and consumer indices are all in units
	 * of Egress Context Units bytes.  Note that as far as the hardware is
	 * concerned, the free list is an Egress Queue (the host produces free
	 * buffers which the hardware consumes) and free list entries are
	 * 64-bit PCI DMA addresses.
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),

	/*
	 * Max number of TX descriptors we clean up at a time.  Should be
	 * modest as freeing skbs isn't cheap and it happens while holding
	 * locks.  We just need to free packets faster than they arrive, we
	 * eventually catch up and keep the amortized cost reasonable.
	 */
	MAX_TX_RECLAIM = 16,

	/*
	 * Max number of Rx buffers we replenish at a time.  Again keep this
	 * modest, allocating buffers isn't cheap either.
	 */
	MAX_RX_REFILL = 16,

	/*
	 * Period of the Rx queue check timer.  This timer is infrequent as it
	 * has something to do only when the system experiences severe memory
	 * shortage.
	 */
	RX_QCHECK_PERIOD = (HZ / 2),

	/*
	 * Period of the TX queue check timer and the maximum number of TX
	 * descriptors to be reclaimed by the TX timer.
	 */
	TX_QCHECK_PERIOD = (HZ / 2),
	MAX_TIMER_TX_RECLAIM = 100,

	/*
	 * Suspend an Ethernet TX queue with fewer available descriptors than
	 * this.  We always want to have room for a maximum sized packet:
	 * inline immediate data + MAX_SKB_FRAGS.  This is the same as
	 * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS
	 * (see that function and its helpers for a description of the
	 * calculation).
	 */
	ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
	ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
			      ((ETHTXQ_MAX_FRAGS-1) & 1) +
			      2),
	ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
	ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,

	ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),
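
	/*
	 * Illustrative sizing only (not authoritative): assuming 4KB pages,
	 * so MAX_SKB_FRAGS == 17, and a 64-byte EQ_UNIT (8 flits per TX
	 * descriptor), ETHTXQ_MAX_FRAGS == 18 and ETHTXQ_MAX_SGL_LEN ==
	 * (3*17)/2 + (17 & 1) + 2 == 28 flits.  With roughly 8 flits of
	 * WR/CPL headers that gives ~36 flits total, i.e.
	 * ETHTXQ_STOP_THRES == 1 + DIV_ROUND_UP(36, 8) == 6 descriptors.
	 */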

	/*
	 * Max TX descriptor space we allow for an Ethernet packet to be
	 * inlined into a WR.  This is limited by the maximum value which
	 * we can specify for immediate data in the firmware Ethernet TX
	 * Work Request.
	 */
	MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M,

	/*
	 * Max size of a WR sent through a control TX queue.
	 */
	MAX_CTRL_WR_LEN = 256,

	/*
	 * Maximum amount of data which we'll ever need to inline into a
	 * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN).
	 */
	MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
			  ? MAX_IMM_TX_PKT_LEN
			  : MAX_CTRL_WR_LEN),

	/*
	 * For incoming packets less than RX_COPY_THRES, we copy the data into
	 * an skb rather than referencing the data.  We allocate enough
	 * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes
	 * of the data (header).
	 */
	RX_COPY_THRES = 256,
	RX_PULL_LEN = 128,

	/*
	 * Main body length for sk_buffs used for RX Ethernet packets with
	 * fragments.  Should be >= RX_PULL_LEN but possibly bigger to give
	 * pskb_may_pull() some room.
	 */
	RX_SKB_LEN = 512,
};

/*
 * Software state per TX descriptor.
 */
struct tx_sw_desc {
	struct sk_buff *skb;		/* socket buffer of TX data source */
	struct ulptx_sgl *sgl;		/* scatter/gather list in TX Queue */
};

/*
 * Software state per RX Free List descriptor.  We keep track of the allocated
 * FL page, its size, and its PCI DMA address (if the page is mapped).  The FL
 * page size and its PCI DMA mapped state are stored in the low bits of the
 * PCI DMA address as per below.
 */
struct rx_sw_desc {
	struct page *page;		/* Free List page buffer */
	dma_addr_t dma_addr;		/* PCI DMA address (if mapped) */
					/* and flags (see below) */
};

/*
 * The low bits of rx_sw_desc.dma_addr have special meaning.  Note that the
 * SGE also uses the low 4 bits to determine the size of the buffer.  It uses
 * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array.
 * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4
 * bits can only contain a 0 or a 1 to indicate which size buffer we're giving
 * to the SGE.  Thus, our software state of "is the buffer mapped for DMA" is
 * maintained in an inverse sense so the hardware never sees that bit high.
 */
enum {
	RX_LARGE_BUF    = 1 << 0,	/* buffer is SGE_FL_BUFFER_SIZE[1] */
	RX_UNMAPPED_BUF = 1 << 1,	/* buffer is not mapped */
};
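
/*
 * Illustrative encoding (hypothetical address): a DMA-mapped large-page
 * buffer at bus address 0x12340000 is stored as 0x12340001 (RX_LARGE_BUF
 * set, RX_UNMAPPED_BUF clear); get_buf_addr() below masks these low flag
 * bits back off before the address is used with the DMA API.
 */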

/**
 *	get_buf_addr - return DMA buffer address of software descriptor
 *	@sdesc: pointer to the software buffer descriptor
 *
 *	Return the DMA buffer address of a software descriptor (stripping out
 *	our low-order flag bits).
 */
static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
{
	return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
}

/**
 *	is_buf_mapped - is buffer mapped for DMA?
 *	@sdesc: pointer to the software buffer descriptor
 *
 *	Determine whether the buffer associated with a software descriptor is
 *	mapped for DMA or not.
 */
static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
{
	return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
}

/**
 *	need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 *	Returns true if the platform needs sk_buff unmapping.  The compiler
 *	optimizes away the unmapping code when this returns a constant false.
 */
static inline int need_skb_unmap(void)
{
#ifdef CONFIG_NEED_DMA_MAP_STATE
	return 1;
#else
	return 0;
#endif
}
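
/*
 * For example, in free_tx_desc() below "need_skb_unmap() && unmap" folds
 * to a compile-time 0 when CONFIG_NEED_DMA_MAP_STATE is not set, so the
 * call to unmap_sgl() is eliminated from the object code entirely.
 */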

/**
 *	txq_avail - return the number of available slots in a TX queue
 *	@tq: the TX queue
 *
 *	Returns the number of available descriptors in a TX queue.
 */
static inline unsigned int txq_avail(const struct sge_txq *tq)
{
	return tq->size - 1 - tq->in_use;
}

/**
 *	fl_cap - return the capacity of a Free List
 *	@fl: the Free List
 *
 *	Returns the capacity of a Free List.  The capacity is less than the
 *	size because an Egress Queue Index Unit worth of descriptors needs to
 *	be left unpopulated, otherwise the Producer and Consumer indices PIDX
 *	and CIDX will match and the hardware will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - FL_PER_EQ_UNIT;
}
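
/*
 * E.g. (illustrative numbers): with a 64-byte EQ_UNIT, FL_PER_EQ_UNIT is
 * 64 / sizeof(__be64) == 8, so a Free List with fl->size == 1024 entries
 * has a usable capacity of 1016 buffers; the final Egress Queue Unit's
 * worth of entries stays unpopulated so PIDX never catches up with CIDX.
 */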

/**
 *	fl_starving - return whether a Free List is starving.
 *	@adapter: pointer to the adapter
 *	@fl: the Free List
 *
 *	Tests specified Free List to see whether the number of buffers
 *	available to the hardware has fallen below our "starvation"
 *	threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}

/**
 *	map_skb - map an skb for DMA to the device
 *	@dev: the egress net device
 *	@skb: the packet to map
 *	@addr: a pointer to the base of the DMA mapping array
 *
 *	Map an skb for DMA to the device and return an array of DMA addresses.
 */
static int map_skb(struct device *dev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);

out_err:
	return -ENOMEM;
}

static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
		      const struct ulptx_sgl *sgl, const struct sge_txq *tq)
{
	const struct ulptx_sge_pair *p;
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	if (likely(skb_headlen(skb)))
		dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
				 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
	else {
		dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
			       be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
		nfrags--;
	}

	/*
	 * the complexity below is because of the possibility of a wrap-around
	 * in the middle of an SGL
	 */
	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
		if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
unmap:
			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
			p++;
		} else if ((u8 *)p == (u8 *)tq->stat) {
			p = (const struct ulptx_sge_pair *)tq->desc;
			goto unmap;
		} else if ((u8 *)p + 8 == (u8 *)tq->stat) {
			const __be64 *addr = (const __be64 *)tq->desc;

			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[1]),
				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[2];
		} else {
			const __be64 *addr = (const __be64 *)tq->desc;

			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[1];
		}
	}
	if (nfrags) {
		__be64 addr;

		if ((u8 *)p == (u8 *)tq->stat)
			p = (const struct ulptx_sge_pair *)tq->desc;
		addr = ((u8 *)p + 16 <= (u8 *)tq->stat
			? p->addr[0]
			: *(const __be64 *)tq->desc);
		dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
			       DMA_TO_DEVICE);
	}
}

/**
 *	free_tx_desc - reclaims TX descriptors and their buffers
 *	@adapter: the adapter
 *	@tq: the TX queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims TX descriptors from an SGE TX queue and frees the associated
 *	TX buffers.  Called with the TX queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
			 unsigned int n, bool unmap)
{
	struct tx_sw_desc *sdesc;
	unsigned int cidx = tq->cidx;
	struct device *dev = adapter->pdev_dev;

	const int need_unmap = need_skb_unmap() && unmap;

	sdesc = &tq->sdesc[cidx];
	while (n--) {
		/*
		 * If we kept a reference to the original TX skb, we need to
		 * unmap it from PCI DMA space (if required) and free it.
		 */
		if (sdesc->skb) {
			if (need_unmap)
				unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
			dev_consume_skb_any(sdesc->skb);
			sdesc->skb = NULL;
		}

		sdesc++;
		if (++cidx == tq->size) {
			cidx = 0;
			sdesc = tq->sdesc;
		}
	}
	tq->cidx = cidx;
}

/*
 * Return the number of reclaimable descriptors in a TX queue.
 */
static inline int reclaimable(const struct sge_txq *tq)
{
	int hw_cidx = be16_to_cpu(tq->stat->cidx);
	int reclaimable = hw_cidx - tq->cidx;

	if (reclaimable < 0)
		reclaimable += tq->size;
	return reclaimable;
}
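
/*
 * Wrap-around example (illustrative): with tq->size == 1024, a software
 * cidx of 1000 and a hardware cidx of 8, 8 - 1000 == -992; adding the
 * ring size gives 32 descriptors which the SGE has completed but which
 * we have not yet reclaimed.
 */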

/**
 *	reclaim_completed_tx - reclaims completed TX descriptors
 *	@adapter: the adapter
 *	@tq: the TX queue to reclaim completed descriptors from
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims TX descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the TX
 *	queue locked.
 */
static inline void reclaim_completed_tx(struct adapter *adapter,
					struct sge_txq *tq,
					bool unmap)
{
	int avail = reclaimable(tq);

	if (avail) {
		/*
		 * Limit the amount of clean up work we do at a time to keep
		 * the TX lock hold time O(1).
		 */
		if (avail > MAX_TX_RECLAIM)
			avail = MAX_TX_RECLAIM;

		free_tx_desc(adapter, tq, avail, unmap);
		tq->in_use -= avail;
	}
}

/**
 *	get_buf_size - return the size of an RX Free List buffer.
 *	@adapter: pointer to the associated adapter
 *	@sdesc: pointer to the software buffer descriptor
 */
static inline int get_buf_size(const struct adapter *adapter,
			       const struct rx_sw_desc *sdesc)
{
	const struct sge *s = &adapter->sge;

	return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
		? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
}

/**
 *	free_rx_bufs - free RX buffers on an SGE Free List
 *	@adapter: the adapter
 *	@fl: the SGE Free List to free buffers from
 *	@n: how many buffers to free
 *
 *	Release the next @n buffers on an SGE Free List RX queue.  The
 *	buffers must be made inaccessible to hardware before calling this
 *	function.
 */
static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
{
	while (n--) {
		struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];

		if (is_buf_mapped(sdesc))
			dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
				       get_buf_size(adapter, sdesc),
				       DMA_FROM_DEVICE);
		put_page(sdesc->page);
		sdesc->page = NULL;
		if (++fl->cidx == fl->size)
			fl->cidx = 0;
		fl->avail--;
	}
}

/**
 *	unmap_rx_buf - unmap the current RX buffer on an SGE Free List
 *	@adapter: the adapter
 *	@fl: the SGE Free List
 *
 *	Unmap the current buffer on an SGE Free List RX queue.  The
 *	buffer must be made inaccessible to HW before calling this function.
 *
 *	This is similar to @free_rx_bufs above but does not free the buffer.
 *	Do note that the FL still loses any further access to the buffer.
 *	This is used predominantly to "transfer ownership" of an FL buffer
 *	to another entity (typically an skb's fragment list).
 */
static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
{
	struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];

	if (is_buf_mapped(sdesc))
		dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
			       get_buf_size(adapter, sdesc),
			       DMA_FROM_DEVICE);
	sdesc->page = NULL;
	if (++fl->cidx == fl->size)
		fl->cidx = 0;
	fl->avail--;
}

/**
 *	ring_fl_db - ring doorbell on free list
 *	@adapter: the adapter
 *	@fl: the Free List whose doorbell should be rung ...
 *
 *	Tell the Scatter Gather Engine that there are new free list entries
 *	available.
 */
static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
{
	u32 val = adapter->params.arch.sge_fl_db;

	/* The SGE keeps track of its Producer and Consumer Indices in terms
	 * of Egress Queue Units so we can only tell it about integral numbers
	 * of multiples of Free List Entries per Egress Queue Units ...
	 */
	if (fl->pend_cred >= FL_PER_EQ_UNIT) {
		if (is_t4(adapter->params.chip))
			val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
		else
			val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT);

		/* Make sure all memory writes to the Free List queue are
		 * committed before we tell the hardware about them.
		 */
		wmb();

		/* If we don't have access to the new User Doorbell (T5+), use
		 * the old doorbell mechanism; otherwise use the new BAR2
		 * mechanism.
		 */
		if (unlikely(fl->bar2_addr == NULL)) {
			t4_write_reg(adapter,
				     T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
				     QID_V(fl->cntxt_id) | val);
		} else {
			writel(val | QID_V(fl->bar2_qid),
			       fl->bar2_addr + SGE_UDB_KDOORBELL);

			/* This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
		fl->pend_cred %= FL_PER_EQ_UNIT;
	}
}

/**
 *	set_rx_sw_desc - initialize software RX buffer descriptor
 *	@sdesc: pointer to the software RX buffer descriptor
 *	@page: pointer to the page data structure backing the RX buffer
 *	@dma_addr: PCI DMA address (possibly with low-bit flags)
 */
static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
				  dma_addr_t dma_addr)
{
	sdesc->page = page;
	sdesc->dma_addr = dma_addr;
}

/*
 * Support for poisoning RX buffers ...
 */
#define POISON_BUF_VAL -1

static inline void poison_buf(struct page *page, size_t sz)
{
#if POISON_BUF_VAL >= 0
	memset(page_address(page), POISON_BUF_VAL, sz);
#endif
}

/**
 *	refill_fl - refill an SGE RX buffer ring
 *	@adapter: the adapter
 *	@fl: the Free List ring to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for the allocations
 *
 *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must assure that
 *	@n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN
 *	EGRESS QUEUE UNITS_ indicates an empty Free List!  Returns the number
 *	of buffers allocated.  If afterwards the queue is found critically low,
 *	mark it as starving in the bitmap of starving FLs.
 */
static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
			      int n, gfp_t gfp)
{
	struct sge *s = &adapter->sge;
	struct page *page;
	dma_addr_t dma_addr;
	unsigned int cred = fl->avail;
	__be64 *d = &fl->desc[fl->pidx];
	struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];

	/*
	 * Sanity: ensure that the result of adding n Free List buffers
	 * won't result in wrapping the SGE's Producer Index around to
	 * its Consumer Index thereby indicating an empty Free List ...
	 */
	BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);

	gfp |= __GFP_NOWARN;

	/*
	 * If we support large pages, prefer large buffers and fail over to
	 * small pages if we can't allocate large pages to satisfy the refill.
	 * If we don't support large pages, drop directly into the small page
	 * allocation code.
	 */
	if (s->fl_pg_order == 0)
		goto alloc_small_pages;

	while (n) {
		page = __dev_alloc_pages(gfp, s->fl_pg_order);
		if (unlikely(!page)) {
			/*
			 * We've failed in our attempt to allocate a "large
			 * page".  Fail over to the "small page" allocation
			 * below.
			 */
			fl->large_alloc_failed++;
			break;
		}
		poison_buf(page, PAGE_SIZE << s->fl_pg_order);

		dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
					PAGE_SIZE << s->fl_pg_order,
					DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
			/*
			 * We've run out of DMA mapping space.  Free up the
			 * buffer and return with what we've managed to put
			 * into the free list.  We don't want to fail over to
			 * the small page allocation below in this case
			 * because DMA mapping resources are typically
			 * critical resources once they become scarce.
			 */
			__free_pages(page, s->fl_pg_order);
			goto out;
		}
		dma_addr |= RX_LARGE_BUF;
		*d++ = cpu_to_be64(dma_addr);

		set_rx_sw_desc(sdesc, page, dma_addr);
		sdesc++;

		fl->avail++;
		if (++fl->pidx == fl->size) {
			fl->pidx = 0;
			sdesc = fl->sdesc;
			d = fl->desc;
		}
		n--;
	}

alloc_small_pages:
	while (n--) {
		page = __dev_alloc_page(gfp);
		if (unlikely(!page)) {
			fl->alloc_failed++;
			break;
		}
		poison_buf(page, PAGE_SIZE);

		dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
					DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
			put_page(page);
			break;
		}
		*d++ = cpu_to_be64(dma_addr);

		set_rx_sw_desc(sdesc, page, dma_addr);
		sdesc++;

		fl->avail++;
		if (++fl->pidx == fl->size) {
			fl->pidx = 0;
			sdesc = fl->sdesc;
			d = fl->desc;
		}
	}

out:
	/*
	 * Update our accounting state to incorporate the new Free List
	 * buffers, tell the hardware about them and return the number of
	 * buffers which we were able to allocate.
	 */
	cred = fl->avail - cred;
	fl->pend_cred += cred;
	ring_fl_db(adapter, fl);

	if (unlikely(fl_starving(adapter, fl))) {
		smp_wmb();
		set_bit(fl->cntxt_id, adapter->sge.starving_fl);
	}

	return cred;
}

/*
 * Refill a Free List to its capacity or the Maximum Refill Increment,
 * whichever is smaller ...
 */
static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
{
	refill_fl(adapter, fl,
		  min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
		  GFP_ATOMIC);
}

/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@dev: the PCI device's core device
 *	@nelem: the number of descriptors
 *	@hwsize: the size of each hardware descriptor
 *	@swsize: the size of each software descriptor
 *	@busaddrp: the physical PCI bus address of the allocated ring
 *	@swringp: return address pointer for software ring
 *	@stat_size: extra space in hardware ring for status information
 *
 *	Allocates resources for an SGE descriptor ring, such as TX queues,
 *	free buffer lists, response queues, etc.  Each SGE ring requires
 *	space for its hardware descriptors plus, optionally, space for software
 *	state associated with each hardware entry (the metadata).  The function
 *	returns three values: the virtual address for the hardware ring (the
 *	return value of the function), the PCI bus address of the hardware
 *	ring (in *busaddrp), and the address of the software ring (in swringp).
 *	Both the hardware and software rings are returned zeroed out.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
			size_t swsize, dma_addr_t *busaddrp, void *swringp,
			size_t stat_size)
{
	/*
	 * Allocate the hardware ring and PCI DMA bus address space for said.
	 */
	size_t hwlen = nelem * hwsize + stat_size;
	void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);

	if (!hwring)
		return NULL;

	/*
	 * If the caller wants a software ring, allocate it and return a
	 * pointer to it in *swringp.
	 */
	BUG_ON((swsize != 0) != (swringp != NULL));
	if (swsize) {
		void *swring = kcalloc(nelem, swsize, GFP_KERNEL);

		if (!swring) {
			dma_free_coherent(dev, hwlen, hwring, *busaddrp);
			return NULL;
		}
		*(void **)swringp = swring;
	}

	/*
	 * Zero out the hardware ring and return its address as our function
	 * value.
	 */
	memset(hwring, 0, hwlen);
	return hwring;
}

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits (8-byte units) needed for a Direct
 *	Scatter/Gather List that can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/*
	 * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
	 * repeated sequences of { Length[i], Length[i+1], Address[i],
	 * Address[i+1] } (this ensures that all addresses are on 64-bit
	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
	 * Address[N+1] is omitted.
	 *
	 * The following calculation incorporates all of the above.  It's
	 * somewhat hard to follow but, briefly: the "+2" accounts for the
	 * first two flits which include the DSGL header, Length0 and
	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
	 * flits for every pair of the remaining N); and finally the
	 * "+((n-1)&1)" adds the one remaining flit needed if (n-1) is
	 * odd ...
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
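
/*
 * Worked example: sgl_len(4) -> n becomes 3, (3*3)/2 == 4, (3 & 1) == 1,
 * plus 2 gives 7 flits.  Cross-check against the layout above: the DSGL
 * header, Length0 and Address0 take 2 flits, the next two entries share
 * 3 flits ({ Len1, Len2, Addr1, Addr2 }), and the final entry needs 2
 * more ({ Len3, 0 } and Addr3), for 7 flits in total.
 */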

/**
 *	flits_to_desc - returns the num of TX descriptors for the given flits
 *	@flits: the number of flits
 *
 *	Returns the number of TX descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int flits)
{
	BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
	return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
}
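
/*
 * E.g. (assuming a 64-byte EQ_UNIT, i.e. TXD_PER_EQ_UNIT == 8):
 * flits_to_desc(7) == 1 TX descriptor while flits_to_desc(9) == 2.
 */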

/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *
 *	Returns whether an Ethernet packet is small enough to fit completely as
 *	immediate data.
 */
static inline int is_eth_imm(const struct sk_buff *skb)
{
	/*
	 * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
	 * which does not accommodate immediate data.  We could dike out all
	 * of the support code for immediate data but that would tie our hands
	 * too much if we ever want to enhance the firmware.  It would also
	 * create more differences between the PF and VF Drivers.
	 */
	return false;
}

/**
 *	calc_tx_flits - calculate the number of flits for a packet TX WR
 *	@skb: the packet
 *
 *	Returns the number of flits needed for a TX Work Request for the
 *	given Ethernet packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;

	/*
	 * If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (is_eth_imm(skb))
		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
				    sizeof(__be64));

	/*
	 * Otherwise, we're going to have to construct a Scatter/Gather List
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size)
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	else
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	return flits;
}
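
/*
 * E.g. (illustrative): a TSO skb with a linear header area and two page
 * fragments has nr_frags + 1 == 3 SGL entries, so sgl_len(3) == 5 flits,
 * plus the WR + LSO CPL + packet CPL header flits from the sizeof() sum
 * above.
 */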

/**
 *	write_sgl - populate a Scatter/Gather List for a packet
 *	@skb: the packet
 *	@tq: the TX queue we are writing into
 *	@sgl: starting location for writing the SGL
 *	@end: points right after the end of the SGL
 *	@start: start offset into skb main-body data to include in the SGL
 *	@addr: the list of DMA bus addresses for the SGL elements
 *
 *	Generates a Scatter/Gather List for the buffers that make up a packet.
 *	The caller must provide adequate space for the SGL that will be written.
 *	The SGL includes all of the packet's page fragments and the data in its
 *	main body except for the first @start bytes.  @sgl must be 16-byte
 *	aligned and within a TX descriptor with available space.  @end points
 *	right after the end of the SGL but does not account for any potential
 *	wrap around, i.e., @end > @tq->stat.
 */
static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		      const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int nfrags = si->nr_frags;
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

	len = skb_headlen(skb) - start;
	if (likely(len)) {
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		nfrags++;
	} else {
		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
		sgl->addr0 = cpu_to_be64(addr[1]);
	}

	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
			      ULPTX_NSGE_V(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;

	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
		to->addr[0] = cpu_to_be64(addr[i]);
		to->addr[1] = cpu_to_be64(addr[++i]);
	}
	if (nfrags) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(0);
		to->addr[0] = cpu_to_be64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)tq->stat)) {
		unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)tq->stat;
		memcpy(tq->desc, (u8 *)buf + part0, part1);
		end = (void *)tq->desc + part1;
	}
	if ((uintptr_t)end & 8)		/* 0-pad to multiple of 16 */
		*end = 0;
}

/**
 *	ring_tx_db - check and potentially ring a TX queue's doorbell
 *	@adapter: the adapter
 *	@tq: the TX queue
 *	@n: number of new descriptors to give to HW
 *
 *	Ring the doorbell for a TX queue.
 */
static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
			      int n)
{
	/* Make sure that all writes to the TX Descriptors are committed
	 * before we tell the hardware about them.
	 */
	wmb();

	/* If we don't have access to the new User Doorbell (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(tq->bar2_addr == NULL)) {
		u32 val = PIDX_V(n);

		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
			     QID_V(tq->cntxt_id) | val);
	} else {
		u32 val = PIDX_T5_V(n);

		/* T4 and later chips share the same PIDX field offset within
		 * the doorbell, but T5 and later shrank the field in order to
		 * gain a bit for Doorbell Priority.  The field was absurdly
		 * large in the first place (14 bits) so we just use the T5
		 * and later limits and warn if a Queue ID is too large.
		 */
		WARN_ON(val & DBPRIO_F);

		/* If we're only writing a single Egress Unit and the BAR2
		 * Queue ID is 0, we can use the Write Combining Doorbell
		 * Gather Buffer; otherwise we use the simple doorbell.
		 */
		if (n == 1 && tq->bar2_qid == 0) {
			unsigned int index = (tq->pidx
					      ? (tq->pidx - 1)
					      : (tq->size - 1));
			__be64 *src = (__be64 *)&tq->desc[index];
			__be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
							SGE_UDB_WCDOORBELL);
			unsigned int count = EQ_UNIT / sizeof(__be64);

			/* Copy the TX Descriptor in a tight loop in order to
			 * try to get it to the adapter in a single Write
			 * Combined transfer on the PCI-E Bus.  If the Write
			 * Combine fails (say because of an interrupt, etc.)
			 * the hardware will simply take the last write as a
			 * simple doorbell write with a PIDX Increment of 1
			 * and will fetch the TX Descriptor from memory via
			 * DMA.
			 */
			while (count) {
				/* the (__force u64) is because the compiler
				 * doesn't understand the endian swizzling
				 * going on
				 */
				writeq((__force u64)*src, dst);
				src++;
				dst++;
				count--;
			}
		} else
			writel(val | QID_V(tq->bar2_qid),
			       tq->bar2_addr + SGE_UDB_KDOORBELL);

		/* This Write Memory Barrier will force the write to the User
		 * Doorbell area to be flushed.  This is needed to prevent
		 * writes on different CPUs for the same queue from hitting
		 * the adapter out of order.  This is required when some Work
		 * Requests take the Write Combine Gather Buffer path (user
		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
		 * take the traditional path where we simply increment the
		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
		 * hardware DMA read the actual Work Request.
		 */
		wmb();
	}
}

/**
 *	inline_tx_skb - inline a packet's data into TX descriptors
 *	@skb: the packet
 *	@tq: the TX queue where the packet will be inlined
 *	@pos: starting position in the TX queue to inline the packet
 *
 *	Inline a packet's contents directly into TX descriptors, starting at
 *	the given position within the TX DMA ring.
 *	Most of the complexity of this operation is dealing with wrap arounds
 *	in the middle of the packet we want to inline.
 */
static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
			  void *pos)
{
	u64 *p;
	int left = (void *)tq->stat - pos;

	if (likely(skb->len <= left)) {
		if (likely(!skb->data_len))
			skb_copy_from_linear_data(skb, pos, skb->len);
		else
			skb_copy_bits(skb, 0, pos, skb->len);
		pos += skb->len;
	} else {
		skb_copy_bits(skb, 0, pos, left);
		skb_copy_bits(skb, left, tq->desc, skb->len - left);
		pos = (void *)tq->desc + (skb->len - left);
	}

	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8)
		*p = 0;
}

/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
	int csum_type;
	const struct iphdr *iph = ip_hdr(skb);

	if (iph->version == 4) {
		if (iph->protocol == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP;
		else if (iph->protocol == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP;
		else {
nocsum:
			/*
			 * unknown protocol, disable HW csum
			 * and hope a bad packet is detected
			 */
			return TXPKT_L4CSUM_DIS_F;
		}
	} else {
		/*
		 * this doesn't work with extension headers
		 */
		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;

		if (ip6h->nexthdr == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP6;
		else if (ip6h->nexthdr == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP6;
		else
			goto nocsum;
	}

	if (likely(csum_type >= TX_CSUM_TCPIP)) {
		u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
		int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;

		if (chip <= CHELSIO_T5)
			hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		else
			hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
	} else {
		int start = skb_transport_offset(skb);

		return TXPKT_CSUM_TYPE_V(csum_type) |
			TXPKT_CSUM_START_V(start) |
			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
	}
}

/*
 * Stop an Ethernet TX queue and record that state change.
 */
static void txq_stop(struct sge_eth_txq *txq)
{
	netif_tx_stop_queue(txq->txq);
	txq->q.stops++;
}

/*
 * Advance our software state for a TX queue by adding n in use descriptors.
 */
static inline void txq_advance(struct sge_txq *tq, unsigned int n)
{
	tq->in_use += n;
	tq->pidx += n;
	if (tq->pidx >= tq->size)
		tq->pidx -= tq->size;
}
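
/*
 * E.g. (illustrative): with tq->size == 1024 and tq->pidx == 1020, adding
 * ndesc == 6 in-use descriptors yields 1026, which wraps back to pidx == 2.
 */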

/**
 *	t4vf_eth_xmit - add a packet to an Ethernet TX queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
 */
int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u32 wr_mid;
	u64 cntrl, *end;
	int qidx, credits, max_pkt_len;
	unsigned int flits, ndesc;
	struct adapter *adapter;
	struct sge_eth_txq *txq;
	const struct port_info *pi;
	struct fw_eth_tx_pkt_vm_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	const struct skb_shared_info *ssi;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
	const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
					sizeof(wr->ethmacsrc) +
					sizeof(wr->ethtype) +
					sizeof(wr->vlantci));

	/*
	 * The chip minimum packet length is 10 octets but the firmware
	 * command that we are using requires that we copy the Ethernet header
	 * (including the VLAN tag) into the header so we reject anything
	 * smaller than that ...
	 */
	if (unlikely(skb->len < fw_hdr_copy_len))
		goto out_free;

	/* Discard the packet if the length is greater than mtu */
	max_pkt_len = ETH_HLEN + dev->mtu;
	if (skb_vlan_tag_present(skb))
		max_pkt_len += VLAN_HLEN;
	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
		goto out_free;

	/*
	 * Figure out which TX Queue we're going to use.
	 */
	pi = netdev_priv(dev);
	adapter = pi->adapter;
	qidx = skb_get_queue_mapping(skb);
	BUG_ON(qidx >= pi->nqsets);
	txq = &adapter->sge.ethtxq[pi->first_qset + qidx];

	/*
	 * Take this opportunity to reclaim any TX Descriptors whose DMA
	 * transfers have completed.
	 */
	reclaim_completed_tx(adapter, &txq->q, true);

	/*
	 * Calculate the number of flits and TX Descriptors we're going to
	 * need along with how many TX Descriptors will be left over after
	 * we inject our Work Request.
	 */
	flits = calc_tx_flits(skb);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&txq->q) - ndesc;

	if (unlikely(credits < 0)) {
		/*
		 * Not enough room for this packet's Work Request.  Stop the
		 * TX Queue and return a "busy" condition.  The queue will get
		 * started later on when the firmware informs us that space
		 * has opened up.
		 */
		txq_stop(txq);
		dev_err(adapter->pdev_dev,
			"%s: TX ring %u full while queue awake!\n",
			dev->name, qidx);
		return NETDEV_TX_BUSY;
	}

	if (!is_eth_imm(skb) &&
	    unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
		/*
		 * We need to map the skb into PCI DMA space (because it can't
		 * be in-lined directly into the Work Request) and the mapping
		 * operation failed.  Record the error and drop the packet.
		 */
		txq->mapping_err++;
		goto out_free;
	}

	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		/*
		 * After we're done injecting the Work Request for this
		 * packet, we'll be below our "stop threshold" so stop the TX
		 * Queue now and schedule a request for an SGE Egress Queue
		 * Update message.  The queue will get started later on when
		 * the firmware processes this Work Request and sends us an
		 * Egress Queue Status Update message indicating that space
		 * has opened up.
		 */
		txq_stop(txq);
		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}

	/*
	 * Start filling in our Work Request.  Note that we do _not_ handle
	 * the WR Header wrapping around the TX Descriptor Ring.  If our
	 * maximum header size ever exceeds one TX Descriptor, we'll need to
	 * do something else here.
	 */
	BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
	wr = (void *)&txq->q.desc[txq->q.pidx];
	wr->equiq_to_len16 = cpu_to_be32(wr_mid);
	wr->r3[0] = cpu_to_be32(0);
	wr->r3[1] = cpu_to_be32(0);
	skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
	end = (u64 *)wr + flits;

	/*
	 * If this is a Large Send Offload packet we'll put in an LSO CPL
	 * message with an encapsulated TX Packet CPL message.  Otherwise we
	 * just use a TX Packet CPL message.
	 */
	ssi = skb_shinfo(skb);
	if (ssi->gso_size) {
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
		int l3hdr_len = skb_network_header_len(skb);
		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;

		wr->op_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
				    FW_WR_IMMDLEN_V(sizeof(*lso) +
						    sizeof(*cpl)));
		/*
		 * Fill in the LSO CPL message.
		 */
		lso->lso_ctrl =
			cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
				    LSO_FIRST_SLICE_F |
				    LSO_LAST_SLICE_F |
				    LSO_IPV6_V(v6) |
				    LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
				    LSO_IPHDR_LEN_V(l3hdr_len / 4) |
				    LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
		lso->ipid_ofst = cpu_to_be16(0);
		lso->mss = cpu_to_be16(ssi->gso_size);
		lso->seqno_offset = cpu_to_be32(0);
		if (is_t4(adapter->params.chip))
			lso->len = cpu_to_be32(skb->len);
		else
			lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));

		/*
		 * Set up TX Packet CPL pointer, control word and perform
		 * accounting.
		 */
		cpl = (void *)(lso + 1);

		if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
			cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
		else
			cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);

		cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
					   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
			 TXPKT_IPHDR_LEN_V(l3hdr_len);
		txq->tso++;
		txq->tx_cso += ssi->gso_segs;
	} else {
		int len;

		len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
		wr->op_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
				    FW_WR_IMMDLEN_V(len));

		/*
		 * Set up TX Packet CPL pointer, control word and perform
		 * accounting.
		 */
		cpl = (void *)(wr + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(adapter->params.chip, skb) |
				TXPKT_IPCSUM_DIS_F;
			txq->tx_cso++;
		} else
			cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
	}

	/*
	 * If there's a VLAN tag present, add that to the list of things to
	 * do in this Work Request.
	 */
	if (skb_vlan_tag_present(skb)) {
		txq->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
	}

	/*
	 * Fill in the TX Packet CPL message header.
	 */
	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
				 TXPKT_INTF_V(pi->port_id) |
				 TXPKT_PF_V(0));
	cpl->pack = cpu_to_be16(0);
	cpl->len = cpu_to_be16(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

#ifdef T4_TRACE
	T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
		  "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
		  ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
#endif

	/*
	 * Fill in the body of the TX Packet CPL message with either in-lined
	 * data or a Scatter/Gather List.
	 */
	if (is_eth_imm(skb)) {
		/*
		 * In-line the packet's data and free the skb since we don't
		 * need it any longer.
		 */
		inline_tx_skb(skb, &txq->q, cpl + 1);
		dev_consume_skb_any(skb);
	} else {
		/*
		 * Write the skb's Scatter/Gather list into the TX Packet CPL
		 * message and retain a pointer to the skb so we can free it
		 * later when its DMA completes.  (We store the skb pointer
		 * in the Software Descriptor corresponding to the last TX
		 * Descriptor used by the Work Request.)
		 *
		 * The retained skb will be freed when the corresponding TX
		 * Descriptors are reclaimed after their DMAs complete.
		 * However, this could take quite a while since, in general,
		 * the hardware is set up to be lazy about sending DMA
		 * completion notifications to us and we mostly perform TX
		 * reclaims in the transmit routine.
		 *
		 * This is good for performance but means that we rely on new
		 * TX packets arriving to run the destructors of completed
		 * packets, which open up space in their sockets' send queues.
		 * Sometimes we do not get such new packets causing TX to
		 * stall.  A single UDP transmitter is a good example of this
		 * situation.  We have a clean up timer that periodically
		 * reclaims completed packets but it doesn't run often enough
		 * (nor do we want it to) to prevent lengthy stalls.  A
		 * solution to this problem is to run the destructor early,
		 * after the packet is queued but before it's DMAd.  A con is
		 * that we lie to socket memory accounting, but the amount of
		 * extra memory is reasonable (limited by the number of TX
		 * descriptors), the packets do actually get freed quickly by
		 * new packets almost always, and for protocols like TCP that
		 * wait for acks to really free up the data the extra memory
		 * is even less.  On the positive side we run the destructors
		 * on the sending CPU rather than on a potentially different
		 * completing CPU, usually a good thing.
		 *
		 * Run the destructor before telling the DMA engine about the
		 * packet to make sure it doesn't complete and get freed
		 * prematurely.
		 */
		struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
		struct sge_txq *tq = &txq->q;
		int last_desc;

		/*
		 * If the Work Request header was an exact multiple of our TX
		 * Descriptor length, then it's possible that the starting SGL
		 * pointer lines up exactly with the end of our TX Descriptor
		 * ring.  If that's the case, wrap around to the beginning
		 * here ...
		 */
		if (unlikely((void *)sgl == (void *)tq->stat)) {
			sgl = (void *)tq->desc;
			end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
		}

		write_sgl(skb, tq, sgl, end, 0, addr);
		skb_orphan(skb);

		last_desc = tq->pidx + ndesc - 1;
		if (last_desc >= tq->size)
			last_desc -= tq->size;
		tq->sdesc[last_desc].skb = skb;
		tq->sdesc[last_desc].sgl = sgl;
	}

	/*
	 * Advance our internal TX Queue state, tell the hardware about
	 * the new TX descriptors and return success.
	 */
	txq_advance(&txq->q, ndesc);
	netif_trans_update(dev);
	ring_tx_db(adapter, &txq->q, ndesc);
	return NETDEV_TX_OK;

out_free:
	/*
	 * An error of some sort happened.  Free the TX skb and tell the
	 * OS that we've "dealt" with the packet ...
	 */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 *	copy_frags - copy fragments from gather list into skb_shared_info
 *	@skb: destination skb
 *	@gl: source internal packet gather list
 *	@offset: packet start offset in first page
 *
 *	Copy an internal packet gather list into a Linux skb_shared_info
 *	structure.
 */
static inline void copy_frags(struct sk_buff *skb,
			      const struct pkt_gl *gl,
			      unsigned int offset)
{
	int i;

	/* usually there's just one frag */
	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
			     gl->frags[0].offset + offset,
			     gl->frags[0].size - offset);
	skb_shinfo(skb)->nr_frags = gl->nfrags;
	for (i = 1; i < gl->nfrags; i++)
		__skb_fill_page_desc(skb, i, gl->frags[i].page,
				     gl->frags[i].offset,
				     gl->frags[i].size);

	/* get a reference to the last page, we don't own it */
	get_page(gl->frags[gl->nfrags - 1].page);
}

/**
 *	t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
 *	@gl: the gather list
 *	@skb_len: size of sk_buff main body if it carries fragments
 *	@pull_len: amount of data to move to the sk_buff's main body
 *
 *	Builds an sk_buff from the given packet gather list.  Returns the
 *	sk_buff or %NULL if sk_buff allocation failed.
 */
static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
					 unsigned int skb_len,
					 unsigned int pull_len)
{
	struct sk_buff *skb;

	/*
	 * If the ingress packet is small enough, allocate an skb large enough
	 * for all of the data and copy it inline.  Otherwise, allocate an skb
	 * with enough room to pull in the header and reference the rest of
	 * the data via the skb fragment list.
	 *
	 * Below we rely on RX_COPY_THRES being less than the smallest Rx
	 * buffer size, which is expected since buffers are at least
	 * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only one
	 * fragment.
	 */
	if (gl->tot_len <= RX_COPY_THRES) {
		/* small packets have only one fragment */
		skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, gl->tot_len);
		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
	} else {
		skb = alloc_skb(skb_len, GFP_ATOMIC);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, pull_len);
		skb_copy_to_linear_data(skb, gl->va, pull_len);

		copy_frags(skb, gl, pull_len);
		skb->len = gl->tot_len;
		skb->data_len = skb->len - pull_len;
		skb->truesize += skb->data_len;
	}

out:
	return skb;
}

/**
 *	t4vf_pktgl_free - free a packet gather list
 *	@gl: the gather list
 *
 *	Releases the pages of a packet gather list.  We do not own the last
 *	page on the list and do not free it.
 */
static void t4vf_pktgl_free(const struct pkt_gl *gl)
{
	int frag;

	frag = gl->nfrags - 1;
	while (frag--)
		put_page(gl->frags[frag].page);
}

/**
 *	do_gro - perform Generic Receive Offload ingress packet processing
 *	@rxq: ingress RX Ethernet Queue
 *	@gl: gather list for ingress packet
 *	@pkt: CPL header for last packet fragment
 *
 *	Perform Generic Receive Offload (GRO) ingress packet processing.
 *	We use the standard Linux GRO interfaces for this.
 */
static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
		   const struct cpl_rx_pkt *pkt)
{
	struct adapter *adapter = rxq->rspq.adapter;
	struct sge *s = &adapter->sge;
	int ret;
	struct sk_buff *skb;

	skb = napi_get_frags(&rxq->rspq.napi);
	if (unlikely(!skb)) {
		t4vf_pktgl_free(gl);
		rxq->stats.rx_drops++;
		return;
	}

	copy_frags(skb, gl, s->pktshift);
	skb->len = gl->tot_len - s->pktshift;
	skb->data_len = skb->len;
	skb->truesize += skb->data_len;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxq->rspq.idx);

	if (pkt->vlan_ex) {
		__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
				       be16_to_cpu(pkt->vlan));
		rxq->stats.vlan_ex++;
	}
	ret = napi_gro_frags(&rxq->rspq.napi);

	if (ret == GRO_HELD)
		rxq->stats.lro_pkts++;
	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
		rxq->stats.lro_merged++;
	rxq->stats.pkts++;
	rxq->stats.rx_cso++;
}

/**
 *	t4vf_ethrx_handler - process an ingress ethernet packet
 *	@rspq: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the RX_PKT message
 *	@gl: the gather list of packet fragments
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 */
int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
		       const struct pkt_gl *gl)
{
	struct sk_buff *skb;
	const struct cpl_rx_pkt *pkt = (void *)rsp;
	bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
		       (rspq->netdev->features & NETIF_F_RXCSUM);
	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
	struct adapter *adapter = rspq->adapter;
	struct sge *s = &adapter->sge;

	/*
	 * If this is a good TCP packet and we have Generic Receive Offload
	 * enabled, handle the packet in the GRO path.
	 */
	if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
	    (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
	    !pkt->ip_frag) {
		do_gro(rxq, gl, pkt);
		return 0;
	}

	/*
	 * Convert the Packet Gather List into an skb.
	 */
	skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
	if (unlikely(!skb)) {
		t4vf_pktgl_free(gl);
		rxq->stats.rx_drops++;
		return 0;
	}
	__skb_pull(skb, s->pktshift);
	skb->protocol = eth_type_trans(skb, rspq->netdev);
	skb_record_rx_queue(skb, rspq->idx);
	rxq->stats.pkts++;

	if (csum_ok && !pkt->err_vec &&
	    (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
		if (!pkt->ip_frag)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else {
			__sum16 c = (__force __sum16)pkt->csum;

			skb->csum = csum_unfold(c);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}
		rxq->stats.rx_cso++;
	} else
		skb_checksum_none_assert(skb);

	if (pkt->vlan_ex) {
		rxq->stats.vlan_ex++;
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       be16_to_cpu(pkt->vlan));
	}

	netif_receive_skb(skb);

	return 0;
}

/**
 *	is_new_response - check if a response is newly written
 *	@rc: the response control descriptor
 *	@rspq: the response queue
 *
 *	Returns true if a response descriptor contains a yet unprocessed
 *	response.
 */
static inline bool is_new_response(const struct rsp_ctrl *rc,
				   const struct sge_rspq *rspq)
{
	return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen;
}

/**
 *	restore_rx_bufs - put back a packet's RX buffers
 *	@gl: the packet gather list
 *	@fl: the SGE Free List
 *	@frags: how many fragments in @gl
 *
 *	Called when we find out that the current packet, @gl, can't be
 *	processed right away for some reason.  This is a very rare event and
 *	there's no effort to make this suspension/resumption process
 *	particularly efficient.
 *
 *	We implement the suspension by putting all of the RX buffers associated
 *	with the current packet back on the original Free List.  The buffers
 *	have already been unmapped and are left unmapped, we mark them as
 *	unmapped in order to prevent further unmapping attempts.  (Effectively
 *	this function undoes the series of @unmap_rx_buf calls which were done
 *	to create the current packet's gather list.)  This leaves us ready to
 *	restart processing of the packet the next time we start processing the
 *	RX Queue ...
 */
static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
			    int frags)
{
	struct rx_sw_desc *sdesc;

	while (frags--) {
		if (fl->cidx == 0)
			fl->cidx = fl->size - 1;
		else
			fl->cidx--;
		sdesc = &fl->sdesc[fl->cidx];
		sdesc->page = gl->frags[frags].page;
		sdesc->dma_addr |= RX_UNMAPPED_BUF;
		fl->avail++;
	}
}
1722
1723 /**
1724 * rspq_next - advance to the next entry in a response queue
1725 * @rspq: the queue
1726 *
1727 * Updates the state of a response queue to advance it to the next entry.
1728 */
1729 static inline void rspq_next(struct sge_rspq *rspq)
1730 {
1731 rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
1732 if (unlikely(++rspq->cidx == rspq->size)) {
1733 rspq->cidx = 0;
1734 rspq->gen ^= 1;
1735 rspq->cur_desc = rspq->desc;
1736 }
1737 }
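/* Editor's note: a minimal, self-contained sketch (not driver code) of the
 * generation-bit protocol that is_new_response() and rspq_next() implement
 * together. The expected generation flips on every ring wrap, so an entry
 * still carrying the previous pass's generation bit is recognized as stale.
 * The toy_* names below are hypothetical.
 */
struct toy_rspq {
	unsigned int cidx;	/* consumer index */
	unsigned int size;	/* number of ring entries */
	unsigned int gen;	/* generation we expect, 0 or 1 */
};

static bool toy_is_new(const struct toy_rspq *q, unsigned int entry_gen)
{
	/* An entry is unprocessed only if its generation matches ours. */
	return entry_gen == q->gen;
}

static void toy_next(struct toy_rspq *q)
{
	if (++q->cidx == q->size) {
		q->cidx = 0;
		q->gen ^= 1;	/* wrapped: valid entries now carry the new gen */
	}
}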
1738
1739 /**
1740 * process_responses - process responses from an SGE response queue
1741 * @rspq: the ingress response queue to process
1742 * @budget: how many responses can be processed in this round
1743 *
1744 * Process responses from a Scatter Gather Engine response queue up to
1745 * the supplied budget. Responses include received packets as well as
1746 * control messages from firmware or hardware.
1747 *
1748 * Additionally choose the interrupt holdoff time for the next interrupt
1749 * on this queue. If the system is short of memory, use a fairly
1750 * long delay to help recovery.
1751 */
1752 static int process_responses(struct sge_rspq *rspq, int budget)
1753 {
1754 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1755 struct adapter *adapter = rspq->adapter;
1756 struct sge *s = &adapter->sge;
1757 int budget_left = budget;
1758
1759 while (likely(budget_left)) {
1760 int ret, rsp_type;
1761 const struct rsp_ctrl *rc;
1762
1763 rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
1764 if (!is_new_response(rc, rspq))
1765 break;
1766
1767 /*
1768 * Figure out what kind of response we've received from the
1769 * SGE.
1770 */
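/* Editor's note: the barrier below pairs with the generation-bit check
 * above; it keeps reads of the response body from being reordered ahead
 * of the check that the descriptor is actually new.
 */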
1771 dma_rmb();
1772 rsp_type = RSPD_TYPE_G(rc->type_gen);
1773 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
1774 struct page_frag *fp;
1775 struct pkt_gl gl;
1776 const struct rx_sw_desc *sdesc;
1777 u32 bufsz, frag;
1778 u32 len = be32_to_cpu(rc->pldbuflen_qid);
1779
1780 /*
1781 * If we get a "new buffer" message from the SGE we
1782 * need to move on to the next Free List buffer.
1783 */
1784 if (len & RSPD_NEWBUF_F) {
1785 /*
1786 * We get one "new buffer" message when we
1787 * first start up a queue so we need to ignore
1788 * it when our offset into the buffer is 0.
1789 */
1790 if (likely(rspq->offset > 0)) {
1791 free_rx_bufs(rspq->adapter, &rxq->fl,
1792 1);
1793 rspq->offset = 0;
1794 }
1795 len = RSPD_LEN_G(len);
1796 }
1797 gl.tot_len = len;
1798
1799 /*
1800 * Gather packet fragments.
1801 */
1802 for (frag = 0, fp = gl.frags; /**/; frag++, fp++) {
1803 BUG_ON(frag >= MAX_SKB_FRAGS);
1804 BUG_ON(rxq->fl.avail == 0);
1805 sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
1806 bufsz = get_buf_size(adapter, sdesc);
1807 fp->page = sdesc->page;
1808 fp->offset = rspq->offset;
1809 fp->size = min(bufsz, len);
1810 len -= fp->size;
1811 if (!len)
1812 break;
1813 unmap_rx_buf(rspq->adapter, &rxq->fl);
1814 }
1815 gl.nfrags = frag+1;
1816
1817 /*
1818 * Last buffer remains mapped so explicitly make it
1819 * coherent for CPU access and start preloading first
1820 * cache line ...
1821 */
1822 dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
1823 get_buf_addr(sdesc),
1824 fp->size, DMA_FROM_DEVICE);
1825 gl.va = (page_address(gl.frags[0].page) +
1826 gl.frags[0].offset);
1827 prefetch(gl.va);
1828
1829 /*
1830 * Hand the new ingress packet to the handler for
1831 * this Response Queue.
1832 */
1833 ret = rspq->handler(rspq, rspq->cur_desc, &gl);
1834 if (likely(ret == 0))
1835 rspq->offset += ALIGN(fp->size, s->fl_align);
1836 else
1837 restore_rx_bufs(&gl, &rxq->fl, frag);
1838 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
1839 ret = rspq->handler(rspq, rspq->cur_desc, NULL);
1840 } else {
1841 WARN_ON(rsp_type > RSPD_TYPE_CPL_X);
1842 ret = 0;
1843 }
1844
1845 if (unlikely(ret)) {
1846 /*
1847 * Couldn't process descriptor, back off for recovery.
1848 * We use the SGE's last timer which has the longest
1849 * interrupt coalescing value ...
1850 */
1851 const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
1852 rspq->next_intr_params =
1853 QINTR_TIMER_IDX_V(NOMEM_TIMER_IDX);
1854 break;
1855 }
1856
1857 rspq_next(rspq);
1858 budget_left--;
1859 }
1860
1861 /*
1862 * If this is a Response Queue with an associated Free List and
1863 * at least two Egress Queue units available in the Free List
1864 * for new buffer pointers, refill the Free List.
1865 */
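/* Editor's note: with the usual EQ_UNIT of 64 bytes and 64-bit DMA
 * addresses, FL_PER_EQ_UNIT = 8, so this refill kicks in once at least
 * 16 Free List entries are free to be replenished (the 64-byte unit is
 * an assumption; the test itself is size-independent).
 */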
1866 if (rspq->offset >= 0 &&
1867 fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
1868 __refill_fl(rspq->adapter, &rxq->fl);
1869 return budget - budget_left;
1870 }
1871
1872 /**
1873 * napi_rx_handler - the NAPI handler for RX processing
1874 * @napi: the napi instance
1875 * @budget: how many packets we can process in this round
1876 *
1877 * Handler for new data events when using NAPI. This does not need any
1878 * locking or protection from interrupts as data interrupts are off at
1879 * this point and other adapter interrupts do not interfere (the latter
1880 * is not a concern at all with MSI-X as non-data interrupts then have
1881 * a separate handler).
1882 */
1883 static int napi_rx_handler(struct napi_struct *napi, int budget)
1884 {
1885 unsigned int intr_params;
1886 struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
1887 int work_done = process_responses(rspq, budget);
1888 u32 val;
1889
1890 if (likely(work_done < budget)) {
1891 napi_complete(napi);
1892 intr_params = rspq->next_intr_params;
1893 rspq->next_intr_params = rspq->intr_params;
1894 } else
1895 intr_params = QINTR_TIMER_IDX_V(SGE_TIMER_UPD_CIDX);
1896
1897 if (unlikely(work_done == 0))
1898 rspq->unhandled_irqs++;
1899
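/* Editor's note: a single GTS doorbell write both returns credits for
 * the entries we consumed (CIDXINC) and arms the next interrupt with
 * the chosen holdoff parameters (SEINTARM).
 */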
1900 val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
1901 /* If we don't have access to the new User GTS (T5+), use the old
1902 * doorbell mechanism; otherwise use the new BAR2 mechanism.
1903 */
1904 if (unlikely(!rspq->bar2_addr)) {
1905 t4_write_reg(rspq->adapter,
1906 T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1907 val | INGRESSQID_V((u32)rspq->cntxt_id));
1908 } else {
1909 writel(val | INGRESSQID_V(rspq->bar2_qid),
1910 rspq->bar2_addr + SGE_UDB_GTS);
1911 wmb();
1912 }
1913 return work_done;
1914 }
1915
1916 /*
1917 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
1918 * (i.e., response queue serviced by NAPI polling).
1919 */
1920 irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
1921 {
1922 struct sge_rspq *rspq = cookie;
1923
1924 napi_schedule(&rspq->napi);
1925 return IRQ_HANDLED;
1926 }
1927
1928 /*
1929 * Process the indirect interrupt entries in the interrupt queue and kick off
1930 * NAPI for each queue that has generated an entry.
1931 */
1932 static unsigned int process_intrq(struct adapter *adapter)
1933 {
1934 struct sge *s = &adapter->sge;
1935 struct sge_rspq *intrq = &s->intrq;
1936 unsigned int work_done;
1937 u32 val;
1938
1939 spin_lock(&adapter->sge.intrq_lock);
1940 for (work_done = 0; ; work_done++) {
1941 const struct rsp_ctrl *rc;
1942 unsigned int qid, iq_idx;
1943 struct sge_rspq *rspq;
1944
1945 /*
1946 * Grab the next response from the interrupt queue and bail
1947 * out if it's not a new response.
1948 */
1949 rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
1950 if (!is_new_response(rc, intrq))
1951 break;
1952
1953 /*
1954 * If the response isn't a forwarded interrupt message, issue an
1955 * error and go on to the next response message. This should
1956 * never happen ...
1957 */
1958 dma_rmb();
1959 if (unlikely(RSPD_TYPE_G(rc->type_gen) != RSPD_TYPE_INTR_X)) {
1960 dev_err(adapter->pdev_dev,
1961 "Unexpected INTRQ response type %d\n",
1962 RSPD_TYPE_G(rc->type_gen));
1963 continue;
1964 }
1965
1966 /*
1967 * Extract the Queue ID from the interrupt message and perform
1968 * sanity checking to make sure it really refers to one of our
1969 * Ingress Queues which is active and matches the queue's ID.
1970 * None of these error conditions should ever happen so we may
1971 * want to make them fatal and/or conditional under
1972 * DEBUG.
1973 */
1974 qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid));
1975 iq_idx = IQ_IDX(s, qid);
1976 if (unlikely(iq_idx >= MAX_INGQ)) {
1977 dev_err(adapter->pdev_dev,
1978 "Ingress QID %d out of range\n", qid);
1979 continue;
1980 }
1981 rspq = s->ingr_map[iq_idx];
1982 if (unlikely(rspq == NULL)) {
1983 dev_err(adapter->pdev_dev,
1984 "Ingress QID %d RSPQ=NULL\n", qid);
1985 continue;
1986 }
1987 if (unlikely(rspq->abs_id != qid)) {
1988 dev_err(adapter->pdev_dev,
1989 "Ingress QID %d refers to RSPQ %d\n",
1990 qid, rspq->abs_id);
1991 continue;
1992 }
1993
1994 /*
1995 * Schedule NAPI processing on the indicated Response Queue
1996 * and move on to the next entry in the Forwarded Interrupt
1997 * Queue.
1998 */
1999 napi_schedule(&rspq->napi);
2000 rspq_next(intrq);
2001 }
2002
2003 val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
2004 /* If we don't have access to the new User GTS (T5+), use the old
2005 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2006 */
2007 if (unlikely(!intrq->bar2_addr)) {
2008 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
2009 val | INGRESSQID_V(intrq->cntxt_id));
2010 } else {
2011 writel(val | INGRESSQID_V(intrq->bar2_qid),
2012 intrq->bar2_addr + SGE_UDB_GTS);
2013 wmb();
2014 }
2015
2016 spin_unlock(&adapter->sge.intrq_lock);
2017
2018 return work_done;
2019 }
2020
2021 /*
2022 * The MSI interrupt handler handles data events from SGE response queues as
2023 * well as error and other async events as they all use the same MSI vector.
2024 */
2025 static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
2026 {
2027 struct adapter *adapter = cookie;
2028
2029 process_intrq(adapter);
2030 return IRQ_HANDLED;
2031 }
2032
2033 /**
2034 * t4vf_intr_handler - select the top-level interrupt handler
2035 * @adapter: the adapter
2036 *
2037 * Selects the top-level interrupt handler based on the type of interrupts
2038 * (MSI-X or MSI).
2039 */
2040 irq_handler_t t4vf_intr_handler(struct adapter *adapter)
2041 {
2042 BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
2043 if (adapter->flags & USING_MSIX)
2044 return t4vf_sge_intr_msix;
2045 else
2046 return t4vf_intr_msi;
2047 }
2048
2049 /**
2050 * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
2051 * @data: the adapter
2052 *
2053 * Runs periodically from a timer to perform maintenance of SGE RX queues.
2054 *
2055 * Replenishes RX queues that have run out due to memory shortage.
2056 * Normally new RX buffers are added when existing ones are consumed but
2057 * when out of memory a queue can become empty. We schedule NAPI to do
2058 * the actual refill.
2059 */
2060 static void sge_rx_timer_cb(unsigned long data)
2061 {
2062 struct adapter *adapter = (struct adapter *)data;
2063 struct sge *s = &adapter->sge;
2064 unsigned int i;
2065
2066 /*
2067 * Scan the "Starving Free Lists" flag array looking for any Free
2068 * Lists in need of more free buffers. If we find one and it's not
2069 * being actively polled, then bump its "starving" counter and attempt
2070 * to refill it. If we're successful in adding enough buffers to push
2071 * the Free List over the starving threshold, then we can clear its
2072 * "starving" status.
2073 */
2074 for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
2075 unsigned long m;
2076
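/* Editor's note: "m &= m - 1" clears the lowest set bit on each pass,
 * so the loop visits every set bit in the word; __ffs() yields that
 * bit's index, which maps to an entry in s->egr_map.
 */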
2077 for (m = s->starving_fl[i]; m; m &= m - 1) {
2078 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2079 struct sge_fl *fl = s->egr_map[id];
2080
2081 clear_bit(id, s->starving_fl);
2082 smp_mb__after_atomic();
2083
2084 /*
2085 * Since we are accessing fl without a lock there's a
2086 * small probability of a false positive where we
2087 * schedule napi but the FL is no longer starving.
2088 * No biggie.
2089 */
2090 if (fl_starving(adapter, fl)) {
2091 struct sge_eth_rxq *rxq;
2092
2093 rxq = container_of(fl, struct sge_eth_rxq, fl);
2094 if (napi_reschedule(&rxq->rspq.napi))
2095 fl->starving++;
2096 else
2097 set_bit(id, s->starving_fl);
2098 }
2099 }
2100 }
2101
2102 /*
2103 * Reschedule the next scan for starving Free Lists ...
2104 */
2105 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2106 }
2107
2108 /**
2109 * sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
2110 * @data: the adapter
2111 *
2112 * Runs periodically from a timer to perform maintenance of SGE TX queues.
2113 *
2114 * Reclaims completed Tx packets for the Ethernet queues. Normally
2115 * packets are cleaned up by new Tx packets, this timer cleans up packets
2116 * when no new packets are being submitted. This is essential for pktgen,
2117 * at least.
2118 */
2119 static void sge_tx_timer_cb(unsigned long data)
2120 {
2121 struct adapter *adapter = (struct adapter *)data;
2122 struct sge *s = &adapter->sge;
2123 unsigned int i, budget;
2124
2125 budget = MAX_TIMER_TX_RECLAIM;
2126 i = s->ethtxq_rover;
2127 do {
2128 struct sge_eth_txq *txq = &s->ethtxq[i];
2129
2130 if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
2131 int avail = reclaimable(&txq->q);
2132
2133 if (avail > budget)
2134 avail = budget;
2135
2136 free_tx_desc(adapter, &txq->q, avail, true);
2137 txq->q.in_use -= avail;
2138 __netif_tx_unlock(txq->txq);
2139
2140 budget -= avail;
2141 if (!budget)
2142 break;
2143 }
2144
2145 i++;
2146 if (i >= s->ethqsets)
2147 i = 0;
2148 } while (i != s->ethtxq_rover);
2149 s->ethtxq_rover = i;
2150
2151 /*
2152 * If we found too many reclaimable packets, schedule a timer in the
2153 * near future to continue where we left off. Otherwise the next timer
2154 * will be at its normal interval.
2155 */
2156 mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2157 }
2158
2159 /**
2160 * bar2_address - return the BAR2 address for an SGE Queue's Registers
2161 * @adapter: the adapter
2162 * @qid: the SGE Queue ID
2163 * @qtype: the SGE Queue Type (Egress or Ingress)
2164 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
2165 *
2166 * Returns the BAR2 address for the SGE Queue Registers associated with
2167 * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
2168 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
2169 * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
2170 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
2171 */
2172 static void __iomem *bar2_address(struct adapter *adapter,
2173 unsigned int qid,
2174 enum t4_bar2_qtype qtype,
2175 unsigned int *pbar2_qid)
2176 {
2177 u64 bar2_qoffset;
2178 int ret;
2179
2180 ret = t4vf_bar2_sge_qregs(adapter, qid, qtype,
2181 &bar2_qoffset, pbar2_qid);
2182 if (ret)
2183 return NULL;
2184
2185 return adapter->bar2 + bar2_qoffset;
2186 }
2187
2188 /**
2189 * t4vf_sge_alloc_rxq - allocate an SGE RX Queue
2190 * @adapter: the adapter
2191 * @rspq: pointer to the new rxq's Response Queue to be filled in
2192 * @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
2193 * @dev: the network device associated with the new rspq
2194 * @intr_dest: MSI-X vector index (overridden in MSI mode)
2195 * @fl: pointer to the new rxq's Free List to be filled in
2196 * @hnd: the response handler to invoke for the rspq
2197 */
2198 int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2199 bool iqasynch, struct net_device *dev,
2200 int intr_dest,
2201 struct sge_fl *fl, rspq_handler_t hnd)
2202 {
2203 struct sge *s = &adapter->sge;
2204 struct port_info *pi = netdev_priv(dev);
2205 struct fw_iq_cmd cmd, rpl;
2206 int ret, iqandst, flsz = 0;
2207
2208 /*
2209 * If we're using MSI interrupts and we're not initializing the
2210 * Forwarded Interrupt Queue itself, then set up this queue for
2211 * indirect interrupts to the Forwarded Interrupt Queue. Obviously
2212 * the Forwarded Interrupt Queue must be set up before any other
2213 * ingress queue ...
2214 */
2215 if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) {
2216 iqandst = SGE_INTRDST_IQ;
2217 intr_dest = adapter->sge.intrq.abs_id;
2218 } else
2219 iqandst = SGE_INTRDST_PCI;
2220
2221 /*
2222 * Allocate the hardware ring for the Response Queue. The size needs
2223 * to be a multiple of 16 which includes the mandatory status entry
2224 * (regardless of whether the Status Page capabilities are enabled or
2225 * not).
2226 */
2227 rspq->size = roundup(rspq->size, 16);
2228 rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
2229 0, &rspq->phys_addr, NULL, 0);
2230 if (!rspq->desc)
2231 return -ENOMEM;
2232
2233 /*
2234 * Fill in the Ingress Queue Command. Note: Ideally this code would
2235 * be in t4vf_hw.c but there are so many parameters and dependencies
2236 * on our Linux SGE state that we would end up having to pass tons of
2237 * parameters. We'll have to think about how this might be migrated
2238 * into OS-independent common code ...
2239 */
2240 memset(&cmd, 0, sizeof(cmd));
2241 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
2242 FW_CMD_REQUEST_F |
2243 FW_CMD_WRITE_F |
2244 FW_CMD_EXEC_F);
2245 cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F |
2246 FW_IQ_CMD_IQSTART_F |
2247 FW_LEN16(cmd));
2248 cmd.type_to_iqandstindex =
2249 cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
2250 FW_IQ_CMD_IQASYNCH_V(iqasynch) |
2251 FW_IQ_CMD_VIID_V(pi->viid) |
2252 FW_IQ_CMD_IQANDST_V(iqandst) |
2253 FW_IQ_CMD_IQANUS_V(1) |
2254 FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) |
2255 FW_IQ_CMD_IQANDSTINDEX_V(intr_dest));
2256 cmd.iqdroprss_to_iqesize =
2257 cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) |
2258 FW_IQ_CMD_IQGTSMODE_F |
2259 FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) |
2260 FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4));
2261 cmd.iqsize = cpu_to_be16(rspq->size);
2262 cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
2263
2264 if (fl) {
2265 enum chip_type chip =
2266 CHELSIO_CHIP_VERSION(adapter->params.chip);
2267 /*
2268 * Allocate the ring for the hardware free list (with space
2269 * for its status page) along with the associated software
2270 * descriptor ring. The free list size needs to be a multiple
2271 * of the Egress Queue Unit and at least 2 Egress Units larger
2272 * than the SGE's Egress Congestion Threshold
2273 * (fl_starve_thres - 1).
2274 */
2275 if (fl->size < s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT)
2276 fl->size = s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT;
2277 fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
2278 fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
2279 sizeof(__be64), sizeof(struct rx_sw_desc),
2280 &fl->addr, &fl->sdesc, s->stat_len);
2281 if (!fl->desc) {
2282 ret = -ENOMEM;
2283 goto err;
2284 }
2285
2286 /*
2287 * Calculate the size of the hardware free list ring plus
2288 * Status Page (which the SGE will place after the end of the
2289 * free list ring) in Egress Queue Units.
2290 */
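/* Editor's note: a worked example under the usual assumption that
 * EQ_UNIT = 64: a Free List of 1024 pointers with a 128-byte Status
 * Page gives flsz = 1024/8 + 128/64 = 130 Egress Queue Units.
 */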
2291 flsz = (fl->size / FL_PER_EQ_UNIT +
2292 s->stat_len / EQ_UNIT);
2293
2294 /*
2295 * Fill in all the relevant firmware Ingress Queue Command
2296 * fields for the free list.
2297 */
2298 cmd.iqns_to_fl0congen =
2299 cpu_to_be32(
2300 FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
2301 FW_IQ_CMD_FL0PACKEN_F |
2302 FW_IQ_CMD_FL0PADEN_F);
2303
2304 /* In T6, an egress queue of type FL has an internal overhead
2305 * of 16B per header going into the FLM module. Hence the maximum
2306 * allowed burst size is 448 bytes. For T4/T5, the hardware
2307 * doesn't coalesce fetch requests if more than 64 bytes of
2308 * Free List pointers are provided, so we use a 128-byte Fetch
2309 * Burst Minimum there (T6 implements coalescing so we can use
2310 * the smaller 64-byte value there).
2311 */
2312 cmd.fl0dcaen_to_fl0cidxfthresh =
2313 cpu_to_be16(
2314 FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
2315 FETCHBURSTMIN_128B_X :
2316 FETCHBURSTMIN_64B_X) |
2317 FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
2318 FETCHBURSTMAX_512B_X :
2319 FETCHBURSTMAX_256B_X));
2320 cmd.fl0size = cpu_to_be16(flsz);
2321 cmd.fl0addr = cpu_to_be64(fl->addr);
2322 }
2323
2324 /*
2325 * Issue the firmware Ingress Queue Command and extract the results if
2326 * it completes successfully.
2327 */
2328 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2329 if (ret)
2330 goto err;
2331
2332 netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
2333 rspq->cur_desc = rspq->desc;
2334 rspq->cidx = 0;
2335 rspq->gen = 1;
2336 rspq->next_intr_params = rspq->intr_params;
2337 rspq->cntxt_id = be16_to_cpu(rpl.iqid);
2338 rspq->bar2_addr = bar2_address(adapter,
2339 rspq->cntxt_id,
2340 T4_BAR2_QTYPE_INGRESS,
2341 &rspq->bar2_qid);
2342 rspq->abs_id = be16_to_cpu(rpl.physiqid);
2343 rspq->size--; /* subtract status entry */
2344 rspq->adapter = adapter;
2345 rspq->netdev = dev;
2346 rspq->handler = hnd;
2347
2348 /* set offset to -1 to distinguish ingress queues without FL */
2349 rspq->offset = fl ? 0 : -1;
2350
2351 if (fl) {
2352 fl->cntxt_id = be16_to_cpu(rpl.fl0id);
2353 fl->avail = 0;
2354 fl->pend_cred = 0;
2355 fl->pidx = 0;
2356 fl->cidx = 0;
2357 fl->alloc_failed = 0;
2358 fl->large_alloc_failed = 0;
2359 fl->starving = 0;
2360
2361 /* Note, we must initialize the BAR2 Free List User Doorbell
2362 * information before refilling the Free List!
2363 */
2364 fl->bar2_addr = bar2_address(adapter,
2365 fl->cntxt_id,
2366 T4_BAR2_QTYPE_EGRESS,
2367 &fl->bar2_qid);
2368
2369 refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
2370 }
2371
2372 return 0;
2373
2374 err:
2375 /*
2376 * An error occurred. Clean up our partial allocation state and
2377 * return the error.
2378 */
2379 if (rspq->desc) {
2380 dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
2381 rspq->desc, rspq->phys_addr);
2382 rspq->desc = NULL;
2383 }
2384 if (fl && fl->desc) {
2385 kfree(fl->sdesc);
2386 fl->sdesc = NULL;
2387 dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
2388 fl->desc, fl->addr);
2389 fl->desc = NULL;
2390 }
2391 return ret;
2392 }
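/* Editor's note: a hedged usage sketch, not code from this file. A typical
 * Ethernet queue set pairs a Response Queue with a Free List and the
 * t4vf_ethrx_handler() above; the msix index and the surrounding setup
 * helper are illustrative assumptions.
 */
static int example_setup_rxq(struct adapter *adapter, struct sge_eth_rxq *rxq,
			     struct net_device *dev, int msix)
{
	/* Not a firmware event queue, so iqasynch == false; direct this
	 * queue's interrupts at MSI-X vector 'msix' (re-routed to the
	 * Forwarded Interrupt Queue automatically in MSI mode).
	 */
	return t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false, dev, msix,
				  &rxq->fl, t4vf_ethrx_handler);
}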
2393
2394 /**
2395 * t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
2396 * @adapter: the adapter
2397 * @txq: pointer to the new txq to be filled in
2398 * @dev: the network device; @devq: the netdev TX queue for the new txq
2399 * @iqid: the relative ingress queue ID to which events relating to
2400 * the new txq should be directed
2401 */
2402 int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2403 struct net_device *dev, struct netdev_queue *devq,
2404 unsigned int iqid)
2405 {
2406 struct sge *s = &adapter->sge;
2407 int ret, nentries;
2408 struct fw_eq_eth_cmd cmd, rpl;
2409 struct port_info *pi = netdev_priv(dev);
2410
2411 /*
2412 * Calculate the size of the hardware TX Queue (including the Status
2413 * Page on the end of the TX Queue) in units of TX Descriptors.
2414 */
2415 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2416
2417 /*
2418 * Allocate the hardware ring for the TX ring (with space for its
2419 * status page) along with the associated software descriptor ring.
2420 */
2421 txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
2422 sizeof(struct tx_desc),
2423 sizeof(struct tx_sw_desc),
2424 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
2425 if (!txq->q.desc)
2426 return -ENOMEM;
2427
2428 /*
2429 * Fill in the Egress Queue Command. Note: As with the direct use of
2430 * the firmware Ingress Queue Command above in our RXQ allocation
2431 * routine, ideally, this code would be in t4vf_hw.c. Again, we'll
2432 * have to see if there's some reasonable way to parameterize it
2433 * into the common code ...
2434 */
2435 memset(&cmd, 0, sizeof(cmd));
2436 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
2437 FW_CMD_REQUEST_F |
2438 FW_CMD_WRITE_F |
2439 FW_CMD_EXEC_F);
2440 cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F |
2441 FW_EQ_ETH_CMD_EQSTART_F |
2442 FW_LEN16(cmd));
2443 cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
2444 FW_EQ_ETH_CMD_VIID_V(pi->viid));
2445 cmd.fetchszm_to_iqid =
2446 cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) |
2447 FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) |
2448 FW_EQ_ETH_CMD_IQID_V(iqid));
2449 cmd.dcaen_to_eqsize =
2450 cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(SGE_FETCHBURSTMIN_64B) |
2451 FW_EQ_ETH_CMD_FBMAX_V(SGE_FETCHBURSTMAX_512B) |
2452 FW_EQ_ETH_CMD_CIDXFTHRESH_V(
2453 SGE_CIDXFLUSHTHRESH_32) |
2454 FW_EQ_ETH_CMD_EQSIZE_V(nentries));
2455 cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
2456
2457 /*
2458 * Issue the firmware Egress Queue Command and extract the results if
2459 * it completes successfully.
2460 */
2461 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2462 if (ret) {
2463 /*
2464 * The firmware Egress Queue Command failed for some reason.
2465 * Free up our partial allocation state and return the error.
2466 */
2467 kfree(txq->q.sdesc);
2468 txq->q.sdesc = NULL;
2469 dma_free_coherent(adapter->pdev_dev,
2470 nentries * sizeof(struct tx_desc),
2471 txq->q.desc, txq->q.phys_addr);
2472 txq->q.desc = NULL;
2473 return ret;
2474 }
2475
2476 txq->q.in_use = 0;
2477 txq->q.cidx = 0;
2478 txq->q.pidx = 0;
2479 txq->q.stat = (void *)&txq->q.desc[txq->q.size];
2480 txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd));
2481 txq->q.bar2_addr = bar2_address(adapter,
2482 txq->q.cntxt_id,
2483 T4_BAR2_QTYPE_EGRESS,
2484 &txq->q.bar2_qid);
2485 txq->q.abs_id =
2486 FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd));
2487 txq->txq = devq;
2488 txq->tso = 0;
2489 txq->tx_cso = 0;
2490 txq->vlan_ins = 0;
2491 txq->q.stops = 0;
2492 txq->q.restarts = 0;
2493 txq->mapping_err = 0;
2494 return 0;
2495 }
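/* Editor's note: a matching hedged sketch for the TX side. Directing TX
 * completion events at the firmware event queue mirrors common usage in
 * the cxgb4vf driver, but the helper and the 'qs' index here are
 * illustrative assumptions.
 */
static int example_setup_txq(struct adapter *adapter, struct sge_eth_txq *txq,
			     struct net_device *dev, int qs)
{
	return t4vf_sge_alloc_eth_txq(adapter, txq, dev,
				      netdev_get_tx_queue(dev, qs),
				      adapter->sge.fw_evtq.cntxt_id);
}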
2496
2497 /*
2498 * Free the DMA map resources associated with a TX queue.
2499 */
2500 static void free_txq(struct adapter *adapter, struct sge_txq *tq)
2501 {
2502 struct sge *s = &adapter->sge;
2503
2504 dma_free_coherent(adapter->pdev_dev,
2505 tq->size * sizeof(*tq->desc) + s->stat_len,
2506 tq->desc, tq->phys_addr);
2507 tq->cntxt_id = 0;
2508 tq->sdesc = NULL;
2509 tq->desc = NULL;
2510 }
2511
2512 /*
2513 * Free the resources associated with a response queue (possibly including a
2514 * free list).
2515 */
2516 static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
2517 struct sge_fl *fl)
2518 {
2519 struct sge *s = &adapter->sge;
2520 unsigned int flid = fl ? fl->cntxt_id : 0xffff;
2521
2522 t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
2523 rspq->cntxt_id, flid, 0xffff);
2524 dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
2525 rspq->desc, rspq->phys_addr);
2526 netif_napi_del(&rspq->napi);
2527 rspq->netdev = NULL;
2528 rspq->cntxt_id = 0;
2529 rspq->abs_id = 0;
2530 rspq->desc = NULL;
2531
2532 if (fl) {
2533 free_rx_bufs(adapter, fl, fl->avail);
2534 dma_free_coherent(adapter->pdev_dev,
2535 fl->size * sizeof(*fl->desc) + s->stat_len,
2536 fl->desc, fl->addr);
2537 kfree(fl->sdesc);
2538 fl->sdesc = NULL;
2539 fl->cntxt_id = 0;
2540 fl->desc = NULL;
2541 }
2542 }
2543
2544 /**
2545 * t4vf_free_sge_resources - free SGE resources
2546 * @adapter: the adapter
2547 *
2548 * Frees resources used by the SGE queue sets.
2549 */
2550 void t4vf_free_sge_resources(struct adapter *adapter)
2551 {
2552 struct sge *s = &adapter->sge;
2553 struct sge_eth_rxq *rxq = s->ethrxq;
2554 struct sge_eth_txq *txq = s->ethtxq;
2555 struct sge_rspq *evtq = &s->fw_evtq;
2556 struct sge_rspq *intrq = &s->intrq;
2557 int qs;
2558
2559 for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
2560 if (rxq->rspq.desc)
2561 free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
2562 if (txq->q.desc) {
2563 t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
2564 free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
2565 kfree(txq->q.sdesc);
2566 free_txq(adapter, &txq->q);
2567 }
2568 }
2569 if (evtq->desc)
2570 free_rspq_fl(adapter, evtq, NULL);
2571 if (intrq->desc)
2572 free_rspq_fl(adapter, intrq, NULL);
2573 }
2574
2575 /**
2576 * t4vf_sge_start - enable SGE operation
2577 * @adapter: the adapter
2578 *
2579 * Start tasklets and timers associated with the DMA engine.
2580 */
2581 void t4vf_sge_start(struct adapter *adapter)
2582 {
2583 adapter->sge.ethtxq_rover = 0;
2584 mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2585 mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2586 }
2587
2588 /**
2589 * t4vf_sge_stop - disable SGE operation
2590 * @adapter: the adapter
2591 *
2592 * Stop tasklets and timers associated with the DMA engine. Note that
2593 * this is effective only if measures have been taken to disable any HW
2594 * events that may restart them.
2595 */
2596 void t4vf_sge_stop(struct adapter *adapter)
2597 {
2598 struct sge *s = &adapter->sge;
2599
2600 if (s->rx_timer.function)
2601 del_timer_sync(&s->rx_timer);
2602 if (s->tx_timer.function)
2603 del_timer_sync(&s->tx_timer);
2604 }
2605
2606 /**
2607 * t4vf_sge_init - initialize SGE
2608 * @adapter: the adapter
2609 *
2610 * Performs SGE initialization needed every time after a chip reset.
2611 * We do not initialize any of the queue sets here; instead, the top-level
2612 * driver must request those individually. We also do not enable DMA
2613 * here; that should be done after the queues have been set up.
2614 */
2615 int t4vf_sge_init(struct adapter *adapter)
2616 {
2617 struct sge_params *sge_params = &adapter->params.sge;
2618 u32 fl0 = sge_params->sge_fl_buffer_size[0];
2619 u32 fl1 = sge_params->sge_fl_buffer_size[1];
2620 struct sge *s = &adapter->sge;
2621
2622 /*
2623 * Start by vetting the basic SGE parameters which have been set up by
2624 * the Physical Function Driver. Ideally we should be able to deal
2625 * with _any_ configuration. Practice is different ...
2626 */
2627 if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
2628 dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
2629 fl0, fl1);
2630 return -EINVAL;
2631 }
2632 if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
2633 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
2634 dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
2635 return -EINVAL;
2636 }
2637
2638 /*
2639 * Now translate the adapter parameters into our internal forms.
2640 */
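/* Editor's note: fl1, when provided, is the large buffer size; it is
 * recorded as an allocation order relative to PAGE_SIZE (e.g. a 64KB
 * buffer with 4KB pages gives fl_pg_order = 4).
 */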
2641 if (fl1)
2642 s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
2643 s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
2644 ? 128 : 64);
2645 s->pktshift = PKTSHIFT_G(sge_params->sge_control);
2646 s->fl_align = t4vf_fl_pkt_align(adapter);
2647
2648 /* A FL with <= fl_starve_thres buffers is starving and a periodic
2649 * timer will attempt to refill it. This needs to be larger than the
2650 * SGE's Egress Congestion Threshold. If it isn't, then we can get
2651 * stuck waiting for new packets while the SGE is waiting for us to
2652 * give it more Free List entries. (Note that the SGE's Egress
2653 * Congestion Threshold is in units of 2 Free List pointers.)
2654 */
2655 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
2656 case CHELSIO_T4:
2657 s->fl_starve_thres =
2658 EGRTHRESHOLD_G(sge_params->sge_congestion_control);
2659 break;
2660 case CHELSIO_T5:
2661 s->fl_starve_thres =
2662 EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
2663 break;
2664 case CHELSIO_T6:
2665 default:
2666 s->fl_starve_thres =
2667 T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
2668 break;
2669 }
2670 s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
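/* Editor's note: the register value is in units of 2 Free List pointers
 * (per the comment above), so e.g. a threshold of 100 becomes
 * 100 * 2 + 1 = 201 pointers, kept strictly above the congestion point.
 */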
2671
2672 /*
2673 * Set up tasklet timers.
2674 */
2675 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter);
2676 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter);
2677
2678 /*
2679 * Initialize Forwarded Interrupt Queue lock.
2680 */
2681 spin_lock_init(&s->intrq_lock);
2682
2683 return 0;
2684 }