drivers/net/cxgb3/sge.c (mirror_ubuntu-artful-kernel.git)
4d22de3e 1/*
1d68e93d 2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
4d22de3e 3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
4d22de3e 9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
4d22de3e 31 */
32#include <linux/skbuff.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/dma-mapping.h>
39#include "common.h"
40#include "regs.h"
41#include "sge_defs.h"
42#include "t3_cpl.h"
43#include "firmware_exports.h"
44
45#define USE_GTS 0
46
47#define SGE_RX_SM_BUF_SIZE 1536
e0994eb1 48
4d22de3e 49#define SGE_RX_COPY_THRES 256
cf992af5 50#define SGE_RX_PULL_LEN 128
4d22de3e 51
e0994eb1 52/*
53 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
54 * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
55 * directly.
e0994eb1 56 */
57#define FL0_PG_CHUNK_SIZE 2048
58
e0994eb1 59#define SGE_RX_DROP_THRES 16
60
61/*
62 * Period of the Tx buffer reclaim timer. This timer does not need to run
63 * frequently as Tx buffers are usually reclaimed by new Tx packets.
64 */
65#define TX_RECLAIM_PERIOD (HZ / 4)
66
67/* WR size in bytes */
68#define WR_LEN (WR_FLITS * 8)
69
70/*
71 * Types of Tx queues in each queue set. Order here matters, do not change.
72 */
73enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
74
75/* Values for sge_txq.flags */
76enum {
77 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
78 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
79};
80
81struct tx_desc {
fb8e4444 82 __be64 flit[TX_DESC_FLITS];
83};
84
85struct rx_desc {
86 __be32 addr_lo;
87 __be32 len_gen;
88 __be32 gen2;
89 __be32 addr_hi;
90};
91
92struct tx_sw_desc { /* SW state per Tx descriptor */
93 struct sk_buff *skb;
94 u8 eop; /* set if last descriptor for packet */
95 u8 addr_idx; /* buffer index of first SGL entry in descriptor */
96 u8 fragidx; /* first page fragment associated with descriptor */
97 s8 sflit; /* start flit of first SGL entry in descriptor */
98};
99
cf992af5 100struct rx_sw_desc { /* SW state per Rx descriptor */
101 union {
102 struct sk_buff *skb;
103 struct fl_pg_chunk pg_chunk;
104 };
105 DECLARE_PCI_UNMAP_ADDR(dma_addr);
106};
107
108struct rsp_desc { /* response queue descriptor */
109 struct rss_header rss_hdr;
110 __be32 flags;
111 __be32 len_cq;
112 u8 imm_data[47];
113 u8 intr_gen;
114};
115
116/*
117 * Holds unmapping information for Tx packets that need deferred unmapping.
118 * This structure lives at skb->head and must be allocated by callers.
119 */
120struct deferred_unmap_info {
121 struct pci_dev *pdev;
122 dma_addr_t addr[MAX_SKB_FRAGS + 1];
123};
124
125/*
126 * Maps a number of flits to the number of Tx descriptors that can hold them.
127 * The formula is
128 *
129 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
130 *
131 * HW allows up to 4 descriptors to be combined into a WR.
132 */
133static u8 flit_desc_map[] = {
134 0,
135#if SGE_NUM_GENBITS == 1
136 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
137 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
138 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
139 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
140#elif SGE_NUM_GENBITS == 2
141 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
142 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
143 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
144 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
145#else
146# error "SGE_NUM_GENBITS must be 1 or 2"
147#endif
148};
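/*
 * Worked example, reading the SGE_NUM_GENBITS == 2 table above: a WR of up
 * to 15 flits fits in one Tx descriptor, 16 flits spill into a second, and
 * 44 flits need four, i.e. flits_to_desc(15) == 1, flits_to_desc(16) == 2,
 * flits_to_desc(44) == 4 (see flits_to_desc() below).
 */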
149
150static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
151{
152 return container_of(q, struct sge_qset, fl[qidx]);
153}
154
155static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
156{
157 return container_of(q, struct sge_qset, rspq);
158}
159
160static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
161{
162 return container_of(q, struct sge_qset, txq[qidx]);
163}
164
165/**
166 * refill_rspq - replenish an SGE response queue
167 * @adapter: the adapter
168 * @q: the response queue to replenish
169 * @credits: how many new responses to make available
170 *
171 * Replenishes a response queue by making the supplied number of responses
172 * available to HW.
173 */
174static inline void refill_rspq(struct adapter *adapter,
175 const struct sge_rspq *q, unsigned int credits)
176{
afefce66 177 rmb();
178 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
179 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
180}
181
182/**
183 * need_skb_unmap - does the platform need unmapping of sk_buffs?
184 *
 185 * Returns true if the platform needs sk_buff unmapping. The compiler
 186 * optimizes away the unmapping code on platforms where it is not needed.
187 */
188static inline int need_skb_unmap(void)
189{
190 /*
 191 * This structure is used to tell if the platform needs buffer
192 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
193 */
194 struct dummy {
195 DECLARE_PCI_UNMAP_ADDR(addr);
196 };
197
198 return sizeof(struct dummy) != 0;
199}
200
201/**
202 * unmap_skb - unmap a packet main body and its page fragments
203 * @skb: the packet
204 * @q: the Tx queue containing Tx descriptors for the packet
205 * @cidx: index of Tx descriptor
206 * @pdev: the PCI device
207 *
208 * Unmap the main body of an sk_buff and its page fragments, if any.
209 * Because of the fairly complicated structure of our SGLs and the desire
210 * to conserve space for metadata, the information necessary to unmap an
211 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
212 * descriptors (the physical addresses of the various data buffers), and
213 * the SW descriptor state (assorted indices). The send functions
214 * initialize the indices for the first packet descriptor so we can unmap
215 * the buffers held in the first Tx descriptor here, and we have enough
216 * information at this point to set the state for the next Tx descriptor.
217 *
218 * Note that it is possible to clean up the first descriptor of a packet
219 * before the send routines have written the next descriptors, but this
220 * race does not cause any problem. We just end up writing the unmapping
221 * info for the descriptor first.
222 */
223static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
224 unsigned int cidx, struct pci_dev *pdev)
225{
226 const struct sg_ent *sgp;
227 struct tx_sw_desc *d = &q->sdesc[cidx];
228 int nfrags, frag_idx, curflit, j = d->addr_idx;
4d22de3e 229
230 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
231 frag_idx = d->fragidx;
4d22de3e 232
233 if (frag_idx == 0 && skb_headlen(skb)) {
234 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
235 skb_headlen(skb), PCI_DMA_TODEVICE);
236 j = 1;
237 }
238
23561c94 239 curflit = d->sflit + 1 + j;
240 nfrags = skb_shinfo(skb)->nr_frags;
241
242 while (frag_idx < nfrags && curflit < WR_FLITS) {
243 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
244 skb_shinfo(skb)->frags[frag_idx].size,
245 PCI_DMA_TODEVICE);
246 j ^= 1;
247 if (j == 0) {
248 sgp++;
249 curflit++;
250 }
251 curflit++;
252 frag_idx++;
253 }
254
255 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
256 d = cidx + 1 == q->size ? q->sdesc : d + 1;
257 d->fragidx = frag_idx;
258 d->addr_idx = j;
259 d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
260 }
261}
262
263/**
264 * free_tx_desc - reclaims Tx descriptors and their buffers
265 * @adapter: the adapter
266 * @q: the Tx queue to reclaim descriptors from
267 * @n: the number of descriptors to reclaim
268 *
269 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
270 * Tx buffers. Called with the Tx queue lock held.
271 */
272static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
273 unsigned int n)
274{
275 struct tx_sw_desc *d;
276 struct pci_dev *pdev = adapter->pdev;
277 unsigned int cidx = q->cidx;
278
279 const int need_unmap = need_skb_unmap() &&
280 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
281
282 d = &q->sdesc[cidx];
283 while (n--) {
284 if (d->skb) { /* an SGL is present */
99d7cf30 285 if (need_unmap)
4d22de3e 286 unmap_skb(d->skb, q, cidx, pdev);
23561c94 287 if (d->eop)
288 kfree_skb(d->skb);
289 }
290 ++d;
291 if (++cidx == q->size) {
292 cidx = 0;
293 d = q->sdesc;
294 }
295 }
296 q->cidx = cidx;
297}
298
299/**
300 * reclaim_completed_tx - reclaims completed Tx descriptors
301 * @adapter: the adapter
302 * @q: the Tx queue to reclaim completed descriptors from
303 *
304 * Reclaims Tx descriptors that the SGE has indicated it has processed,
305 * and frees the associated buffers if possible. Called with the Tx
306 * queue's lock held.
307 */
308static inline void reclaim_completed_tx(struct adapter *adapter,
309 struct sge_txq *q)
310{
311 unsigned int reclaim = q->processed - q->cleaned;
312
313 if (reclaim) {
314 free_tx_desc(adapter, q, reclaim);
315 q->cleaned += reclaim;
316 q->in_use -= reclaim;
317 }
318}
319
320/**
321 * should_restart_tx - are there enough resources to restart a Tx queue?
322 * @q: the Tx queue
323 *
324 * Checks if there are enough descriptors to restart a suspended Tx queue.
325 */
326static inline int should_restart_tx(const struct sge_txq *q)
327{
328 unsigned int r = q->processed - q->cleaned;
329
330 return q->in_use - r < (q->size >> 1);
331}
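/*
 * Example: for a 1024-entry Tx queue the check above reports the queue as
 * restartable once fewer than 512 descriptors are genuinely in use, i.e.
 * in_use minus descriptors the SGE has processed but we have not cleaned.
 */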
332
333/**
334 * free_rx_bufs - free the Rx buffers on an SGE free list
335 * @pdev: the PCI device associated with the adapter
336 * @rxq: the SGE free list to clean up
337 *
338 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
339 * this queue should be stopped before calling this function.
340 */
341static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
342{
343 unsigned int cidx = q->cidx;
344
345 while (q->credits--) {
346 struct rx_sw_desc *d = &q->sdesc[cidx];
347
348 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
349 q->buf_size, PCI_DMA_FROMDEVICE);
350 if (q->use_pages) {
351 put_page(d->pg_chunk.page);
352 d->pg_chunk.page = NULL;
e0994eb1 353 } else {
354 kfree_skb(d->skb);
355 d->skb = NULL;
e0994eb1 356 }
357 if (++cidx == q->size)
358 cidx = 0;
359 }
e0994eb1 360
361 if (q->pg_chunk.page) {
362 __free_page(q->pg_chunk.page);
363 q->pg_chunk.page = NULL;
364 }
365}
366
367/**
368 * add_one_rx_buf - add a packet buffer to a free-buffer list
cf992af5 369 * @va: buffer start VA
370 * @len: the buffer length
371 * @d: the HW Rx descriptor to write
372 * @sd: the SW Rx descriptor to write
373 * @gen: the generation bit value
374 * @pdev: the PCI device associated with the adapter
375 *
376 * Add a buffer of the given length to the supplied HW and SW Rx
377 * descriptors.
378 */
cf992af5 379static inline void add_one_rx_buf(void *va, unsigned int len,
380 struct rx_desc *d, struct rx_sw_desc *sd,
381 unsigned int gen, struct pci_dev *pdev)
382{
383 dma_addr_t mapping;
384
e0994eb1 385 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
386 pci_unmap_addr_set(sd, dma_addr, mapping);
387
388 d->addr_lo = cpu_to_be32(mapping);
389 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
390 wmb();
391 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
392 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
393}
394
395static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
396{
397 if (!q->pg_chunk.page) {
398 q->pg_chunk.page = alloc_page(gfp);
399 if (unlikely(!q->pg_chunk.page))
400 return -ENOMEM;
401 q->pg_chunk.va = page_address(q->pg_chunk.page);
402 q->pg_chunk.offset = 0;
403 }
404 sd->pg_chunk = q->pg_chunk;
405
406 q->pg_chunk.offset += q->buf_size;
407 if (q->pg_chunk.offset == PAGE_SIZE)
408 q->pg_chunk.page = NULL;
409 else {
410 q->pg_chunk.va += q->buf_size;
411 get_page(q->pg_chunk.page);
412 }
413 return 0;
414}
415
416/**
417 * refill_fl - refill an SGE free-buffer list
418 * @adapter: the adapter
419 * @q: the free-list to refill
420 * @n: the number of new buffers to allocate
421 * @gfp: the gfp flags for allocating new buffers
422 *
423 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
424 * allocated with the supplied gfp flags. The caller must assure that
425 * @n does not exceed the queue's capacity.
426 */
427static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
428{
cf992af5 429 void *buf_start;
430 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
431 struct rx_desc *d = &q->desc[q->pidx];
432
433 while (n--) {
434 if (q->use_pages) {
435 if (unlikely(alloc_pg_chunk(q, sd, gfp))) {
436nomem: q->alloc_failed++;
437 break;
438 }
cf992af5 439 buf_start = sd->pg_chunk.va;
e0994eb1 440 } else {
cf992af5 441 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
e0994eb1 442
443 if (!skb)
444 goto nomem;
e0994eb1 445
446 sd->skb = skb;
447 buf_start = skb->data;
448 }
449
450 add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
451 adap->pdev);
452 d++;
453 sd++;
454 if (++q->pidx == q->size) {
455 q->pidx = 0;
456 q->gen ^= 1;
457 sd = q->sdesc;
458 d = q->desc;
459 }
460 q->credits++;
461 }
afefce66 462 wmb();
463 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
464}
465
466static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
467{
468 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
469}
470
471/**
472 * recycle_rx_buf - recycle a receive buffer
473 * @adapter: the adapter
474 * @q: the SGE free list
475 * @idx: index of buffer to recycle
476 *
477 * Recycles the specified buffer on the given free list by adding it at
478 * the next available slot on the list.
479 */
480static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
481 unsigned int idx)
482{
483 struct rx_desc *from = &q->desc[idx];
484 struct rx_desc *to = &q->desc[q->pidx];
485
cf992af5 486 q->sdesc[q->pidx] = q->sdesc[idx];
487 to->addr_lo = from->addr_lo; /* already big endian */
488 to->addr_hi = from->addr_hi; /* likewise */
489 wmb();
490 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
491 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
492 q->credits++;
493
494 if (++q->pidx == q->size) {
495 q->pidx = 0;
496 q->gen ^= 1;
497 }
498 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
499}
500
501/**
502 * alloc_ring - allocate resources for an SGE descriptor ring
503 * @pdev: the PCI device
504 * @nelem: the number of descriptors
505 * @elem_size: the size of each descriptor
506 * @sw_size: the size of the SW state associated with each ring element
507 * @phys: the physical address of the allocated ring
508 * @metadata: address of the array holding the SW state for the ring
509 *
510 * Allocates resources for an SGE descriptor ring, such as Tx queues,
511 * free buffer lists, or response queues. Each SGE ring requires
512 * space for its HW descriptors plus, optionally, space for the SW state
513 * associated with each HW entry (the metadata). The function returns
514 * three values: the virtual address for the HW ring (the return value
515 * of the function), the physical address of the HW ring, and the address
516 * of the SW ring.
517 */
518static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
e0994eb1 519 size_t sw_size, dma_addr_t * phys, void *metadata)
520{
521 size_t len = nelem * elem_size;
522 void *s = NULL;
523 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
524
525 if (!p)
526 return NULL;
527 if (sw_size) {
528 s = kcalloc(nelem, sw_size, GFP_KERNEL);
529
530 if (!s) {
531 dma_free_coherent(&pdev->dev, len, p, *phys);
532 return NULL;
533 }
534 }
535 if (metadata)
536 *(void **)metadata = s;
537 memset(p, 0, len);
538 return p;
539}
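/*
 * Illustrative call, a sketch of how queue-set setup is expected to use
 * alloc_ring() rather than code quoted from this file: allocate a free-list
 * ring together with its per-entry SW state in one shot,
 *
 *	q->fl[0].desc = alloc_ring(pdev, q->fl[0].size, sizeof(struct rx_desc),
 *				   sizeof(struct rx_sw_desc),
 *				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
 *
 * The return value is the HW ring, *phys receives its bus address, and the
 * SW ring pointer is written back through @metadata.
 */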
540
541/**
 542 * t3_free_qset - free the resources of an SGE queue set
543 * @adapter: the adapter owning the queue set
544 * @q: the queue set
545 *
546 * Release the HW and SW resources associated with an SGE queue set, such
547 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
548 * queue set must be quiesced prior to calling this.
549 */
9265fabf 550static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
551{
552 int i;
553 struct pci_dev *pdev = adapter->pdev;
554
555 if (q->tx_reclaim_timer.function)
556 del_timer_sync(&q->tx_reclaim_timer);
557
558 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
559 if (q->fl[i].desc) {
560 spin_lock(&adapter->sge.reg_lock);
561 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
562 spin_unlock(&adapter->sge.reg_lock);
563 free_rx_bufs(pdev, &q->fl[i]);
564 kfree(q->fl[i].sdesc);
565 dma_free_coherent(&pdev->dev,
566 q->fl[i].size *
567 sizeof(struct rx_desc), q->fl[i].desc,
568 q->fl[i].phys_addr);
569 }
570
571 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
572 if (q->txq[i].desc) {
573 spin_lock(&adapter->sge.reg_lock);
574 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
575 spin_unlock(&adapter->sge.reg_lock);
576 if (q->txq[i].sdesc) {
577 free_tx_desc(adapter, &q->txq[i],
578 q->txq[i].in_use);
579 kfree(q->txq[i].sdesc);
580 }
581 dma_free_coherent(&pdev->dev,
582 q->txq[i].size *
583 sizeof(struct tx_desc),
584 q->txq[i].desc, q->txq[i].phys_addr);
585 __skb_queue_purge(&q->txq[i].sendq);
586 }
587
588 if (q->rspq.desc) {
589 spin_lock(&adapter->sge.reg_lock);
590 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
591 spin_unlock(&adapter->sge.reg_lock);
592 dma_free_coherent(&pdev->dev,
593 q->rspq.size * sizeof(struct rsp_desc),
594 q->rspq.desc, q->rspq.phys_addr);
595 }
596
597 memset(q, 0, sizeof(*q));
598}
599
600/**
601 * init_qset_cntxt - initialize an SGE queue set context info
602 * @qs: the queue set
603 * @id: the queue set id
604 *
605 * Initializes the TIDs and context ids for the queues of a queue set.
606 */
607static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
608{
609 qs->rspq.cntxt_id = id;
610 qs->fl[0].cntxt_id = 2 * id;
611 qs->fl[1].cntxt_id = 2 * id + 1;
612 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
613 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
614 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
615 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
616 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
617}
618
619/**
620 * sgl_len - calculates the size of an SGL of the given capacity
621 * @n: the number of SGL entries
622 *
623 * Calculates the number of flits needed for a scatter/gather list that
624 * can hold the given number of entries.
625 */
626static inline unsigned int sgl_len(unsigned int n)
627{
628 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
629 return (3 * n) / 2 + (n & 1);
630}
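/*
 * Example: a struct sg_ent packs two address/length pairs into three flits,
 * so sgl_len(2) == 3, while a trailing odd entry costs two more flits:
 * sgl_len(3) == (3 * 3) / 2 + 1 == 5.
 */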
631
632/**
633 * flits_to_desc - returns the num of Tx descriptors for the given flits
634 * @n: the number of flits
635 *
636 * Calculates the number of Tx descriptors needed for the supplied number
637 * of flits.
638 */
639static inline unsigned int flits_to_desc(unsigned int n)
640{
641 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
642 return flit_desc_map[n];
643}
644
645/**
646 * get_packet - return the next ingress packet buffer from a free list
647 * @adap: the adapter that received the packet
648 * @fl: the SGE free list holding the packet
649 * @len: the packet length including any SGE padding
650 * @drop_thres: # of remaining buffers before we start dropping packets
651 *
652 * Get the next packet from a free list and complete setup of the
653 * sk_buff. If the packet is small we make a copy and recycle the
654 * original buffer, otherwise we use the original buffer itself. If a
655 * positive drop threshold is supplied packets are dropped and their
656 * buffers recycled if (a) the number of remaining buffers is under the
657 * threshold and the packet is too big to copy, or (b) the packet should
658 * be copied but there is no memory for the copy.
659 */
660static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
661 unsigned int len, unsigned int drop_thres)
662{
663 struct sk_buff *skb = NULL;
664 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
665
666 prefetch(sd->skb->data);
667 fl->credits--;
668
669 if (len <= SGE_RX_COPY_THRES) {
670 skb = alloc_skb(len, GFP_ATOMIC);
671 if (likely(skb != NULL)) {
672 __skb_put(skb, len);
673 pci_dma_sync_single_for_cpu(adap->pdev,
674 pci_unmap_addr(sd, dma_addr), len,
675 PCI_DMA_FROMDEVICE);
676 memcpy(skb->data, sd->skb->data, len);
677 pci_dma_sync_single_for_device(adap->pdev,
678 pci_unmap_addr(sd, dma_addr), len,
679 PCI_DMA_FROMDEVICE);
680 } else if (!drop_thres)
681 goto use_orig_buf;
682recycle:
683 recycle_rx_buf(adap, fl, fl->cidx);
684 return skb;
685 }
686
687 if (unlikely(fl->credits < drop_thres))
688 goto recycle;
689
690use_orig_buf:
691 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
692 fl->buf_size, PCI_DMA_FROMDEVICE);
693 skb = sd->skb;
694 skb_put(skb, len);
695 __refill_fl(adap, fl);
696 return skb;
697}
698
699/**
700 * get_packet_pg - return the next ingress packet buffer from a free list
701 * @adap: the adapter that received the packet
702 * @fl: the SGE free list holding the packet
703 * @len: the packet length including any SGE padding
704 * @drop_thres: # of remaining buffers before we start dropping packets
705 *
706 * Get the next packet from a free list populated with page chunks.
707 * If the packet is small we make a copy and recycle the original buffer,
708 * otherwise we attach the original buffer as a page fragment to a fresh
709 * sk_buff. If a positive drop threshold is supplied packets are dropped
710 * and their buffers recycled if (a) the number of remaining buffers is
711 * under the threshold and the packet is too big to copy, or (b) there's
712 * no system memory.
713 *
714 * Note: this function is similar to @get_packet but deals with Rx buffers
715 * that are page chunks rather than sk_buffs.
716 */
717static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
718 unsigned int len, unsigned int drop_thres)
719{
720 struct sk_buff *skb = NULL;
721 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
722
723 if (len <= SGE_RX_COPY_THRES) {
724 skb = alloc_skb(len, GFP_ATOMIC);
725 if (likely(skb != NULL)) {
726 __skb_put(skb, len);
727 pci_dma_sync_single_for_cpu(adap->pdev,
728 pci_unmap_addr(sd, dma_addr), len,
729 PCI_DMA_FROMDEVICE);
730 memcpy(skb->data, sd->pg_chunk.va, len);
731 pci_dma_sync_single_for_device(adap->pdev,
732 pci_unmap_addr(sd, dma_addr), len,
733 PCI_DMA_FROMDEVICE);
734 } else if (!drop_thres)
735 return NULL;
736recycle:
737 fl->credits--;
738 recycle_rx_buf(adap, fl, fl->cidx);
739 return skb;
740 }
741
742 if (unlikely(fl->credits <= drop_thres))
743 goto recycle;
744
745 skb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
746 if (unlikely(!skb)) {
747 if (!drop_thres)
748 return NULL;
749 goto recycle;
750 }
751
752 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
753 fl->buf_size, PCI_DMA_FROMDEVICE);
754 __skb_put(skb, SGE_RX_PULL_LEN);
755 memcpy(skb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
756 skb_fill_page_desc(skb, 0, sd->pg_chunk.page,
757 sd->pg_chunk.offset + SGE_RX_PULL_LEN,
758 len - SGE_RX_PULL_LEN);
759 skb->len = len;
760 skb->data_len = len - SGE_RX_PULL_LEN;
761 skb->truesize += skb->data_len;
762
763 fl->credits--;
764 /*
765 * We do not refill FLs here, we let the caller do it to overlap a
766 * prefetch.
767 */
768 return skb;
769}
770
771/**
772 * get_imm_packet - return the next ingress packet buffer from a response
773 * @resp: the response descriptor containing the packet data
774 *
775 * Return a packet containing the immediate data of the given response.
776 */
777static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
778{
779 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
780
781 if (skb) {
782 __skb_put(skb, IMMED_PKT_SIZE);
27d7ff46 783 skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
784 }
785 return skb;
786}
787
788/**
789 * calc_tx_descs - calculate the number of Tx descriptors for a packet
790 * @skb: the packet
791 *
792 * Returns the number of Tx descriptors needed for the given Ethernet
793 * packet. Ethernet packets require addition of WR and CPL headers.
794 */
795static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
796{
797 unsigned int flits;
798
799 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
800 return 1;
801
802 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
803 if (skb_shinfo(skb)->gso_size)
804 flits++;
805 return flits_to_desc(flits);
806}
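/*
 * Example: a non-TSO packet too long for immediate data, with linear data
 * plus two page fragments, needs sgl_len(2 + 1) + 2 = 7 flits, which the
 * flit_desc_map above turns into a single Tx descriptor.
 */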
807
808/**
809 * make_sgl - populate a scatter/gather list for a packet
810 * @skb: the packet
811 * @sgp: the SGL to populate
812 * @start: start address of skb main body data to include in the SGL
813 * @len: length of skb main body data to include in the SGL
814 * @pdev: the PCI device
815 *
816 * Generates a scatter/gather list for the buffers that make up a packet
817 * and returns the SGL size in 8-byte words. The caller must size the SGL
818 * appropriately.
819 */
820static inline unsigned int make_sgl(const struct sk_buff *skb,
821 struct sg_ent *sgp, unsigned char *start,
822 unsigned int len, struct pci_dev *pdev)
823{
824 dma_addr_t mapping;
825 unsigned int i, j = 0, nfrags;
826
827 if (len) {
828 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
829 sgp->len[0] = cpu_to_be32(len);
830 sgp->addr[0] = cpu_to_be64(mapping);
831 j = 1;
832 }
833
834 nfrags = skb_shinfo(skb)->nr_frags;
835 for (i = 0; i < nfrags; i++) {
836 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
837
838 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
839 frag->size, PCI_DMA_TODEVICE);
840 sgp->len[j] = cpu_to_be32(frag->size);
841 sgp->addr[j] = cpu_to_be64(mapping);
842 j ^= 1;
843 if (j == 0)
844 ++sgp;
845 }
846 if (j)
847 sgp->len[j] = 0;
848 return ((nfrags + (len != 0)) * 3) / 2 + j;
849}
850
851/**
852 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
853 * @adap: the adapter
854 * @q: the Tx queue
855 *
 856 * Ring the doorbell if a Tx queue is asleep. There is a natural race
 857 * where the HW may go to sleep just after we check; in that case the
 858 * interrupt handler will detect the outstanding TX packet and ring the
 859 * doorbell for us.
860 *
861 * When GTS is disabled we unconditionally ring the doorbell.
862 */
863static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
864{
865#if USE_GTS
866 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
867 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
868 set_bit(TXQ_LAST_PKT_DB, &q->flags);
869 t3_write_reg(adap, A_SG_KDOORBELL,
870 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
871 }
872#else
873 wmb(); /* write descriptors before telling HW */
874 t3_write_reg(adap, A_SG_KDOORBELL,
875 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
876#endif
877}
878
879static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
880{
881#if SGE_NUM_GENBITS == 2
882 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
883#endif
884}
885
886/**
887 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
888 * @ndesc: number of Tx descriptors spanned by the SGL
889 * @skb: the packet corresponding to the WR
890 * @d: first Tx descriptor to be written
891 * @pidx: index of above descriptors
892 * @q: the SGE Tx queue
893 * @sgl: the SGL
894 * @flits: number of flits to the start of the SGL in the first descriptor
895 * @sgl_flits: the SGL size in flits
896 * @gen: the Tx descriptor generation
897 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
898 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
899 *
900 * Write a work request header and an associated SGL. If the SGL is
901 * small enough to fit into one Tx descriptor it has already been written
902 * and we just need to write the WR header. Otherwise we distribute the
903 * SGL across the number of descriptors it spans.
904 */
905static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
906 struct tx_desc *d, unsigned int pidx,
907 const struct sge_txq *q,
908 const struct sg_ent *sgl,
909 unsigned int flits, unsigned int sgl_flits,
910 unsigned int gen, __be32 wr_hi,
911 __be32 wr_lo)
912{
913 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
914 struct tx_sw_desc *sd = &q->sdesc[pidx];
915
916 sd->skb = skb;
917 if (need_skb_unmap()) {
918 sd->fragidx = 0;
919 sd->addr_idx = 0;
920 sd->sflit = flits;
921 }
922
923 if (likely(ndesc == 1)) {
23561c94 924 sd->eop = 1;
925 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
926 V_WR_SGLSFLT(flits)) | wr_hi;
927 wmb();
928 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
929 V_WR_GEN(gen)) | wr_lo;
930 wr_gen2(d, gen);
931 } else {
932 unsigned int ogen = gen;
933 const u64 *fp = (const u64 *)sgl;
934 struct work_request_hdr *wp = wrp;
935
936 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
937 V_WR_SGLSFLT(flits)) | wr_hi;
938
939 while (sgl_flits) {
940 unsigned int avail = WR_FLITS - flits;
941
942 if (avail > sgl_flits)
943 avail = sgl_flits;
944 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
945 sgl_flits -= avail;
946 ndesc--;
947 if (!sgl_flits)
948 break;
949
950 fp += avail;
951 d++;
23561c94 952 sd->eop = 0;
953 sd++;
954 if (++pidx == q->size) {
955 pidx = 0;
956 gen ^= 1;
957 d = q->desc;
958 sd = q->sdesc;
959 }
960
961 sd->skb = skb;
962 wrp = (struct work_request_hdr *)d;
963 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
964 V_WR_SGLSFLT(1)) | wr_hi;
965 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
966 sgl_flits + 1)) |
967 V_WR_GEN(gen)) | wr_lo;
968 wr_gen2(d, gen);
969 flits = 1;
970 }
23561c94 971 sd->eop = 1;
972 wrp->wr_hi |= htonl(F_WR_EOP);
973 wmb();
974 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
975 wr_gen2((struct tx_desc *)wp, ogen);
976 WARN_ON(ndesc != 0);
977 }
978}
979
980/**
981 * write_tx_pkt_wr - write a TX_PKT work request
982 * @adap: the adapter
983 * @skb: the packet to send
984 * @pi: the egress interface
985 * @pidx: index of the first Tx descriptor to write
986 * @gen: the generation value to use
987 * @q: the Tx queue
988 * @ndesc: number of descriptors the packet will occupy
989 * @compl: the value of the COMPL bit to use
990 *
991 * Generate a TX_PKT work request to send the supplied packet.
992 */
993static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
994 const struct port_info *pi,
995 unsigned int pidx, unsigned int gen,
996 struct sge_txq *q, unsigned int ndesc,
997 unsigned int compl)
998{
999 unsigned int flits, sgl_flits, cntrl, tso_info;
1000 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1001 struct tx_desc *d = &q->desc[pidx];
1002 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1003
1004 cpl->len = htonl(skb->len | 0x80000000);
1005 cntrl = V_TXPKT_INTF(pi->port_id);
1006
1007 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1008 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
1009
1010 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1011 if (tso_info) {
1012 int eth_type;
1013 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1014
1015 d->flit[2] = 0;
1016 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1017 hdr->cntrl = htonl(cntrl);
bbe735e4 1018 eth_type = skb_network_offset(skb) == ETH_HLEN ?
1019 CPL_ETH_II : CPL_ETH_II_VLAN;
1020 tso_info |= V_LSO_ETH_TYPE(eth_type) |
eddc9ec5 1021 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
aa8223c7 1022 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1023 hdr->lso_info = htonl(tso_info);
1024 flits = 3;
1025 } else {
1026 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1027 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
1028 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1029 cpl->cntrl = htonl(cntrl);
1030
1031 if (skb->len <= WR_LEN - sizeof(*cpl)) {
1032 q->sdesc[pidx].skb = NULL;
1033 if (!skb->data_len)
1034 skb_copy_from_linear_data(skb, &d->flit[2],
1035 skb->len);
1036 else
1037 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1038
1039 flits = (skb->len + 7) / 8 + 2;
1040 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1041 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1042 | F_WR_SOP | F_WR_EOP | compl);
1043 wmb();
1044 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1045 V_WR_TID(q->token));
1046 wr_gen2(d, gen);
1047 kfree_skb(skb);
1048 return;
1049 }
1050
1051 flits = 2;
1052 }
1053
1054 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1055 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
1056
1057 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1058 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1059 htonl(V_WR_TID(q->token)));
1060}
1061
1062static inline void t3_stop_queue(struct net_device *dev, struct sge_qset *qs,
1063 struct sge_txq *q)
1064{
1065 netif_stop_queue(dev);
1066 set_bit(TXQ_ETH, &qs->txq_stopped);
1067 q->stops++;
1068}
1069
1070/**
 1071 * t3_eth_xmit - add a packet to the Ethernet Tx queue
1072 * @skb: the packet
1073 * @dev: the egress net device
1074 *
1075 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
1076 */
1077int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1078{
1079 unsigned int ndesc, pidx, credits, gen, compl;
1080 const struct port_info *pi = netdev_priv(dev);
5fbf816f 1081 struct adapter *adap = pi->adapter;
bea3348e 1082 struct sge_qset *qs = pi->qs;
1083 struct sge_txq *q = &qs->txq[TXQ_ETH];
1084
1085 /*
 1086 * The chip's minimum packet length is 9 octets, but play it safe and
 1087 * reject anything shorter than an Ethernet header.
1088 */
1089 if (unlikely(skb->len < ETH_HLEN)) {
1090 dev_kfree_skb(skb);
1091 return NETDEV_TX_OK;
1092 }
1093
1094 spin_lock(&q->lock);
1095 reclaim_completed_tx(adap, q);
1096
1097 credits = q->size - q->in_use;
1098 ndesc = calc_tx_descs(skb);
1099
1100 if (unlikely(credits < ndesc)) {
1101 t3_stop_queue(dev, qs, q);
1102 dev_err(&adap->pdev->dev,
1103 "%s: Tx ring %u full while queue awake!\n",
1104 dev->name, q->cntxt_id & 7);
1105 spin_unlock(&q->lock);
1106 return NETDEV_TX_BUSY;
1107 }
1108
1109 q->in_use += ndesc;
1110 if (unlikely(credits - ndesc < q->stop_thres)) {
1111 t3_stop_queue(dev, qs, q);
1112
1113 if (should_restart_tx(q) &&
1114 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1115 q->restarts++;
1116 netif_wake_queue(dev);
1117 }
1118 }
1119
1120 gen = q->gen;
1121 q->unacked += ndesc;
1122 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1123 q->unacked &= 7;
1124 pidx = q->pidx;
1125 q->pidx += ndesc;
1126 if (q->pidx >= q->size) {
1127 q->pidx -= q->size;
1128 q->gen ^= 1;
1129 }
1130
1131 /* update port statistics */
1132 if (skb->ip_summed == CHECKSUM_COMPLETE)
1133 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1134 if (skb_shinfo(skb)->gso_size)
1135 qs->port_stats[SGE_PSTAT_TSO]++;
1136 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1137 qs->port_stats[SGE_PSTAT_VLANINS]++;
1138
1139 dev->trans_start = jiffies;
1140 spin_unlock(&q->lock);
1141
1142 /*
1143 * We do not use Tx completion interrupts to free DMAd Tx packets.
 1144 * This is good for performance but means that we rely on new Tx
1145 * packets arriving to run the destructors of completed packets,
1146 * which open up space in their sockets' send queues. Sometimes
 1147 * we do not get such new packets, causing Tx to stall. A single
1148 * UDP transmitter is a good example of this situation. We have
1149 * a clean up timer that periodically reclaims completed packets
1150 * but it doesn't run often enough (nor do we want it to) to prevent
1151 * lengthy stalls. A solution to this problem is to run the
1152 * destructor early, after the packet is queued but before it's DMAd.
 1153 * A downside is that we lie to socket memory accounting, but the amount
1154 * of extra memory is reasonable (limited by the number of Tx
1155 * descriptors), the packets do actually get freed quickly by new
1156 * packets almost always, and for protocols like TCP that wait for
1157 * acks to really free up the data the extra memory is even less.
1158 * On the positive side we run the destructors on the sending CPU
1159 * rather than on a potentially different completing CPU, usually a
1160 * good thing. We also run them without holding our Tx queue lock,
1161 * unlike what reclaim_completed_tx() would otherwise do.
1162 *
1163 * Run the destructor before telling the DMA engine about the packet
1164 * to make sure it doesn't complete and get freed prematurely.
1165 */
1166 if (likely(!skb_shared(skb)))
1167 skb_orphan(skb);
1168
1169 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1170 check_ring_tx_db(adap, q);
1171 return NETDEV_TX_OK;
1172}
1173
1174/**
1175 * write_imm - write a packet into a Tx descriptor as immediate data
1176 * @d: the Tx descriptor to write
1177 * @skb: the packet
1178 * @len: the length of packet data to write as immediate data
1179 * @gen: the generation bit value to write
1180 *
1181 * Writes a packet as immediate data into a Tx descriptor. The packet
1182 * contains a work request at its beginning. We must write the packet
1183 * carefully so the SGE doesn't read it accidentally before it's written
1184 * in its entirety.
1185 */
1186static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1187 unsigned int len, unsigned int gen)
1188{
1189 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1190 struct work_request_hdr *to = (struct work_request_hdr *)d;
1191
1192 if (likely(!skb->data_len))
1193 memcpy(&to[1], &from[1], len - sizeof(*from));
1194 else
1195 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1196
1197 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1198 V_WR_BCNTLFLT(len & 7));
1199 wmb();
1200 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1201 V_WR_LEN((len + 7) / 8));
1202 wr_gen2(d, gen);
1203 kfree_skb(skb);
1204}
1205
1206/**
1207 * check_desc_avail - check descriptor availability on a send queue
1208 * @adap: the adapter
1209 * @q: the send queue
1210 * @skb: the packet needing the descriptors
1211 * @ndesc: the number of Tx descriptors needed
1212 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1213 *
1214 * Checks if the requested number of Tx descriptors is available on an
1215 * SGE send queue. If the queue is already suspended or not enough
1216 * descriptors are available the packet is queued for later transmission.
1217 * Must be called with the Tx queue locked.
1218 *
1219 * Returns 0 if enough descriptors are available, 1 if there aren't
1220 * enough descriptors and the packet has been queued, and 2 if the caller
1221 * needs to retry because there weren't enough descriptors at the
1222 * beginning of the call but some freed up in the mean time.
1223 */
1224static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1225 struct sk_buff *skb, unsigned int ndesc,
1226 unsigned int qid)
1227{
1228 if (unlikely(!skb_queue_empty(&q->sendq))) {
1229 addq_exit:__skb_queue_tail(&q->sendq, skb);
1230 return 1;
1231 }
1232 if (unlikely(q->size - q->in_use < ndesc)) {
1233 struct sge_qset *qs = txq_to_qset(q, qid);
1234
1235 set_bit(qid, &qs->txq_stopped);
1236 smp_mb__after_clear_bit();
1237
1238 if (should_restart_tx(q) &&
1239 test_and_clear_bit(qid, &qs->txq_stopped))
1240 return 2;
1241
1242 q->stops++;
1243 goto addq_exit;
1244 }
1245 return 0;
1246}
1247
1248/**
1249 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1250 * @q: the SGE control Tx queue
1251 *
1252 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1253 * that send only immediate data (presently just the control queues) and
1254 * thus do not have any sk_buffs to release.
1255 */
1256static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1257{
1258 unsigned int reclaim = q->processed - q->cleaned;
1259
1260 q->in_use -= reclaim;
1261 q->cleaned += reclaim;
1262}
1263
1264static inline int immediate(const struct sk_buff *skb)
1265{
27186dc3 1266 return skb->len <= WR_LEN;
1267}
1268
1269/**
1270 * ctrl_xmit - send a packet through an SGE control Tx queue
1271 * @adap: the adapter
1272 * @q: the control queue
1273 * @skb: the packet
1274 *
1275 * Send a packet through an SGE control Tx queue. Packets sent through
1276 * a control queue must fit entirely as immediate data in a single Tx
1277 * descriptor and have no page fragments.
1278 */
1279static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1280 struct sk_buff *skb)
1281{
1282 int ret;
1283 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1284
1285 if (unlikely(!immediate(skb))) {
1286 WARN_ON(1);
1287 dev_kfree_skb(skb);
1288 return NET_XMIT_SUCCESS;
1289 }
1290
1291 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1292 wrp->wr_lo = htonl(V_WR_TID(q->token));
1293
1294 spin_lock(&q->lock);
1295 again:reclaim_completed_tx_imm(q);
1296
1297 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1298 if (unlikely(ret)) {
1299 if (ret == 1) {
1300 spin_unlock(&q->lock);
1301 return NET_XMIT_CN;
1302 }
1303 goto again;
1304 }
1305
1306 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1307
1308 q->in_use++;
1309 if (++q->pidx >= q->size) {
1310 q->pidx = 0;
1311 q->gen ^= 1;
1312 }
1313 spin_unlock(&q->lock);
1314 wmb();
1315 t3_write_reg(adap, A_SG_KDOORBELL,
1316 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1317 return NET_XMIT_SUCCESS;
1318}
1319
1320/**
1321 * restart_ctrlq - restart a suspended control queue
 1322 * @qs: the queue set containing the control queue
1323 *
1324 * Resumes transmission on a suspended Tx control queue.
1325 */
1326static void restart_ctrlq(unsigned long data)
1327{
1328 struct sk_buff *skb;
1329 struct sge_qset *qs = (struct sge_qset *)data;
1330 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1331
1332 spin_lock(&q->lock);
1333 again:reclaim_completed_tx_imm(q);
1334
1335 while (q->in_use < q->size &&
1336 (skb = __skb_dequeue(&q->sendq)) != NULL) {
1337
1338 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1339
1340 if (++q->pidx >= q->size) {
1341 q->pidx = 0;
1342 q->gen ^= 1;
1343 }
1344 q->in_use++;
1345 }
1346
1347 if (!skb_queue_empty(&q->sendq)) {
1348 set_bit(TXQ_CTRL, &qs->txq_stopped);
1349 smp_mb__after_clear_bit();
1350
1351 if (should_restart_tx(q) &&
1352 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1353 goto again;
1354 q->stops++;
1355 }
1356
1357 spin_unlock(&q->lock);
afefce66 1358 wmb();
bea3348e 1359 t3_write_reg(qs->adap, A_SG_KDOORBELL,
1360 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1361}
1362
1363/*
1364 * Send a management message through control queue 0
1365 */
1366int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1367{
1368 int ret;
1369 local_bh_disable();
1370 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1371 local_bh_enable();
1372
1373 return ret;
1374}
1375
1376/**
1377 * deferred_unmap_destructor - unmap a packet when it is freed
1378 * @skb: the packet
1379 *
1380 * This is the packet destructor used for Tx packets that need to remain
1381 * mapped until they are freed rather than until their Tx descriptors are
1382 * freed.
1383 */
1384static void deferred_unmap_destructor(struct sk_buff *skb)
1385{
1386 int i;
1387 const dma_addr_t *p;
1388 const struct skb_shared_info *si;
1389 const struct deferred_unmap_info *dui;
1390
1391 dui = (struct deferred_unmap_info *)skb->head;
1392 p = dui->addr;
1393
1394 if (skb->tail - skb->transport_header)
1395 pci_unmap_single(dui->pdev, *p++,
1396 skb->tail - skb->transport_header,
1397 PCI_DMA_TODEVICE);
1398
1399 si = skb_shinfo(skb);
1400 for (i = 0; i < si->nr_frags; i++)
1401 pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
1402 PCI_DMA_TODEVICE);
1403}
1404
1405static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1406 const struct sg_ent *sgl, int sgl_flits)
1407{
1408 dma_addr_t *p;
1409 struct deferred_unmap_info *dui;
1410
1411 dui = (struct deferred_unmap_info *)skb->head;
1412 dui->pdev = pdev;
1413 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1414 *p++ = be64_to_cpu(sgl->addr[0]);
1415 *p++ = be64_to_cpu(sgl->addr[1]);
1416 }
1417 if (sgl_flits)
1418 *p = be64_to_cpu(sgl->addr[0]);
1419}
1420
1421/**
1422 * write_ofld_wr - write an offload work request
1423 * @adap: the adapter
1424 * @skb: the packet to send
1425 * @q: the Tx queue
1426 * @pidx: index of the first Tx descriptor to write
1427 * @gen: the generation value to use
1428 * @ndesc: number of descriptors the packet will occupy
1429 *
1430 * Write an offload work request to send the supplied packet. The packet
1431 * data already carry the work request with most fields populated.
1432 */
1433static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1434 struct sge_txq *q, unsigned int pidx,
1435 unsigned int gen, unsigned int ndesc)
1436{
1437 unsigned int sgl_flits, flits;
1438 struct work_request_hdr *from;
1439 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1440 struct tx_desc *d = &q->desc[pidx];
1441
1442 if (immediate(skb)) {
1443 q->sdesc[pidx].skb = NULL;
1444 write_imm(d, skb, skb->len, gen);
1445 return;
1446 }
1447
1448 /* Only TX_DATA builds SGLs */
1449
1450 from = (struct work_request_hdr *)skb->data;
1451 memcpy(&d->flit[1], &from[1],
1452 skb_transport_offset(skb) - sizeof(*from));
4d22de3e 1453
ea2ae17d 1454 flits = skb_transport_offset(skb) / 8;
4d22de3e 1455 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
9c70220b 1456 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
27a884dc 1457 skb->tail - skb->transport_header,
4d22de3e 1458 adap->pdev);
1459 if (need_skb_unmap()) {
1460 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1461 skb->destructor = deferred_unmap_destructor;
99d7cf30 1462 }
1463
1464 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1465 gen, from->wr_hi, from->wr_lo);
1466}
1467
1468/**
1469 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1470 * @skb: the packet
1471 *
1472 * Returns the number of Tx descriptors needed for the given offload
1473 * packet. These packets are already fully constructed.
1474 */
1475static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1476{
27186dc3 1477 unsigned int flits, cnt;
4d22de3e 1478
27186dc3 1479 if (skb->len <= WR_LEN)
1480 return 1; /* packet fits as immediate data */
1481
ea2ae17d 1482 flits = skb_transport_offset(skb) / 8; /* headers */
27186dc3 1483 cnt = skb_shinfo(skb)->nr_frags;
27a884dc 1484 if (skb->tail != skb->transport_header)
1485 cnt++;
1486 return flits_to_desc(flits + sgl_len(cnt));
1487}
1488
1489/**
1490 * ofld_xmit - send a packet through an offload queue
1491 * @adap: the adapter
1492 * @q: the Tx offload queue
1493 * @skb: the packet
1494 *
1495 * Send an offload packet through an SGE offload queue.
1496 */
1497static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1498 struct sk_buff *skb)
1499{
1500 int ret;
1501 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1502
1503 spin_lock(&q->lock);
1504 again:reclaim_completed_tx(adap, q);
1505
1506 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1507 if (unlikely(ret)) {
1508 if (ret == 1) {
1509 skb->priority = ndesc; /* save for restart */
1510 spin_unlock(&q->lock);
1511 return NET_XMIT_CN;
1512 }
1513 goto again;
1514 }
1515
1516 gen = q->gen;
1517 q->in_use += ndesc;
1518 pidx = q->pidx;
1519 q->pidx += ndesc;
1520 if (q->pidx >= q->size) {
1521 q->pidx -= q->size;
1522 q->gen ^= 1;
1523 }
1524 spin_unlock(&q->lock);
1525
1526 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1527 check_ring_tx_db(adap, q);
1528 return NET_XMIT_SUCCESS;
1529}
1530
1531/**
1532 * restart_offloadq - restart a suspended offload queue
 1533 * @qs: the queue set containing the offload queue
1534 *
1535 * Resumes transmission on a suspended Tx offload queue.
1536 */
1537static void restart_offloadq(unsigned long data)
1538{
1539 struct sk_buff *skb;
1540 struct sge_qset *qs = (struct sge_qset *)data;
1541 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1542 const struct port_info *pi = netdev_priv(qs->netdev);
1543 struct adapter *adap = pi->adapter;
1544
1545 spin_lock(&q->lock);
1546 again:reclaim_completed_tx(adap, q);
1547
1548 while ((skb = skb_peek(&q->sendq)) != NULL) {
1549 unsigned int gen, pidx;
1550 unsigned int ndesc = skb->priority;
1551
1552 if (unlikely(q->size - q->in_use < ndesc)) {
1553 set_bit(TXQ_OFLD, &qs->txq_stopped);
1554 smp_mb__after_clear_bit();
1555
1556 if (should_restart_tx(q) &&
1557 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1558 goto again;
1559 q->stops++;
1560 break;
1561 }
1562
1563 gen = q->gen;
1564 q->in_use += ndesc;
1565 pidx = q->pidx;
1566 q->pidx += ndesc;
1567 if (q->pidx >= q->size) {
1568 q->pidx -= q->size;
1569 q->gen ^= 1;
1570 }
1571 __skb_unlink(skb, &q->sendq);
1572 spin_unlock(&q->lock);
1573
1574 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1575 spin_lock(&q->lock);
1576 }
1577 spin_unlock(&q->lock);
1578
1579#if USE_GTS
1580 set_bit(TXQ_RUNNING, &q->flags);
1581 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1582#endif
afefce66 1583 wmb();
1584 t3_write_reg(adap, A_SG_KDOORBELL,
1585 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1586}
1587
1588/**
1589 * queue_set - return the queue set a packet should use
1590 * @skb: the packet
1591 *
1592 * Maps a packet to the SGE queue set it should use. The desired queue
1593 * set is carried in bits 1-3 in the packet's priority.
1594 */
1595static inline int queue_set(const struct sk_buff *skb)
1596{
1597 return skb->priority >> 1;
1598}
1599
1600/**
1601 * is_ctrl_pkt - return whether an offload packet is a control packet
1602 * @skb: the packet
1603 *
1604 * Determines whether an offload packet should use an OFLD or a CTRL
1605 * Tx queue. This is indicated by bit 0 in the packet's priority.
1606 */
1607static inline int is_ctrl_pkt(const struct sk_buff *skb)
1608{
1609 return skb->priority & 1;
1610}
1611
1612/**
1613 * t3_offload_tx - send an offload packet
1614 * @tdev: the offload device to send to
1615 * @skb: the packet
1616 *
1617 * Sends an offload packet. We use the packet priority to select the
1618 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1619 * should be sent as regular or control, bits 1-3 select the queue set.
1620 */
1621int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1622{
1623 struct adapter *adap = tdev2adap(tdev);
1624 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1625
1626 if (unlikely(is_ctrl_pkt(skb)))
1627 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1628
1629 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1630}
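/*
 * Example of the priority encoding consumed above: a caller targeting queue
 * set 2 sets skb->priority = (2 << 1) for a regular offload packet, or
 * (2 << 1) | 1 to send it through that queue set's CTRL queue instead.
 */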
1631
1632/**
1633 * offload_enqueue - add an offload packet to an SGE offload receive queue
1634 * @q: the SGE response queue
1635 * @skb: the packet
1636 *
1637 * Add a new offload packet to an SGE response queue's offload packet
1638 * queue. If the packet is the first on the queue it schedules the RX
1639 * softirq to process the queue.
1640 */
1641static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1642{
1643 skb->next = skb->prev = NULL;
1644 if (q->rx_tail)
1645 q->rx_tail->next = skb;
1646 else {
1647 struct sge_qset *qs = rspq_to_qset(q);
1648
bea3348e 1649 napi_schedule(&qs->napi);
1650 q->rx_head = skb;
1651 }
1652 q->rx_tail = skb;
1653}
1654
1655/**
1656 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1657 * @tdev: the offload device that will be receiving the packets
1658 * @q: the SGE response queue that assembled the bundle
1659 * @skbs: the partial bundle
1660 * @n: the number of packets in the bundle
1661 *
1662 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1663 */
1664static inline void deliver_partial_bundle(struct t3cdev *tdev,
1665 struct sge_rspq *q,
1666 struct sk_buff *skbs[], int n)
1667{
1668 if (n) {
1669 q->offload_bundles++;
1670 tdev->recv(tdev, skbs, n);
1671 }
1672}
1673
1674/**
1675 * ofld_poll - NAPI handler for offload packets in interrupt mode
1676 * @dev: the network device doing the polling
1677 * @budget: polling budget
1678 *
1679 * The NAPI handler for offload packets when a response queue is serviced
1680 * by the hard interrupt handler, i.e., when it's operating in non-polling
1681 * mode. Creates small packet batches and sends them through the offload
1682 * receive handler. Batches need to be of modest size as we do prefetches
1683 * on the packets in each.
1684 */
bea3348e 1685static int ofld_poll(struct napi_struct *napi, int budget)
4d22de3e 1686{
bea3348e 1687 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
4d22de3e 1688 struct sge_rspq *q = &qs->rspq;
1689 struct adapter *adapter = qs->adap;
1690 int work_done = 0;
4d22de3e 1691
bea3348e 1692 while (work_done < budget) {
1693 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
1694 int ngathered;
1695
1696 spin_lock_irq(&q->lock);
1697 head = q->rx_head;
1698 if (!head) {
bea3348e 1699 napi_complete(napi);
4d22de3e 1700 spin_unlock_irq(&q->lock);
bea3348e 1701 return work_done;
1702 }
1703
1704 tail = q->rx_tail;
1705 q->rx_head = q->rx_tail = NULL;
1706 spin_unlock_irq(&q->lock);
1707
bea3348e 1708 for (ngathered = 0; work_done < budget && head; work_done++) {
1709 prefetch(head->data);
1710 skbs[ngathered] = head;
1711 head = head->next;
1712 skbs[ngathered]->next = NULL;
1713 if (++ngathered == RX_BUNDLE_SIZE) {
1714 q->offload_bundles++;
1715 adapter->tdev.recv(&adapter->tdev, skbs,
1716 ngathered);
1717 ngathered = 0;
1718 }
1719 }
1720 if (head) { /* splice remaining packets back onto Rx queue */
1721 spin_lock_irq(&q->lock);
1722 tail->next = q->rx_head;
1723 if (!q->rx_head)
1724 q->rx_tail = tail;
1725 q->rx_head = head;
1726 spin_unlock_irq(&q->lock);
1727 }
1728 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1729 }
1730
1731 return work_done;
1732}
1733
1734/**
1735 * rx_offload - process a received offload packet
1736 * @tdev: the offload device receiving the packet
1737 * @rq: the response queue that received the packet
1738 * @skb: the packet
1739 * @rx_gather: a gather list of packets if we are building a bundle
1740 * @gather_idx: index of the next available slot in the bundle
1741 *
 1742 * Process an ingress offload packet and add it to the offload ingress
1743 * queue. Returns the index of the next available slot in the bundle.
1744 */
1745static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1746 struct sk_buff *skb, struct sk_buff *rx_gather[],
1747 unsigned int gather_idx)
1748{
459a98ed 1749 skb_reset_mac_header(skb);
c1d2bbe1 1750 skb_reset_network_header(skb);
badff6d0 1751 skb_reset_transport_header(skb);
4d22de3e
DLR
1752
1753 if (rq->polling) {
1754 rx_gather[gather_idx++] = skb;
1755 if (gather_idx == RX_BUNDLE_SIZE) {
1756 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1757 gather_idx = 0;
1758 rq->offload_bundles++;
1759 }
1760 } else
1761 offload_enqueue(rq, skb);
1762
1763 return gather_idx;
1764}
1765
4d22de3e
DLR
1766/**
1767 * restart_tx - check whether to restart suspended Tx queues
1768 * @qs: the queue set to resume
1769 *
1770 * Restarts suspended Tx queues of an SGE queue set if they have enough
1771 * free resources to resume operation.
1772 */
1773static void restart_tx(struct sge_qset *qs)
1774{
1775 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1776 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1777 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1778 qs->txq[TXQ_ETH].restarts++;
1779 if (netif_running(qs->netdev))
1780 netif_wake_queue(qs->netdev);
1781 }
1782
1783 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1784 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1785 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1786 qs->txq[TXQ_OFLD].restarts++;
1787 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1788 }
1789 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1790 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1791 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1792 qs->txq[TXQ_CTRL].restarts++;
1793 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1794 }
1795}
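/*
 * In outline (simplified): the transmit path sets a queue's bit in
 * qs->txq_stopped and stops the queue when its Tx ring runs low on
 * descriptors; once completion credits come back on the response queue,
 * restart_tx() above wakes it again.  should_restart_tx() re-checks the
 * free-descriptor count and test_and_clear_bit() makes sure only one path
 * actually clears the bit and issues the wake-up.
 */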
1796
1797/**
1798 * rx_eth - process an ingress ethernet packet
1799 * @adap: the adapter
1800 * @rq: the response queue that received the packet
1801 * @skb: the packet
1802 * @pad: amount of padding at the start of the buffer
1803 *
 1804 * Process an ingress ethernet packet and deliver it to the stack.
1805 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1806 * if it was immediate data in a response.
1807 */
1808static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1809 struct sk_buff *skb, int pad)
1810{
1811 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1812 struct port_info *pi;
1813
4d22de3e 1814 skb_pull(skb, sizeof(*p) + pad);
4c13eb66 1815 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
e360b562 1816 skb->dev->last_rx = jiffies;
4d22de3e 1817 pi = netdev_priv(skb->dev);
05e5c116 1818 if (pi->rx_csum_offload && p->csum_valid && p->csum == htons(0xffff) &&
4d22de3e
DLR
1819 !p->fragment) {
1820 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1821 skb->ip_summed = CHECKSUM_UNNECESSARY;
1822 } else
1823 skb->ip_summed = CHECKSUM_NONE;
1824
1825 if (unlikely(p->vlan_valid)) {
1826 struct vlan_group *grp = pi->vlan_grp;
1827
1828 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
1829 if (likely(grp))
1830 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1831 rq->polling);
1832 else
1833 dev_kfree_skb_any(skb);
1834 } else if (rq->polling)
1835 netif_receive_skb(skb);
1836 else
1837 netif_rx(skb);
1838}
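/*
 * Note on the checksum test above: the skb is marked CHECKSUM_UNNECESSARY
 * only when Rx checksum offload is enabled for the port, the hardware flags
 * the checksum as valid (csum_valid with a reported value of 0xffff, i.e. a
 * correct ones-complement sum) and the packet is not an IP fragment;
 * otherwise it falls back to CHECKSUM_NONE and software verification.
 */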
1839
1840/**
1841 * handle_rsp_cntrl_info - handles control information in a response
1842 * @qs: the queue set corresponding to the response
1843 * @flags: the response control flags
4d22de3e
DLR
1844 *
1845 * Handles the control information of an SGE response, such as GTS
1846 * indications and completion credits for the queue set's Tx queues.
6195c71d 1847 * HW coalesces credits, we don't do any extra SW coalescing.
4d22de3e 1848 */
6195c71d 1849static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
4d22de3e
DLR
1850{
1851 unsigned int credits;
1852
1853#if USE_GTS
1854 if (flags & F_RSPD_TXQ0_GTS)
1855 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
1856#endif
1857
4d22de3e
DLR
1858 credits = G_RSPD_TXQ0_CR(flags);
1859 if (credits)
1860 qs->txq[TXQ_ETH].processed += credits;
1861
6195c71d
DLR
1862 credits = G_RSPD_TXQ2_CR(flags);
1863 if (credits)
1864 qs->txq[TXQ_CTRL].processed += credits;
1865
4d22de3e
DLR
1866# if USE_GTS
1867 if (flags & F_RSPD_TXQ1_GTS)
1868 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
1869# endif
6195c71d
DLR
1870 credits = G_RSPD_TXQ1_CR(flags);
1871 if (credits)
1872 qs->txq[TXQ_OFLD].processed += credits;
4d22de3e
DLR
1873}
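/*
 * The hardware credit fields map onto the queue set's Tx queues as follows:
 * TXQ0 credits are for the Ethernet queue, TXQ1 for the offload queue and
 * TXQ2 for the control queue, as reflected in the accounting above.
 */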
1874
1875/**
1876 * check_ring_db - check if we need to ring any doorbells
1877 * @adapter: the adapter
1878 * @qs: the queue set whose Tx queues are to be examined
1879 * @sleeping: indicates which Tx queue sent GTS
1880 *
1881 * Checks if some of a queue set's Tx queues need to ring their doorbells
1882 * to resume transmission after idling while they still have unprocessed
1883 * descriptors.
1884 */
1885static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
1886 unsigned int sleeping)
1887{
1888 if (sleeping & F_RSPD_TXQ0_GTS) {
1889 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1890
1891 if (txq->cleaned + txq->in_use != txq->processed &&
1892 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1893 set_bit(TXQ_RUNNING, &txq->flags);
1894 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1895 V_EGRCNTX(txq->cntxt_id));
1896 }
1897 }
1898
1899 if (sleeping & F_RSPD_TXQ1_GTS) {
1900 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
1901
1902 if (txq->cleaned + txq->in_use != txq->processed &&
1903 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1904 set_bit(TXQ_RUNNING, &txq->flags);
1905 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1906 V_EGRCNTX(txq->cntxt_id));
1907 }
1908 }
1909}
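/*
 * A GTS indication in the response flags means the corresponding Tx engine
 * has gone idle.  If descriptors were queued beyond what it has processed
 * (cleaned + in_use != processed), the doorbell write above kicks the
 * engine so those descriptors are not left stranded.
 */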
1910
1911/**
1912 * is_new_response - check if a response is newly written
1913 * @r: the response descriptor
1914 * @q: the response queue
1915 *
1916 * Returns true if a response descriptor contains a yet unprocessed
1917 * response.
1918 */
1919static inline int is_new_response(const struct rsp_desc *r,
1920 const struct sge_rspq *q)
1921{
1922 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1923}
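/*
 * The generation bit is what lets software tell freshly written responses
 * from stale ones left over from the previous pass over the ring: q->gen is
 * flipped every time the consumer index wraps.  A consumption loop therefore
 * looks roughly like the sketch below, where handle() merely stands in for
 * the real per-response work done in process_responses():
 *
 *	while (is_new_response(&q->desc[q->cidx], q)) {
 *		handle(&q->desc[q->cidx]);
 *		if (++q->cidx == q->size) {
 *			q->cidx = 0;
 *			q->gen ^= 1;
 *		}
 *	}
 */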
1924
1925#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1926#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1927 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
1928 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
1929 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
1930
1931/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
1932#define NOMEM_INTR_DELAY 2500
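/* With 0.1 us ticks this works out to 2500 * 0.1 us = 250 us of holdoff. */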
1933
1934/**
1935 * process_responses - process responses from an SGE response queue
1936 * @adap: the adapter
1937 * @qs: the queue set to which the response queue belongs
1938 * @budget: how many responses can be processed in this round
1939 *
1940 * Process responses from an SGE response queue up to the supplied budget.
1941 * Responses include received packets as well as credits and other events
1942 * for the queues that belong to the response queue's queue set.
1943 * A negative budget is effectively unlimited.
1944 *
1945 * Additionally choose the interrupt holdoff time for the next interrupt
 1946 * on this queue. If the system is short of memory, use a fairly
1947 * long delay to help recovery.
1948 */
1949static int process_responses(struct adapter *adap, struct sge_qset *qs,
1950 int budget)
1951{
1952 struct sge_rspq *q = &qs->rspq;
1953 struct rsp_desc *r = &q->desc[q->cidx];
1954 int budget_left = budget;
6195c71d 1955 unsigned int sleeping = 0;
4d22de3e
DLR
1956 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
1957 int ngathered = 0;
1958
1959 q->next_holdoff = q->holdoff_tmr;
1960
1961 while (likely(budget_left && is_new_response(r, q))) {
e0994eb1 1962 int eth, ethpad = 2;
4d22de3e
DLR
1963 struct sk_buff *skb = NULL;
1964 u32 len, flags = ntohl(r->flags);
05e5c116 1965 __be32 rss_hi = *(const __be32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
4d22de3e
DLR
1966
1967 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1968
1969 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
1970 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
1971 if (!skb)
1972 goto no_mem;
1973
1974 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
1975 skb->data[0] = CPL_ASYNC_NOTIF;
1976 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
1977 q->async_notif++;
1978 } else if (flags & F_RSPD_IMM_DATA_VALID) {
1979 skb = get_imm_packet(r);
1980 if (unlikely(!skb)) {
cf992af5 1981no_mem:
4d22de3e
DLR
1982 q->next_holdoff = NOMEM_INTR_DELAY;
1983 q->nomem++;
1984 /* consume one credit since we tried */
1985 budget_left--;
1986 break;
1987 }
1988 q->imm_data++;
e0994eb1 1989 ethpad = 0;
4d22de3e 1990 } else if ((len = ntohl(r->len_cq)) != 0) {
cf992af5 1991 struct sge_fl *fl;
e0994eb1 1992
cf992af5
DLR
1993 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1994 if (fl->use_pages) {
1995 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
e0994eb1 1996
cf992af5
DLR
1997 prefetch(addr);
1998#if L1_CACHE_BYTES < 128
1999 prefetch(addr + L1_CACHE_BYTES);
2000#endif
e0994eb1
DLR
2001 __refill_fl(adap, fl);
2002
cf992af5
DLR
2003 skb = get_packet_pg(adap, fl, G_RSPD_LEN(len),
2004 eth ? SGE_RX_DROP_THRES : 0);
2005 } else
e0994eb1
DLR
2006 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2007 eth ? SGE_RX_DROP_THRES : 0);
cf992af5
DLR
2008 if (unlikely(!skb)) {
2009 if (!eth)
2010 goto no_mem;
2011 q->rx_drops++;
2012 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2013 __skb_pull(skb, 2);
4d22de3e 2014
4d22de3e
DLR
2015 if (++fl->cidx == fl->size)
2016 fl->cidx = 0;
2017 } else
2018 q->pure_rsps++;
2019
2020 if (flags & RSPD_CTRL_MASK) {
2021 sleeping |= flags & RSPD_GTS_MASK;
6195c71d 2022 handle_rsp_cntrl_info(qs, flags);
4d22de3e
DLR
2023 }
2024
2025 r++;
2026 if (unlikely(++q->cidx == q->size)) {
2027 q->cidx = 0;
2028 q->gen ^= 1;
2029 r = q->desc;
2030 }
2031 prefetch(r);
2032
2033 if (++q->credits >= (q->size / 4)) {
2034 refill_rspq(adap, q, q->credits);
2035 q->credits = 0;
2036 }
2037
cf992af5 2038 if (likely(skb != NULL)) {
4d22de3e
DLR
2039 if (eth)
2040 rx_eth(adap, q, skb, ethpad);
2041 else {
afefce66 2042 q->offload_pkts++;
cf992af5
DLR
2043 /* Preserve the RSS info in csum & priority */
2044 skb->csum = rss_hi;
2045 skb->priority = rss_lo;
2046 ngathered = rx_offload(&adap->tdev, q, skb,
2047 offload_skbs,
e0994eb1 2048 ngathered);
4d22de3e
DLR
2049 }
2050 }
4d22de3e
DLR
2051 --budget_left;
2052 }
2053
4d22de3e
DLR
2054 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2055 if (sleeping)
2056 check_ring_db(adap, qs, sleeping);
2057
2058 smp_mb(); /* commit Tx queue .processed updates */
2059 if (unlikely(qs->txq_stopped != 0))
2060 restart_tx(qs);
2061
2062 budget -= budget_left;
2063 return budget;
2064}
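/*
 * The value returned above is the number of responses consumed
 * (budget - budget_left); napi_rx_handler() compares it against its budget
 * to decide whether the queue is drained and interrupts can be re-armed.
 */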
2065
2066static inline int is_pure_response(const struct rsp_desc *r)
2067{
2068 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2069
2070 return (n | r->len_cq) == 0;
2071}
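/*
 * A response is "pure" when it carries no packet data at all: no async
 * notification, no immediate data and no free-list buffer (len_cq == 0).
 * Pure responses only return Tx credits and GTS state, which is why they
 * can be handled directly in hard-interrupt context below.
 */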
2072
2073/**
2074 * napi_rx_handler - the NAPI handler for Rx processing
bea3348e 2075 * @napi: the napi instance
4d22de3e
DLR
2076 * @budget: how many packets we can process in this round
2077 *
2078 * Handler for new data events when using NAPI.
2079 */
bea3348e 2080static int napi_rx_handler(struct napi_struct *napi, int budget)
4d22de3e 2081{
bea3348e
SH
2082 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2083 struct adapter *adap = qs->adap;
2084 int work_done = process_responses(adap, qs, budget);
4d22de3e 2085
bea3348e
SH
2086 if (likely(work_done < budget)) {
2087 napi_complete(napi);
4d22de3e 2088
bea3348e
SH
2089 /*
2090 * Because we don't atomically flush the following
2091 * write it is possible that in very rare cases it can
2092 * reach the device in a way that races with a new
2093 * response being written plus an error interrupt
2094 * causing the NAPI interrupt handler below to return
2095 * unhandled status to the OS. To protect against
2096 * this would require flushing the write and doing
2097 * both the write and the flush with interrupts off.
2098 * Way too expensive and unjustifiable given the
2099 * rarity of the race.
2100 *
2101 * The race cannot happen at all with MSI-X.
2102 */
2103 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2104 V_NEWTIMER(qs->rspq.next_holdoff) |
2105 V_NEWINDEX(qs->rspq.cidx));
2106 }
2107 return work_done;
4d22de3e
DLR
2108}
2109
2110/*
2111 * Returns true if the device is already scheduled for polling.
2112 */
bea3348e 2113static inline int napi_is_scheduled(struct napi_struct *napi)
4d22de3e 2114{
bea3348e 2115 return test_bit(NAPI_STATE_SCHED, &napi->state);
4d22de3e
DLR
2116}
2117
2118/**
2119 * process_pure_responses - process pure responses from a response queue
2120 * @adap: the adapter
2121 * @qs: the queue set owning the response queue
2122 * @r: the first pure response to process
2123 *
2124 * A simpler version of process_responses() that handles only pure (i.e.,
 2125 * non data-carrying) responses. Such responses are too light-weight to
2126 * justify calling a softirq under NAPI, so we handle them specially in
2127 * the interrupt handler. The function is called with a pointer to a
2128 * response, which the caller must ensure is a valid pure response.
2129 *
2130 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2131 */
2132static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2133 struct rsp_desc *r)
2134{
2135 struct sge_rspq *q = &qs->rspq;
6195c71d 2136 unsigned int sleeping = 0;
4d22de3e
DLR
2137
2138 do {
2139 u32 flags = ntohl(r->flags);
2140
2141 r++;
2142 if (unlikely(++q->cidx == q->size)) {
2143 q->cidx = 0;
2144 q->gen ^= 1;
2145 r = q->desc;
2146 }
2147 prefetch(r);
2148
2149 if (flags & RSPD_CTRL_MASK) {
2150 sleeping |= flags & RSPD_GTS_MASK;
6195c71d 2151 handle_rsp_cntrl_info(qs, flags);
4d22de3e
DLR
2152 }
2153
2154 q->pure_rsps++;
2155 if (++q->credits >= (q->size / 4)) {
2156 refill_rspq(adap, q, q->credits);
2157 q->credits = 0;
2158 }
2159 } while (is_new_response(r, q) && is_pure_response(r));
2160
4d22de3e
DLR
2161 if (sleeping)
2162 check_ring_db(adap, qs, sleeping);
2163
2164 smp_mb(); /* commit Tx queue .processed updates */
2165 if (unlikely(qs->txq_stopped != 0))
2166 restart_tx(qs);
2167
2168 return is_new_response(r, q);
2169}
2170
2171/**
2172 * handle_responses - decide what to do with new responses in NAPI mode
2173 * @adap: the adapter
2174 * @q: the response queue
2175 *
2176 * This is used by the NAPI interrupt handlers to decide what to do with
2177 * new SGE responses. If there are no new responses it returns -1. If
2178 * there are new responses and they are pure (i.e., non-data carrying)
2179 * it handles them straight in hard interrupt context as they are very
2180 * cheap and don't deliver any packets. Finally, if there are any data
2181 * signaling responses it schedules the NAPI handler. Returns 1 if it
2182 * schedules NAPI, 0 if all new responses were pure.
2183 *
2184 * The caller must ascertain NAPI is not already running.
2185 */
2186static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2187{
2188 struct sge_qset *qs = rspq_to_qset(q);
2189 struct rsp_desc *r = &q->desc[q->cidx];
2190
2191 if (!is_new_response(r, q))
2192 return -1;
2193 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2194 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2195 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2196 return 0;
2197 }
bea3348e 2198 napi_schedule(&qs->napi);
4d22de3e
DLR
2199 return 1;
2200}
2201
2202/*
2203 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2204 * (i.e., response queue serviced in hard interrupt).
2205 */
2206irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2207{
2208 struct sge_qset *qs = cookie;
bea3348e 2209 struct adapter *adap = qs->adap;
4d22de3e
DLR
2210 struct sge_rspq *q = &qs->rspq;
2211
2212 spin_lock(&q->lock);
2213 if (process_responses(adap, qs, -1) == 0)
2214 q->unhandled_irqs++;
2215 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2216 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2217 spin_unlock(&q->lock);
2218 return IRQ_HANDLED;
2219}
2220
2221/*
2222 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2223 * (i.e., response queue serviced by NAPI polling).
2224 */
9265fabf 2225static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
4d22de3e
DLR
2226{
2227 struct sge_qset *qs = cookie;
4d22de3e
DLR
2228 struct sge_rspq *q = &qs->rspq;
2229
2230 spin_lock(&q->lock);
4d22de3e 2231
bea3348e 2232 if (handle_responses(qs->adap, q) < 0)
4d22de3e
DLR
2233 q->unhandled_irqs++;
2234 spin_unlock(&q->lock);
2235 return IRQ_HANDLED;
2236}
2237
2238/*
2239 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2240 * SGE response queues as well as error and other async events as they all use
2241 * the same MSI vector. We use one SGE response queue per port in this mode
2242 * and protect all response queues with queue 0's lock.
2243 */
2244static irqreturn_t t3_intr_msi(int irq, void *cookie)
2245{
2246 int new_packets = 0;
2247 struct adapter *adap = cookie;
2248 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2249
2250 spin_lock(&q->lock);
2251
2252 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2253 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2254 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2255 new_packets = 1;
2256 }
2257
2258 if (adap->params.nports == 2 &&
2259 process_responses(adap, &adap->sge.qs[1], -1)) {
2260 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2261
2262 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2263 V_NEWTIMER(q1->next_holdoff) |
2264 V_NEWINDEX(q1->cidx));
2265 new_packets = 1;
2266 }
2267
2268 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2269 q->unhandled_irqs++;
2270
2271 spin_unlock(&q->lock);
2272 return IRQ_HANDLED;
2273}
2274
bea3348e 2275static int rspq_check_napi(struct sge_qset *qs)
4d22de3e 2276{
bea3348e
SH
2277 struct sge_rspq *q = &qs->rspq;
2278
2279 if (!napi_is_scheduled(&qs->napi) &&
2280 is_new_response(&q->desc[q->cidx], q)) {
2281 napi_schedule(&qs->napi);
4d22de3e
DLR
2282 return 1;
2283 }
2284 return 0;
2285}
2286
2287/*
2288 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2289 * by NAPI polling). Handles data events from SGE response queues as well as
2290 * error and other async events as they all use the same MSI vector. We use
2291 * one SGE response queue per port in this mode and protect all response
2292 * queues with queue 0's lock.
2293 */
9265fabf 2294static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
4d22de3e
DLR
2295{
2296 int new_packets;
2297 struct adapter *adap = cookie;
2298 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2299
2300 spin_lock(&q->lock);
2301
bea3348e 2302 new_packets = rspq_check_napi(&adap->sge.qs[0]);
4d22de3e 2303 if (adap->params.nports == 2)
bea3348e 2304 new_packets += rspq_check_napi(&adap->sge.qs[1]);
4d22de3e
DLR
2305 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2306 q->unhandled_irqs++;
2307
2308 spin_unlock(&q->lock);
2309 return IRQ_HANDLED;
2310}
2311
2312/*
2313 * A helper function that processes responses and issues GTS.
2314 */
2315static inline int process_responses_gts(struct adapter *adap,
2316 struct sge_rspq *rq)
2317{
2318 int work;
2319
2320 work = process_responses(adap, rspq_to_qset(rq), -1);
2321 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2322 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2323 return work;
2324}
2325
2326/*
2327 * The legacy INTx interrupt handler. This needs to handle data events from
2328 * SGE response queues as well as error and other async events as they all use
2329 * the same interrupt pin. We use one SGE response queue per port in this mode
2330 * and protect all response queues with queue 0's lock.
2331 */
2332static irqreturn_t t3_intr(int irq, void *cookie)
2333{
2334 int work_done, w0, w1;
2335 struct adapter *adap = cookie;
2336 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2337 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2338
2339 spin_lock(&q0->lock);
2340
2341 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2342 w1 = adap->params.nports == 2 &&
2343 is_new_response(&q1->desc[q1->cidx], q1);
2344
2345 if (likely(w0 | w1)) {
2346 t3_write_reg(adap, A_PL_CLI, 0);
2347 t3_read_reg(adap, A_PL_CLI); /* flush */
2348
2349 if (likely(w0))
2350 process_responses_gts(adap, q0);
2351
2352 if (w1)
2353 process_responses_gts(adap, q1);
2354
2355 work_done = w0 | w1;
2356 } else
2357 work_done = t3_slow_intr_handler(adap);
2358
2359 spin_unlock(&q0->lock);
2360 return IRQ_RETVAL(work_done != 0);
2361}
2362
2363/*
2364 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2365 * Handles data events from SGE response queues as well as error and other
2366 * async events as they all use the same interrupt pin. We use one SGE
2367 * response queue per port in this mode and protect all response queues with
2368 * queue 0's lock.
2369 */
2370static irqreturn_t t3b_intr(int irq, void *cookie)
2371{
2372 u32 map;
2373 struct adapter *adap = cookie;
2374 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2375
2376 t3_write_reg(adap, A_PL_CLI, 0);
2377 map = t3_read_reg(adap, A_SG_DATA_INTR);
2378
2379 if (unlikely(!map)) /* shared interrupt, most likely */
2380 return IRQ_NONE;
2381
2382 spin_lock(&q0->lock);
2383
2384 if (unlikely(map & F_ERRINTR))
2385 t3_slow_intr_handler(adap);
2386
2387 if (likely(map & 1))
2388 process_responses_gts(adap, q0);
2389
2390 if (map & 2)
2391 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2392
2393 spin_unlock(&q0->lock);
2394 return IRQ_HANDLED;
2395}
2396
2397/*
2398 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2399 * Handles data events from SGE response queues as well as error and other
2400 * async events as they all use the same interrupt pin. We use one SGE
2401 * response queue per port in this mode and protect all response queues with
2402 * queue 0's lock.
2403 */
2404static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2405{
2406 u32 map;
4d22de3e 2407 struct adapter *adap = cookie;
bea3348e
SH
2408 struct sge_qset *qs0 = &adap->sge.qs[0];
2409 struct sge_rspq *q0 = &qs0->rspq;
4d22de3e
DLR
2410
2411 t3_write_reg(adap, A_PL_CLI, 0);
2412 map = t3_read_reg(adap, A_SG_DATA_INTR);
2413
2414 if (unlikely(!map)) /* shared interrupt, most likely */
2415 return IRQ_NONE;
2416
2417 spin_lock(&q0->lock);
2418
2419 if (unlikely(map & F_ERRINTR))
2420 t3_slow_intr_handler(adap);
2421
bea3348e
SH
2422 if (likely(map & 1))
2423 napi_schedule(&qs0->napi);
4d22de3e 2424
bea3348e
SH
2425 if (map & 2)
2426 napi_schedule(&adap->sge.qs[1].napi);
4d22de3e
DLR
2427
2428 spin_unlock(&q0->lock);
2429 return IRQ_HANDLED;
2430}
2431
2432/**
2433 * t3_intr_handler - select the top-level interrupt handler
2434 * @adap: the adapter
2435 * @polling: whether using NAPI to service response queues
2436 *
2437 * Selects the top-level interrupt handler based on the type of interrupts
2438 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2439 * response queues.
2440 */
7c239975 2441irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
4d22de3e
DLR
2442{
2443 if (adap->flags & USING_MSIX)
2444 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2445 if (adap->flags & USING_MSI)
2446 return polling ? t3_intr_msi_napi : t3_intr_msi;
2447 if (adap->params.rev > 0)
2448 return polling ? t3b_intr_napi : t3b_intr;
2449 return t3_intr;
2450}
2451
b881955b
DLR
2452#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2453 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2454 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2455 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2456 F_HIRCQPARITYERROR)
2457#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2458#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2459 F_RSPQDISABLED)
2460
4d22de3e
DLR
2461/**
2462 * t3_sge_err_intr_handler - SGE async event interrupt handler
2463 * @adapter: the adapter
2464 *
2465 * Interrupt handler for SGE asynchronous (non-data) events.
2466 */
2467void t3_sge_err_intr_handler(struct adapter *adapter)
2468{
2469 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2470
b881955b
DLR
2471 if (status & SGE_PARERR)
2472 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2473 status & SGE_PARERR);
2474 if (status & SGE_FRAMINGERR)
2475 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2476 status & SGE_FRAMINGERR);
2477
4d22de3e
DLR
2478 if (status & F_RSPQCREDITOVERFOW)
2479 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2480
2481 if (status & F_RSPQDISABLED) {
2482 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2483
2484 CH_ALERT(adapter,
2485 "packet delivered to disabled response queue "
2486 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2487 }
2488
6e3f03b7
DLR
2489 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2490 CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
2491 status & F_HIPIODRBDROPERR ? "high" : "lo");
2492
4d22de3e 2493 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
b881955b 2494 if (status & SGE_FATALERR)
4d22de3e
DLR
2495 t3_fatal_err(adapter);
2496}
2497
2498/**
2499 * sge_timer_cb - perform periodic maintenance of an SGE qset
2500 * @data: the SGE queue set to maintain
2501 *
2502 * Runs periodically from a timer to perform maintenance of an SGE queue
2503 * set. It performs two tasks:
2504 *
2505 * a) Cleans up any completed Tx descriptors that may still be pending.
2506 * Normal descriptor cleanup happens when new packets are added to a Tx
2507 * queue so this timer is relatively infrequent and does any cleanup only
2508 * if the Tx queue has not seen any new packets in a while. We make a
2509 * best effort attempt to reclaim descriptors, in that we don't wait
2510 * around if we cannot get a queue's lock (which most likely is because
2511 * someone else is queueing new packets and so will also handle the clean
2512 * up). Since control queues use immediate data exclusively we don't
2513 * bother cleaning them up here.
2514 *
2515 * b) Replenishes Rx queues that have run out due to memory shortage.
2516 * Normally new Rx buffers are added when existing ones are consumed but
2517 * when out of memory a queue can become empty. We try to add only a few
2518 * buffers here, the queue will be replenished fully as these new buffers
2519 * are used up if memory shortage has subsided.
2520 */
2521static void sge_timer_cb(unsigned long data)
2522{
2523 spinlock_t *lock;
2524 struct sge_qset *qs = (struct sge_qset *)data;
bea3348e 2525 struct adapter *adap = qs->adap;
4d22de3e
DLR
2526
2527 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2528 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2529 spin_unlock(&qs->txq[TXQ_ETH].lock);
2530 }
2531 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2532 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2533 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2534 }
2535 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
bea3348e 2536 &adap->sge.qs[0].rspq.lock;
4d22de3e 2537 if (spin_trylock_irq(lock)) {
bea3348e 2538 if (!napi_is_scheduled(&qs->napi)) {
bae73f44
DLR
2539 u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2540
4d22de3e
DLR
2541 if (qs->fl[0].credits < qs->fl[0].size)
2542 __refill_fl(adap, &qs->fl[0]);
2543 if (qs->fl[1].credits < qs->fl[1].size)
2544 __refill_fl(adap, &qs->fl[1]);
bae73f44
DLR
2545
2546 if (status & (1 << qs->rspq.cntxt_id)) {
2547 qs->rspq.starved++;
2548 if (qs->rspq.credits) {
2549 refill_rspq(adap, &qs->rspq, 1);
2550 qs->rspq.credits--;
2551 qs->rspq.restarted++;
e0994eb1 2552 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
bae73f44
DLR
2553 1 << qs->rspq.cntxt_id);
2554 }
2555 }
4d22de3e
DLR
2556 }
2557 spin_unlock_irq(lock);
2558 }
2559 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2560}
2561
2562/**
2563 * t3_update_qset_coalesce - update coalescing settings for a queue set
2564 * @qs: the SGE queue set
2565 * @p: new queue set parameters
2566 *
2567 * Update the coalescing settings for an SGE queue set. Nothing is done
2568 * if the queue set is not initialized yet.
2569 */
2570void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2571{
4d22de3e
DLR
2572 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2573 qs->rspq.polling = p->polling;
bea3348e 2574 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
4d22de3e
DLR
2575}
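/*
 * The holdoff timer is programmed in SGE timer ticks of 0.1 us (see
 * t3_sge_init() below), hence the multiplication by 10: e.g.
 * coalesce_usecs = 5 gives holdoff_tmr = 50, roughly 5 us of interrupt
 * holdoff.
 */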
2576
2577/**
2578 * t3_sge_alloc_qset - initialize an SGE queue set
2579 * @adapter: the adapter
2580 * @id: the queue set id
2581 * @nports: how many Ethernet ports will be using this queue set
2582 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2583 * @p: configuration parameters for this queue set
2584 * @ntxq: number of Tx queues for the queue set
2585 * @netdev: net device associated with this queue set
2586 *
2587 * Allocate resources and initialize an SGE queue set. A queue set
2588 * comprises a response queue, two Rx free-buffer queues, and up to 3
2589 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2590 * queue, offload queue, and control queue.
2591 */
2592int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2593 int irq_vec_idx, const struct qset_params *p,
bea3348e 2594 int ntxq, struct net_device *dev)
4d22de3e
DLR
2595{
2596 int i, ret = -ENOMEM;
2597 struct sge_qset *q = &adapter->sge.qs[id];
2598
2599 init_qset_cntxt(q, id);
2600 init_timer(&q->tx_reclaim_timer);
2601 q->tx_reclaim_timer.data = (unsigned long)q;
2602 q->tx_reclaim_timer.function = sge_timer_cb;
2603
2604 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2605 sizeof(struct rx_desc),
2606 sizeof(struct rx_sw_desc),
2607 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2608 if (!q->fl[0].desc)
2609 goto err;
2610
2611 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2612 sizeof(struct rx_desc),
2613 sizeof(struct rx_sw_desc),
2614 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2615 if (!q->fl[1].desc)
2616 goto err;
2617
2618 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2619 sizeof(struct rsp_desc), 0,
2620 &q->rspq.phys_addr, NULL);
2621 if (!q->rspq.desc)
2622 goto err;
2623
2624 for (i = 0; i < ntxq; ++i) {
2625 /*
2626 * The control queue always uses immediate data so does not
2627 * need to keep track of any sk_buffs.
2628 */
2629 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2630
2631 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2632 sizeof(struct tx_desc), sz,
2633 &q->txq[i].phys_addr,
2634 &q->txq[i].sdesc);
2635 if (!q->txq[i].desc)
2636 goto err;
2637
2638 q->txq[i].gen = 1;
2639 q->txq[i].size = p->txq_size[i];
2640 spin_lock_init(&q->txq[i].lock);
2641 skb_queue_head_init(&q->txq[i].sendq);
2642 }
2643
2644 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2645 (unsigned long)q);
2646 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2647 (unsigned long)q);
2648
2649 q->fl[0].gen = q->fl[1].gen = 1;
2650 q->fl[0].size = p->fl_size;
2651 q->fl[1].size = p->jumbo_size;
2652
2653 q->rspq.gen = 1;
2654 q->rspq.size = p->rspq_size;
2655 spin_lock_init(&q->rspq.lock);
2656
2657 q->txq[TXQ_ETH].stop_thres = nports *
2658 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2659
cf992af5
DLR
2660#if FL0_PG_CHUNK_SIZE > 0
2661 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
e0994eb1 2662#else
cf992af5 2663 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
e0994eb1 2664#endif
cf992af5
DLR
2665 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
2666 q->fl[1].buf_size = is_offload(adapter) ?
2667 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2668 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
4d22de3e
DLR
2669
2670 spin_lock(&adapter->sge.reg_lock);
2671
2672 /* FL threshold comparison uses < */
2673 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2674 q->rspq.phys_addr, q->rspq.size,
2675 q->fl[0].buf_size, 1, 0);
2676 if (ret)
2677 goto err_unlock;
2678
2679 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2680 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2681 q->fl[i].phys_addr, q->fl[i].size,
2682 q->fl[i].buf_size, p->cong_thres, 1,
2683 0);
2684 if (ret)
2685 goto err_unlock;
2686 }
2687
2688 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2689 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2690 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2691 1, 0);
2692 if (ret)
2693 goto err_unlock;
2694
2695 if (ntxq > 1) {
2696 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2697 USE_GTS, SGE_CNTXT_OFLD, id,
2698 q->txq[TXQ_OFLD].phys_addr,
2699 q->txq[TXQ_OFLD].size, 0, 1, 0);
2700 if (ret)
2701 goto err_unlock;
2702 }
2703
2704 if (ntxq > 2) {
2705 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2706 SGE_CNTXT_CTRL, id,
2707 q->txq[TXQ_CTRL].phys_addr,
2708 q->txq[TXQ_CTRL].size,
2709 q->txq[TXQ_CTRL].token, 1, 0);
2710 if (ret)
2711 goto err_unlock;
2712 }
2713
2714 spin_unlock(&adapter->sge.reg_lock);
4d22de3e 2715
bea3348e
SH
2716 q->adap = adapter;
2717 q->netdev = dev;
2718 t3_update_qset_coalesce(q, p);
4d22de3e
DLR
2719
2720 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
2721 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
2722 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2723
2724 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2725 V_NEWTIMER(q->rspq.holdoff_tmr));
2726
2727 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2728 return 0;
2729
2730 err_unlock:
2731 spin_unlock(&adapter->sge.reg_lock);
2732 err:
2733 t3_free_qset(adapter, q);
2734 return ret;
2735}
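/*
 * Hypothetical caller sketch (the real set-up lives in the main driver, not
 * in this file), shown only to illustrate the parameters: one queue set is
 * typically allocated per port, e.g.
 *
 *	err = t3_sge_alloc_qset(adap, i, 1, irq_idx,
 *				&adap->params.sge.qset[i], ntxq, port_dev);
 *	if (err)
 *		t3_free_sge_resources(adap);
 *
 * where i, irq_idx, ntxq and port_dev are whatever the caller derives from
 * its configuration.
 */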
2736
2737/**
2738 * t3_free_sge_resources - free SGE resources
2739 * @adap: the adapter
2740 *
2741 * Frees resources used by the SGE queue sets.
2742 */
2743void t3_free_sge_resources(struct adapter *adap)
2744{
2745 int i;
2746
2747 for (i = 0; i < SGE_QSETS; ++i)
2748 t3_free_qset(adap, &adap->sge.qs[i]);
2749}
2750
2751/**
2752 * t3_sge_start - enable SGE
2753 * @adap: the adapter
2754 *
2755 * Enables the SGE for DMAs. This is the last step in starting packet
2756 * transfers.
2757 */
2758void t3_sge_start(struct adapter *adap)
2759{
2760 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2761}
2762
2763/**
2764 * t3_sge_stop - disable SGE operation
2765 * @adap: the adapter
2766 *
 2767 * Disables the DMA engine. This can be called in emergencies (e.g.,
2768 * from error interrupts) or from normal process context. In the latter
2769 * case it also disables any pending queue restart tasklets. Note that
2770 * if it is called in interrupt context it cannot disable the restart
2771 * tasklets as it cannot wait, however the tasklets will have no effect
2772 * since the doorbells are disabled and the driver will call this again
2773 * later from process context, at which time the tasklets will be stopped
2774 * if they are still running.
2775 */
2776void t3_sge_stop(struct adapter *adap)
2777{
2778 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
2779 if (!in_interrupt()) {
2780 int i;
2781
2782 for (i = 0; i < SGE_QSETS; ++i) {
2783 struct sge_qset *qs = &adap->sge.qs[i];
2784
2785 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
2786 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
2787 }
2788 }
2789}
2790
2791/**
2792 * t3_sge_init - initialize SGE
2793 * @adap: the adapter
2794 * @p: the SGE parameters
2795 *
2796 * Performs SGE initialization needed every time after a chip reset.
2797 * We do not initialize any of the queue sets here, instead the driver
2798 * top-level must request those individually. We also do not enable DMA
2799 * here, that should be done after the queues have been set up.
2800 */
2801void t3_sge_init(struct adapter *adap, struct sge_params *p)
2802{
2803 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
2804
2805 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
b881955b 2806 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
4d22de3e
DLR
2807 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
2808 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
2809#if SGE_NUM_GENBITS == 1
2810 ctrl |= F_EGRGENCTRL;
2811#endif
2812 if (adap->params.rev > 0) {
2813 if (!(adap->flags & (USING_MSIX | USING_MSI)))
2814 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
4d22de3e
DLR
2815 }
2816 t3_write_reg(adap, A_SG_CONTROL, ctrl);
2817 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
2818 V_LORCQDRBTHRSH(512));
2819 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
2820 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
6195c71d 2821 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
b881955b
DLR
2822 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
2823 adap->params.rev < T3_REV_C ? 1000 : 500);
4d22de3e
DLR
2824 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
2825 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
2826 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
2827 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
2828 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
2829}
2830
2831/**
2832 * t3_sge_prep - one-time SGE initialization
2833 * @adap: the associated adapter
2834 * @p: SGE parameters
2835 *
2836 * Performs one-time initialization of SGE SW state. Includes determining
2837 * defaults for the assorted SGE parameters, which admins can change until
2838 * they are used to initialize the SGE.
2839 */
7b9b0943 2840void t3_sge_prep(struct adapter *adap, struct sge_params *p)
4d22de3e
DLR
2841{
2842 int i;
2843
2844 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2845 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2846
2847 for (i = 0; i < SGE_QSETS; ++i) {
2848 struct qset_params *q = p->qset + i;
2849
2850 q->polling = adap->params.rev > 0;
2851 q->coalesce_usecs = 5;
2852 q->rspq_size = 1024;
e0994eb1 2853 q->fl_size = 1024;
4d22de3e
DLR
2854 q->jumbo_size = 512;
2855 q->txq_size[TXQ_ETH] = 1024;
2856 q->txq_size[TXQ_OFLD] = 1024;
2857 q->txq_size[TXQ_CTRL] = 256;
2858 q->cong_thres = 0;
2859 }
2860
2861 spin_lock_init(&adap->sge.reg_lock);
2862}
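/*
 * max_pkt_size above is chosen so that a maximum-sized packet, together with
 * its CPL header and the skb_shared_info overhead, still fits within a
 * single 16 KB jumbo free-list buffer (cf. the fl[1].buf_size calculation in
 * t3_sge_alloc_qset()).
 */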
2863
2864/**
2865 * t3_get_desc - dump an SGE descriptor for debugging purposes
2866 * @qs: the queue set
2867 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
2868 * @idx: the descriptor index in the queue
2869 * @data: where to dump the descriptor contents
2870 *
2871 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2872 * size of the descriptor.
2873 */
2874int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2875 unsigned char *data)
2876{
2877 if (qnum >= 6)
2878 return -EINVAL;
2879
2880 if (qnum < 3) {
2881 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2882 return -EINVAL;
2883 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2884 return sizeof(struct tx_desc);
2885 }
2886
2887 if (qnum == 3) {
2888 if (!qs->rspq.desc || idx >= qs->rspq.size)
2889 return -EINVAL;
2890 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2891 return sizeof(struct rsp_desc);
2892 }
2893
2894 qnum -= 4;
2895 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2896 return -EINVAL;
2897 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2898 return sizeof(struct rx_desc);
2899}