/*
 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE 1536

/*
 * If USE_RX_PAGE is defined, the small freelist is populated with (partial)
 * pages instead of skbs.  Pages are carved up into RX_PAGE_SIZE chunks (which
 * must be a multiple of the host page size).
 */
#define USE_RX_PAGE
#define RX_PAGE_SIZE 2048

/*
 * skb freelist packets are copied into a new skb (and the freelist one is
 * reused) if their length is at most SGE_RX_COPY_THRES.
 */
#define SGE_RX_COPY_THRES  256

/*
 * Minimum number of freelist entries before we start dropping TUNNEL frames.
 */
#define SGE_RX_DROP_THRES 16

/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
enum {
	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
};

struct tx_desc {
	u64 flit[TX_DESC_FLITS];
};

struct rx_desc {
	__be32 addr_lo;
	__be32 len_gen;
	__be32 gen2;
	__be32 addr_hi;
};

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
};

struct rx_sw_desc {		/* SW state per Rx descriptor */
	union {
		struct sk_buff *skb;
		struct sge_fl_page page;
	} t;
	DECLARE_PCI_UNMAP_ADDR(dma_addr);
};

struct rsp_desc {		/* response queue descriptor */
	struct rss_header rss_hdr;
	__be32 flags;
	__be32 len_cq;
	u8 imm_data[47];
	u8 intr_gen;
};
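
/*
 * Size note: assuming the 8-byte struct rss_header from t3_cpl.h (not shown
 * in this file), a rsp_desc works out to 8 + 4 + 4 + 47 + 1 = 64 bytes.
 */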

struct unmap_info {		/* packet unmapping info, overlays skb->cb */
	int sflit;		/* start flit of first SGL entry in Tx descriptor */
	u16 fragidx;		/* first page fragment in current Tx descriptor */
	u16 addr_idx;		/* buffer index of first SGL entry in descriptor */
	u32 len;		/* mapped length of skb main body */
};

/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
	struct pci_dev *pdev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 *     desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static u8 flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
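
/*
 * Reading the table back through the formula: with two generation bits the
 * first descriptor holds up to 15 flits (i.e. WR_FLITS == 15) and every
 * further descriptor adds 14, so e.g. 16 flits take two descriptors; with
 * one generation bit the corresponding numbers are 16 and 15.
 */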

static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

/**
 *	refill_rspq - replenish an SGE response queue
 *	@adapter: the adapter
 *	@q: the response queue to replenish
 *	@credits: how many new responses to make available
 *
 *	Replenishes a response queue by making the supplied number of responses
 *	available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
			       const struct sge_rspq *q, unsigned int credits)
{
	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}

/**
 *	need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 *	Returns true if the platform needs sk_buff unmapping.  The result is
 *	a compile-time constant, so the compiler optimizes away the unmapping
 *	code on platforms that do not need it.
 */
static inline int need_skb_unmap(void)
{
	/*
	 * This structure is used to tell if the platform needs buffer
	 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
	 */
	struct dummy {
		DECLARE_PCI_UNMAP_ADDR(addr);
	};

	return sizeof(struct dummy) != 0;
}

/**
 *	unmap_skb - unmap a packet main body and its page fragments
 *	@skb: the packet
 *	@q: the Tx queue containing Tx descriptors for the packet
 *	@cidx: index of Tx descriptor
 *	@pdev: the PCI device
 *
 *	Unmap the main body of an sk_buff and its page fragments, if any.
 *	Because of the fairly complicated structure of our SGLs and the desire
 *	to conserve space for metadata, we keep the information necessary to
 *	unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
 *	in the Tx descriptors (the physical addresses of the various data
 *	buffers).  The send functions initialize the state in skb->cb so we
 *	can unmap the buffers held in the first Tx descriptor here, and we
 *	have enough information at this point to update the state for the next
 *	Tx descriptor.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
			     unsigned int cidx, struct pci_dev *pdev)
{
	const struct sg_ent *sgp;
	struct unmap_info *ui = (struct unmap_info *)skb->cb;
	int nfrags, frag_idx, curflit, j = ui->addr_idx;

	sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];

	if (ui->len) {
		pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
				 PCI_DMA_TODEVICE);
		ui->len = 0;	/* so we know for next descriptor for this skb */
		j = 1;
	}

	frag_idx = ui->fragidx;
	curflit = ui->sflit + 1 + j;
	nfrags = skb_shinfo(skb)->nr_frags;

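	/*
	 * Walk the SGL: j selects one of the two buffer slots of the current
	 * sg_ent and curflit tracks the flit position, so we stop at the
	 * descriptor boundary (WR_FLITS).  A full sg_ent spans three flits.
	 */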
	while (frag_idx < nfrags && curflit < WR_FLITS) {
		pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
			       skb_shinfo(skb)->frags[frag_idx].size,
			       PCI_DMA_TODEVICE);
		j ^= 1;
		if (j == 0) {
			sgp++;
			curflit++;
		}
		curflit++;
		frag_idx++;
	}

	if (frag_idx < nfrags) {	/* SGL continues into next Tx descriptor */
		ui->fragidx = frag_idx;
		ui->addr_idx = j;
		ui->sflit = curflit - WR_FLITS - j;	/* sflit can be -1 */
	}
}

/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
			 unsigned int n)
{
	struct tx_sw_desc *d;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int cidx = q->cidx;

	const int need_unmap = need_skb_unmap() &&
			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {	/* an SGL is present */
			if (need_unmap)
				unmap_skb(d->skb, q, cidx, pdev);
			if (d->skb->priority == cidx)
				kfree_skb(d->skb);
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue's lock held.
 */
static inline void reclaim_completed_tx(struct adapter *adapter,
					struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim) {
		free_tx_desc(adapter, q, reclaim);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
}

/**
 *	should_restart_tx - are there enough resources to restart a Tx queue?
 *	@q: the Tx queue
 *
 *	Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}

/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@pdev: the PCI device associated with the adapter
 *	@q: the SGE free list to clean up
 *
 *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 *	this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];

		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
				 q->buf_size, PCI_DMA_FROMDEVICE);

		if (q->buf_size != RX_PAGE_SIZE) {
			kfree_skb(d->t.skb);
			d->t.skb = NULL;
		} else {
			if (d->t.page.frag.page)
				put_page(d->t.page.frag.page);
			d->t.page.frag.page = NULL;
		}
		if (++cidx == q->size)
			cidx = 0;
	}

	if (q->page.frag.page)
		put_page(q->page.frag.page);
	q->page.frag.page = NULL;
}

/**
 *	add_one_rx_buf - add a packet buffer to a free-buffer list
 *	@va: the virtual address of the buffer to add
 *	@len: the buffer length
 *	@d: the HW Rx descriptor to write
 *	@sd: the SW Rx descriptor to write
 *	@gen: the generation bit value
 *	@pdev: the PCI device associated with the adapter
 *
 *	Add a buffer of the given length to the supplied HW and SW Rx
 *	descriptors.
 */
static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
				  struct rx_desc *d, struct rx_sw_desc *sd,
				  unsigned int gen, struct pci_dev *pdev)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(sd, dma_addr, mapping);

	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
}

/**
 *	refill_fl - refill an SGE free-buffer list
 *	@adap: the adapter
 *	@q: the free-list to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for allocating new buffers
 *
 *	(Re)populate an SGE free-buffer list with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must assure that
 *	@n does not exceed the queue's capacity.
 */
static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];
	struct sge_fl_page *p = &q->page;

	while (n--) {
		unsigned char *va;

		if (unlikely(q->buf_size != RX_PAGE_SIZE)) {
			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);

			if (!skb) {
				q->alloc_failed++;
				break;
			}
			va = skb->data;
			sd->t.skb = skb;
		} else {
			if (!p->frag.page) {
				p->frag.page = alloc_pages(gfp, 0);
				if (unlikely(!p->frag.page)) {
					q->alloc_failed++;
					break;
				} else {
					p->frag.size = RX_PAGE_SIZE;
					p->frag.page_offset = 0;
					p->va = page_address(p->frag.page);
				}
			}

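			/*
			 * Hand the current RX_PAGE_SIZE chunk to this
			 * descriptor; take an extra page reference unless
			 * this was the page's last chunk.
			 */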
			memcpy(&sd->t, p, sizeof(*p));
			va = p->va;

			p->frag.page_offset += RX_PAGE_SIZE;
			BUG_ON(p->frag.page_offset > PAGE_SIZE);
			p->va += RX_PAGE_SIZE;
			if (p->frag.page_offset == PAGE_SIZE)
				p->frag.page = NULL;
			else
				get_page(p->frag.page);
		}

		add_one_rx_buf(va, q->buf_size, d, sd, q->gen, adap->pdev);

		d++;
		sd++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		q->credits++;
	}

	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
}

/**
 *	recycle_rx_buf - recycle a receive buffer
 *	@adap: the adapter
 *	@q: the SGE free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
			   unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to = &q->desc[q->pidx];

	memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof(struct rx_sw_desc));
	to->addr_lo = from->addr_lo;	/* already big endian */
	to->addr_hi = from->addr_hi;	/* likewise */
	wmb();
	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
	q->credits++;

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}

/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@pdev: the PCI device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the physical address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kcalloc(nelem, sw_size, GFP_KERNEL);

		if (!s) {
			dma_free_coherent(&pdev->dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	memset(p, 0, len);
	return p;
}

/**
 *	t3_free_qset - free the resources of an SGE queue set
 *	@adapter: the adapter owning the queue set
 *	@q: the queue set
 *
 *	Release the HW and SW resources associated with an SGE queue set, such
 *	as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 *	queue set must be quiesced prior to calling this.
 */
void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
	int i;
	struct pci_dev *pdev = adapter->pdev;

	if (q->tx_reclaim_timer.function)
		del_timer_sync(&q->tx_reclaim_timer);

	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
		if (q->fl[i].desc) {
			spin_lock(&adapter->sge.reg_lock);
			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
			spin_unlock(&adapter->sge.reg_lock);
			free_rx_bufs(pdev, &q->fl[i]);
			kfree(q->fl[i].sdesc);
			dma_free_coherent(&pdev->dev,
					  q->fl[i].size *
					  sizeof(struct rx_desc), q->fl[i].desc,
					  q->fl[i].phys_addr);
		}

	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
		if (q->txq[i].desc) {
			spin_lock(&adapter->sge.reg_lock);
			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
			spin_unlock(&adapter->sge.reg_lock);
			if (q->txq[i].sdesc) {
				free_tx_desc(adapter, &q->txq[i],
					     q->txq[i].in_use);
				kfree(q->txq[i].sdesc);
			}
			dma_free_coherent(&pdev->dev,
					  q->txq[i].size *
					  sizeof(struct tx_desc),
					  q->txq[i].desc, q->txq[i].phys_addr);
			__skb_queue_purge(&q->txq[i].sendq);
		}

	if (q->rspq.desc) {
		spin_lock(&adapter->sge.reg_lock);
		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
		spin_unlock(&adapter->sge.reg_lock);
		dma_free_coherent(&pdev->dev,
				  q->rspq.size * sizeof(struct rsp_desc),
				  q->rspq.desc, q->rspq.phys_addr);
	}

	if (q->netdev)
		q->netdev->atalk_ptr = NULL;

	memset(q, 0, sizeof(*q));
}

/**
 *	init_qset_cntxt - initialize an SGE queue set context info
 *	@qs: the queue set
 *	@id: the queue set id
 *
 *	Initializes the TIDs and context ids for the queues of a queue set.
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
	return (3 * n) / 2 + (n & 1);
}
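
/*
 * Worked example: a struct sg_ent packs two address/length pairs into three
 * flits, so sgl_len(3) = 5: three flits for the first two entries plus two
 * for the odd final entry, whose unused length slot make_sgl() zeroes.
 */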

/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Calculates the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
	return flit_desc_map[n];
}

/**
 *	get_imm_packet - return the next ingress packet buffer from a response
 *	@resp: the response descriptor containing the packet data
 *
 *	Return a packet containing the immediate data of the given response.
 */
static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
{
	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);

	if (skb) {
		__skb_put(skb, IMMED_PKT_SIZE);
		memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
	}
	return skb;
}

/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet.  Ethernet packets require addition of WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	unsigned int flits;

	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
		return 1;

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
	if (skb_shinfo(skb)->gso_size)
		flits++;
	return flits_to_desc(flits);
}
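
/*
 * Example: a non-GSO packet with a linear head and three page fragments
 * needs sgl_len(4) + 2 = 8 flits (the WR and CPL headers account for the
 * 2), which flit_desc_map resolves to a single Tx descriptor.
 */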

/**
 *	make_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@sgp: the SGL to populate
 *	@start: start address of skb main body data to include in the SGL
 *	@len: length of skb main body data to include in the SGL
 *	@pdev: the PCI device
 *
 *	Generates a scatter/gather list for the buffers that make up a packet
 *	and returns the SGL size in 8-byte words.  The caller must size the SGL
 *	appropriately.
 */
static inline unsigned int make_sgl(const struct sk_buff *skb,
				    struct sg_ent *sgp, unsigned char *start,
				    unsigned int len, struct pci_dev *pdev)
{
	dma_addr_t mapping;
	unsigned int i, j = 0, nfrags;

	if (len) {
		mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
		sgp->len[0] = cpu_to_be32(len);
		sgp->addr[0] = cpu_to_be64(mapping);
		j = 1;
	}

	nfrags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = pci_map_page(pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		sgp->len[j] = cpu_to_be32(frag->size);
		sgp->addr[j] = cpu_to_be64(mapping);
		j ^= 1;
		if (j == 0)
			++sgp;
	}
	if (j)
		sgp->len[j] = 0;
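	/* Total flits produced: equivalent to sgl_len(nfrags + (len != 0)). */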
	return ((nfrags + (len != 0)) * 3) / 2 + j;
}

/**
 *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *
 *	Ring the doorbell if a Tx queue is asleep.  There is a natural race
 *	where the HW may go to sleep just after we check; in that case the
 *	interrupt handler will detect the outstanding Tx packet and ring the
 *	doorbell for us.
 *
 *	When GTS is disabled we unconditionally ring the doorbell.
 */
static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
#if USE_GTS
	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
		set_bit(TXQ_LAST_PKT_DB, &q->flags);
		t3_write_reg(adap, A_SG_KDOORBELL,
			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	}
#else
	wmb();			/* write descriptors before telling HW */
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
#endif
}

static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
{
#if SGE_NUM_GENBITS == 2
	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
#endif
}

/**
 *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
 *	@ndesc: number of Tx descriptors spanned by the SGL
 *	@skb: the packet corresponding to the WR
 *	@d: first Tx descriptor to be written
 *	@pidx: index of above descriptors
 *	@q: the SGE Tx queue
 *	@sgl: the SGL
 *	@flits: number of flits to the start of the SGL in the first descriptor
 *	@sgl_flits: the SGL size in flits
 *	@gen: the Tx descriptor generation
 *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
 *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
 *
 *	Write a work request header and an associated SGL.  If the SGL is
 *	small enough to fit into one Tx descriptor it has already been written
 *	and we just need to write the WR header.  Otherwise we distribute the
 *	SGL across the number of descriptors it spans.
 */
static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
			     struct tx_desc *d, unsigned int pidx,
			     const struct sge_txq *q,
			     const struct sg_ent *sgl,
			     unsigned int flits, unsigned int sgl_flits,
			     unsigned int gen, unsigned int wr_hi,
			     unsigned int wr_lo)
{
	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
	struct tx_sw_desc *sd = &q->sdesc[pidx];

	sd->skb = skb;
	if (need_skb_unmap()) {
		struct unmap_info *ui = (struct unmap_info *)skb->cb;

		ui->fragidx = 0;
		ui->addr_idx = 0;
		ui->sflit = flits;
	}

	if (likely(ndesc == 1)) {
		skb->priority = pidx;
		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;
		wmb();
		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
				   V_WR_GEN(gen)) | wr_lo;
		wr_gen2(d, gen);
	} else {
		unsigned int ogen = gen;
		const u64 *fp = (const u64 *)sgl;
		struct work_request_hdr *wp = wrp;

		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;

		while (sgl_flits) {
			unsigned int avail = WR_FLITS - flits;

			if (avail > sgl_flits)
				avail = sgl_flits;
			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
			sgl_flits -= avail;
			ndesc--;
			if (!sgl_flits)
				break;

			fp += avail;
			d++;
			sd++;
			if (++pidx == q->size) {
				pidx = 0;
				gen ^= 1;
				d = q->desc;
				sd = q->sdesc;
			}

			sd->skb = skb;
			wrp = (struct work_request_hdr *)d;
			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
					   V_WR_SGLSFLT(1)) | wr_hi;
			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
							sgl_flits + 1)) |
					   V_WR_GEN(gen)) | wr_lo;
			wr_gen2(d, gen);
			flits = 1;
		}
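		/*
		 * The first descriptor's wr_lo and generation bits are still
		 * at their old values; writing them last (below) publishes
		 * the whole multi-descriptor WR to the SGE atomically.
		 */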
		skb->priority = pidx;
		wrp->wr_hi |= htonl(F_WR_EOP);
		wmb();
		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
		wr_gen2((struct tx_desc *)wp, ogen);
		WARN_ON(ndesc != 0);
	}
}

/**
 *	write_tx_pkt_wr - write a TX_PKT work request
 *	@adap: the adapter
 *	@skb: the packet to send
 *	@pi: the egress interface
 *	@pidx: index of the first Tx descriptor to write
 *	@gen: the generation value to use
 *	@q: the Tx queue
 *	@ndesc: number of descriptors the packet will occupy
 *	@compl: the value of the COMPL bit to use
 *
 *	Generate a TX_PKT work request to send the supplied packet.
 */
static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
			    const struct port_info *pi,
			    unsigned int pidx, unsigned int gen,
			    struct sge_txq *q, unsigned int ndesc,
			    unsigned int compl)
{
	unsigned int flits, sgl_flits, cntrl, tso_info;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];
	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;

	cpl->len = htonl(skb->len | 0x80000000);
	cntrl = V_TXPKT_INTF(pi->port_id);

	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));

	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
	if (tso_info) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;

		d->flit[2] = 0;
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
		hdr->cntrl = htonl(cntrl);
		eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
		    CPL_ETH_II : CPL_ETH_II_VLAN;
		tso_info |= V_LSO_ETH_TYPE(eth_type) |
		    V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) |
		    V_LSO_TCPHDR_WORDS(skb->h.th->doff);
		hdr->lso_info = htonl(tso_info);
		flits = 3;
	} else {
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
		cntrl |= F_TXPKT_IPCSUM_DIS;	/* SW calculates IP csum */
		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
		cpl->cntrl = htonl(cntrl);

		if (skb->len <= WR_LEN - sizeof(*cpl)) {
			q->sdesc[pidx].skb = NULL;
			if (!skb->data_len)
				memcpy(&d->flit[2], skb->data, skb->len);
			else
				skb_copy_bits(skb, 0, &d->flit[2], skb->len);

			flits = (skb->len + 7) / 8 + 2;
			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
					      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
					      | F_WR_SOP | F_WR_EOP | compl);
			wmb();
			cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
					      V_WR_TID(q->token));
			wr_gen2(d, gen);
			kfree_skb(skb);
			return;
		}

		flits = 2;
	}

	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
	if (need_skb_unmap())
		((struct unmap_info *)skb->cb)->len = skb_headlen(skb);

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
			 htonl(V_WR_TID(q->token)));
}

/**
 *	t3_eth_xmit - add a packet to the Ethernet Tx queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
 */
int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int ndesc, pidx, credits, gen, compl;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = dev->priv;
	struct sge_qset *qs = dev2qset(dev);
	struct sge_txq *q = &qs->txq[TXQ_ETH];

	/*
	 * The chip min packet length is 9 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(skb->len < ETH_HLEN)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock(&q->lock);
	reclaim_completed_tx(adap, q);

	credits = q->size - q->in_use;
	ndesc = calc_tx_descs(skb);

	if (unlikely(credits < ndesc)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			set_bit(TXQ_ETH, &qs->txq_stopped);
			q->stops++;
			dev_err(&adap->pdev->dev,
				"%s: Tx ring %u full while queue awake!\n",
				dev->name, q->cntxt_id & 7);
		}
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

	q->in_use += ndesc;
	if (unlikely(credits - ndesc < q->stop_thres)) {
		q->stops++;
		netif_stop_queue(dev);
		set_bit(TXQ_ETH, &qs->txq_stopped);
#if !USE_GTS
		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
			q->restarts++;
			netif_wake_queue(dev);
		}
#endif
	}

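	/*
	 * Request a WR completion roughly once per 8 descriptors so that
	 * q->processed keeps advancing even without a steady packet stream.
	 */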
	gen = q->gen;
	q->unacked += ndesc;
	compl = (q->unacked & 8) << (S_WR_COMPL - 3);
	q->unacked &= 7;
	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}

	/* update port statistics */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
	if (skb_shinfo(skb)->gso_size)
		qs->port_stats[SGE_PSTAT_TSO]++;
	if (vlan_tx_tag_present(skb) && pi->vlan_grp)
		qs->port_stats[SGE_PSTAT_VLANINS]++;

	dev->trans_start = jiffies;
	spin_unlock(&q->lock);

	/*
	 * We do not use Tx completion interrupts to free DMAd Tx packets.
	 * This is good for performance but means that we rely on new Tx
	 * packets arriving to run the destructors of completed packets,
	 * which open up space in their sockets' send queues.  Sometimes
	 * we do not get such new packets causing Tx to stall.  A single
	 * UDP transmitter is a good example of this situation.  We have
	 * a clean up timer that periodically reclaims completed packets
	 * but it doesn't run often enough (nor do we want it to) to prevent
	 * lengthy stalls.  A solution to this problem is to run the
	 * destructor early, after the packet is queued but before it's DMAd.
	 * A cons is that we lie to socket memory accounting, but the amount
	 * of extra memory is reasonable (limited by the number of Tx
	 * descriptors), the packets do actually get freed quickly by new
	 * packets almost always, and for protocols like TCP that wait for
	 * acks to really free up the data the extra memory is even less.
	 * On the positive side we run the destructors on the sending CPU
	 * rather than on a potentially different completing CPU, usually a
	 * good thing.  We also run them without holding our Tx queue lock,
	 * unlike what reclaim_completed_tx() would otherwise do.
	 *
	 * Run the destructor before telling the DMA engine about the packet
	 * to make sure it doesn't complete and get freed prematurely.
	 */
	if (likely(!skb_shared(skb)))
		skb_orphan(skb);

	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
	check_ring_tx_db(adap, q);
	return NETDEV_TX_OK;
}

/**
 *	write_imm - write a packet into a Tx descriptor as immediate data
 *	@d: the Tx descriptor to write
 *	@skb: the packet
 *	@len: the length of packet data to write as immediate data
 *	@gen: the generation bit value to write
 *
 *	Writes a packet as immediate data into a Tx descriptor.  The packet
 *	contains a work request at its beginning.  We must write the packet
 *	carefully so the SGE doesn't accidentally read it before it's written
 *	in its entirety.
 */
static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
			     unsigned int len, unsigned int gen)
{
	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
	struct work_request_hdr *to = (struct work_request_hdr *)d;

	memcpy(&to[1], &from[1], len - sizeof(*from));
	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
					V_WR_BCNTLFLT(len & 7));
	wmb();
	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
					V_WR_LEN((len + 7) / 8));
	wr_gen2(d, gen);
	kfree_skb(skb);
}

/**
 *	check_desc_avail - check descriptor availability on a send queue
 *	@adap: the adapter
 *	@q: the send queue
 *	@skb: the packet needing the descriptors
 *	@ndesc: the number of Tx descriptors needed
 *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
 *
 *	Checks if the requested number of Tx descriptors is available on an
 *	SGE send queue.  If the queue is already suspended or not enough
 *	descriptors are available the packet is queued for later transmission.
 *	Must be called with the Tx queue locked.
 *
 *	Returns 0 if enough descriptors are available, 1 if there aren't
 *	enough descriptors and the packet has been queued, and 2 if the caller
 *	needs to retry because there weren't enough descriptors at the
 *	beginning of the call but some freed up in the meantime.
 */
static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
				   struct sk_buff *skb, unsigned int ndesc,
				   unsigned int qid)
{
	if (unlikely(!skb_queue_empty(&q->sendq))) {
	      addq_exit:__skb_queue_tail(&q->sendq, skb);
		return 1;
	}
	if (unlikely(q->size - q->in_use < ndesc)) {
		struct sge_qset *qs = txq_to_qset(q, qid);

		set_bit(qid, &qs->txq_stopped);
		smp_mb__after_clear_bit();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(qid, &qs->txq_stopped))
			return 2;

		q->stops++;
		goto addq_exit;
	}
	return 0;
}

/**
 *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 *	@q: the SGE control Tx queue
 *
 *	This is a variant of reclaim_completed_tx() that is used for Tx queues
 *	that send only immediate data (presently just the control queues) and
 *	thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	q->in_use -= reclaim;
	q->cleaned += reclaim;
}

static inline int immediate(const struct sk_buff *skb)
{
	return skb->len <= WR_LEN && !skb->data_len;
}

/**
 *	ctrl_xmit - send a packet through an SGE control Tx queue
 *	@adap: the adapter
 *	@q: the control queue
 *	@skb: the packet
 *
 *	Send a packet through an SGE control Tx queue.  Packets sent through
 *	a control queue must fit entirely as immediate data in a single Tx
 *	descriptor and have no page fragments.
 */
static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
		     struct sk_buff *skb)
{
	int ret;
	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;

	if (unlikely(!immediate(skb))) {
		WARN_ON(1);
		dev_kfree_skb(skb);
		return NET_XMIT_SUCCESS;
	}

	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
	wrp->wr_lo = htonl(V_WR_TID(q->token));

	spin_lock(&q->lock);
      again:reclaim_completed_tx_imm(q);

	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
	if (unlikely(ret)) {
		if (ret == 1) {
			spin_unlock(&q->lock);
			return NET_XMIT_CN;
		}
		goto again;
	}

	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

	q->in_use++;
	if (++q->pidx >= q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);
	wmb();
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	return NET_XMIT_SUCCESS;
}

/**
 *	restart_ctrlq - restart a suspended control queue
 *	@data: the queue set containing the control queue
 *
 *	Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(unsigned long data)
{
	struct sk_buff *skb;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct sge_txq *q = &qs->txq[TXQ_CTRL];
	struct adapter *adap = qs->netdev->priv;

	spin_lock(&q->lock);
      again:reclaim_completed_tx_imm(q);

	while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {

		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

		if (++q->pidx >= q->size) {
			q->pidx = 0;
			q->gen ^= 1;
		}
		q->in_use++;
	}

	if (!skb_queue_empty(&q->sendq)) {
		set_bit(TXQ_CTRL, &qs->txq_stopped);
		smp_mb__after_clear_bit();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
			goto again;
		q->stops++;
	}

	spin_unlock(&q->lock);
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}

/*
 * Send a management message through control queue 0
 */
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
}

/**
 *	deferred_unmap_destructor - unmap a packet when it is freed
 *	@skb: the packet
 *
 *	This is the packet destructor used for Tx packets that need to remain
 *	mapped until they are freed rather than until their Tx descriptors are
 *	freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	int i;
	const dma_addr_t *p;
	const struct skb_shared_info *si;
	const struct deferred_unmap_info *dui;
	const struct unmap_info *ui = (struct unmap_info *)skb->cb;

	dui = (struct deferred_unmap_info *)skb->head;
	p = dui->addr;

	if (ui->len)
		pci_unmap_single(dui->pdev, *p++, ui->len, PCI_DMA_TODEVICE);

	si = skb_shinfo(skb);
	for (i = 0; i < si->nr_frags; i++)
		pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
			       PCI_DMA_TODEVICE);
}

static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
				     const struct sg_ent *sgl, int sgl_flits)
{
	dma_addr_t *p;
	struct deferred_unmap_info *dui;

	dui = (struct deferred_unmap_info *)skb->head;
	dui->pdev = pdev;
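	/*
	 * Harvest the DMA addresses two at a time: each full 3-flit sg_ent
	 * holds a pair of them.
	 */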
	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
		*p++ = be64_to_cpu(sgl->addr[0]);
		*p++ = be64_to_cpu(sgl->addr[1]);
	}
	if (sgl_flits)
		*p = be64_to_cpu(sgl->addr[0]);
}

/**
 *	write_ofld_wr - write an offload work request
 *	@adap: the adapter
 *	@skb: the packet to send
 *	@q: the Tx queue
 *	@pidx: index of the first Tx descriptor to write
 *	@gen: the generation value to use
 *	@ndesc: number of descriptors the packet will occupy
 *
 *	Write an offload work request to send the supplied packet.  The packet
 *	data already carry the work request with most fields populated.
 */
static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
			  struct sge_txq *q, unsigned int pidx,
			  unsigned int gen, unsigned int ndesc)
{
	unsigned int sgl_flits, flits;
	struct work_request_hdr *from;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];

	if (immediate(skb)) {
		q->sdesc[pidx].skb = NULL;
		write_imm(d, skb, skb->len, gen);
		return;
	}

	/* Only TX_DATA builds SGLs */

	from = (struct work_request_hdr *)skb->data;
	memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from));

	flits = (skb->h.raw - skb->data) / 8;
	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
			     adap->pdev);
	if (need_skb_unmap()) {
		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
		skb->destructor = deferred_unmap_destructor;
		((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
	}

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
			 gen, from->wr_hi, from->wr_lo);
}

/**
 *	calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
 *	@skb: the packet
 *
 *	Returns the number of Tx descriptors needed for the given offload
 *	packet.  These packets are already fully constructed.
 */
static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;

	if (skb->len <= WR_LEN && cnt == 0)
		return 1;	/* packet fits as immediate data */

	flits = (skb->h.raw - skb->data) / 8;	/* headers */
	if (skb->tail != skb->h.raw)
		cnt++;
	return flits_to_desc(flits + sgl_len(cnt));
}

/**
 *	ofld_xmit - send a packet through an offload queue
 *	@adap: the adapter
 *	@q: the Tx offload queue
 *	@skb: the packet
 *
 *	Send an offload packet through an SGE offload queue.
 */
static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
		     struct sk_buff *skb)
{
	int ret;
	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;

	spin_lock(&q->lock);
      again:reclaim_completed_tx(adap, q);

	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
	if (unlikely(ret)) {
		if (ret == 1) {
			skb->priority = ndesc;	/* save for restart */
			spin_unlock(&q->lock);
			return NET_XMIT_CN;
		}
		goto again;
	}

	gen = q->gen;
	q->in_use += ndesc;
	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);

	write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
	check_ring_tx_db(adap, q);
	return NET_XMIT_SUCCESS;
}

/**
 *	restart_offloadq - restart a suspended offload queue
 *	@data: the queue set containing the offload queue
 *
 *	Resumes transmission on a suspended Tx offload queue.
 */
static void restart_offloadq(unsigned long data)
{
	struct sk_buff *skb;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct sge_txq *q = &qs->txq[TXQ_OFLD];
	struct adapter *adap = qs->netdev->priv;

	spin_lock(&q->lock);
      again:reclaim_completed_tx(adap, q);

	while ((skb = skb_peek(&q->sendq)) != NULL) {
		unsigned int gen, pidx;
		unsigned int ndesc = skb->priority;

		if (unlikely(q->size - q->in_use < ndesc)) {
			set_bit(TXQ_OFLD, &qs->txq_stopped);
			smp_mb__after_clear_bit();

			if (should_restart_tx(q) &&
			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
				goto again;
			q->stops++;
			break;
		}

		gen = q->gen;
		q->in_use += ndesc;
		pidx = q->pidx;
		q->pidx += ndesc;
		if (q->pidx >= q->size) {
			q->pidx -= q->size;
			q->gen ^= 1;
		}
		__skb_unlink(skb, &q->sendq);
		spin_unlock(&q->lock);

		write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
		spin_lock(&q->lock);
	}
	spin_unlock(&q->lock);

#if USE_GTS
	set_bit(TXQ_RUNNING, &q->flags);
	set_bit(TXQ_LAST_PKT_DB, &q->flags);
#endif
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}

/**
 *	queue_set - return the queue set a packet should use
 *	@skb: the packet
 *
 *	Maps a packet to the SGE queue set it should use.  The desired queue
 *	set is carried in bits 1-3 in the packet's priority.
 */
static inline int queue_set(const struct sk_buff *skb)
{
	return skb->priority >> 1;
}

/**
 *	is_ctrl_pkt - return whether an offload packet is a control packet
 *	@skb: the packet
 *
 *	Determines whether an offload packet should use an OFLD or a CTRL
 *	Tx queue.  This is indicated by bit 0 in the packet's priority.
 */
static inline int is_ctrl_pkt(const struct sk_buff *skb)
{
	return skb->priority & 1;
}

/**
 *	t3_offload_tx - send an offload packet
 *	@tdev: the offload device to send to
 *	@skb: the packet
 *
 *	Sends an offload packet.  We use the packet priority to select the
 *	appropriate Tx queue as follows: bit 0 indicates whether the packet
 *	should be sent as regular or control, bits 1-3 select the queue set.
 */
int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	struct adapter *adap = tdev2adap(tdev);
	struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];

	if (unlikely(is_ctrl_pkt(skb)))
		return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);

	return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
}
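
/*
 * For example, a caller targeting the control queue of queue set 2 would set
 * skb->priority = (2 << 1) | 1 before handing the packet to t3_offload_tx().
 */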
1498 | ||
1499 | /** | |
1500 | * offload_enqueue - add an offload packet to an SGE offload receive queue | |
1501 | * @q: the SGE response queue | |
1502 | * @skb: the packet | |
1503 | * | |
1504 | * Add a new offload packet to an SGE response queue's offload packet | |
1505 | * queue. If the packet is the first on the queue it schedules the RX | |
1506 | * softirq to process the queue. | |
1507 | */ | |
1508 | static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb) | |
1509 | { | |
1510 | skb->next = skb->prev = NULL; | |
1511 | if (q->rx_tail) | |
1512 | q->rx_tail->next = skb; | |
1513 | else { | |
1514 | struct sge_qset *qs = rspq_to_qset(q); | |
1515 | ||
1516 | if (__netif_rx_schedule_prep(qs->netdev)) | |
1517 | __netif_rx_schedule(qs->netdev); | |
1518 | q->rx_head = skb; | |
1519 | } | |
1520 | q->rx_tail = skb; | |
1521 | } | |
1522 | ||
1523 | /** | |
1524 | * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts | |
1525 | * @tdev: the offload device that will be receiving the packets | |
1526 | * @q: the SGE response queue that assembled the bundle | |
1527 | * @skbs: the partial bundle | |
1528 | * @n: the number of packets in the bundle | |
1529 | * | |
1530 | * Delivers a (partial) bundle of Rx offload packets to an offload device. | |
1531 | */ | |
1532 | static inline void deliver_partial_bundle(struct t3cdev *tdev, | |
1533 | struct sge_rspq *q, | |
1534 | struct sk_buff *skbs[], int n) | |
1535 | { | |
1536 | if (n) { | |
1537 | q->offload_bundles++; | |
1538 | tdev->recv(tdev, skbs, n); | |
1539 | } | |
1540 | } | |
1541 | ||
1542 | /** | |
1543 | * ofld_poll - NAPI handler for offload packets in interrupt mode | |
1544 | * @dev: the network device doing the polling | |
1545 | * @budget: polling budget | |
1546 | * | |
1547 | * The NAPI handler for offload packets when a response queue is serviced | |
1548 | * by the hard interrupt handler, i.e., when it's operating in non-polling | |
1549 | * mode. Creates small packet batches and sends them through the offload | |
1550 | * receive handler. Batches need to be of modest size as we do prefetches | |
1551 | * on the packets in each. | |
1552 | */ | |
1553 | static int ofld_poll(struct net_device *dev, int *budget) | |
1554 | { | |
1555 | struct adapter *adapter = dev->priv; | |
1556 | struct sge_qset *qs = dev2qset(dev); | |
1557 | struct sge_rspq *q = &qs->rspq; | |
1558 | int work_done, limit = min(*budget, dev->quota), avail = limit; | |
1559 | ||
1560 | while (avail) { | |
1561 | struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE]; | |
1562 | int ngathered; | |
1563 | ||
1564 | spin_lock_irq(&q->lock); | |
1565 | head = q->rx_head; | |
1566 | if (!head) { | |
1567 | work_done = limit - avail; | |
1568 | *budget -= work_done; | |
1569 | dev->quota -= work_done; | |
1570 | __netif_rx_complete(dev); | |
1571 | spin_unlock_irq(&q->lock); | |
1572 | return 0; | |
1573 | } | |
1574 | ||
1575 | tail = q->rx_tail; | |
1576 | q->rx_head = q->rx_tail = NULL; | |
1577 | spin_unlock_irq(&q->lock); | |
1578 | ||
1579 | for (ngathered = 0; avail && head; avail--) { | |
1580 | prefetch(head->data); | |
1581 | skbs[ngathered] = head; | |
1582 | head = head->next; | |
1583 | skbs[ngathered]->next = NULL; | |
1584 | if (++ngathered == RX_BUNDLE_SIZE) { | |
1585 | q->offload_bundles++; | |
1586 | adapter->tdev.recv(&adapter->tdev, skbs, | |
1587 | ngathered); | |
1588 | ngathered = 0; | |
1589 | } | |
1590 | } | |
1591 | if (head) { /* splice remaining packets back onto Rx queue */ | |
1592 | spin_lock_irq(&q->lock); | |
1593 | tail->next = q->rx_head; | |
1594 | if (!q->rx_head) | |
1595 | q->rx_tail = tail; | |
1596 | q->rx_head = head; | |
1597 | spin_unlock_irq(&q->lock); | |
1598 | } | |
1599 | deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered); | |
1600 | } | |
1601 | work_done = limit - avail; | |
1602 | *budget -= work_done; | |
1603 | dev->quota -= work_done; | |
1604 | return 1; | |
1605 | } | |
1606 | ||
1607 | /** | |
1608 | * rx_offload - process a received offload packet | |
1609 | * @tdev: the offload device receiving the packet | |
1610 | * @rq: the response queue that received the packet | |
1611 | * @skb: the packet | |
1612 | * @rx_gather: a gather list of packets if we are building a bundle | |
1613 | * @gather_idx: index of the next available slot in the bundle | |
1614 | * | |
1615 | * Process an ingress offload packet and add it to the offload ingress | |
1616 | * queue. Returns the index of the next available slot in the bundle. | |
1617 | */ | |
1618 | static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq, | |
1619 | struct sk_buff *skb, struct sk_buff *rx_gather[], | |
1620 | unsigned int gather_idx) | |
1621 | { | |
1622 | rq->offload_pkts++; | |
1623 | skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data; | |
1624 | ||
1625 | if (rq->polling) { | |
1626 | rx_gather[gather_idx++] = skb; | |
1627 | if (gather_idx == RX_BUNDLE_SIZE) { | |
1628 | tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE); | |
1629 | gather_idx = 0; | |
1630 | rq->offload_bundles++; | |
1631 | } | |
1632 | } else | |
1633 | offload_enqueue(rq, skb); | |
1634 | ||
1635 | return gather_idx; | |
1636 | } | |
1637 | ||
4d22de3e DLR |
1638 | /** |
1639 | * restart_tx - check whether to restart suspended Tx queues | |
1640 | * @qs: the queue set to resume | |
1641 | * | |
1642 | * Restarts suspended Tx queues of an SGE queue set if they have enough | |
1643 | * free resources to resume operation. | |
1644 | */ | |
1645 | static void restart_tx(struct sge_qset *qs) | |
1646 | { | |
1647 | if (test_bit(TXQ_ETH, &qs->txq_stopped) && | |
1648 | should_restart_tx(&qs->txq[TXQ_ETH]) && | |
1649 | test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) { | |
1650 | qs->txq[TXQ_ETH].restarts++; | |
1651 | if (netif_running(qs->netdev)) | |
1652 | netif_wake_queue(qs->netdev); | |
1653 | } | |
1654 | ||
1655 | if (test_bit(TXQ_OFLD, &qs->txq_stopped) && | |
1656 | should_restart_tx(&qs->txq[TXQ_OFLD]) && | |
1657 | test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) { | |
1658 | qs->txq[TXQ_OFLD].restarts++; | |
1659 | tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk); | |
1660 | } | |
1661 | if (test_bit(TXQ_CTRL, &qs->txq_stopped) && | |
1662 | should_restart_tx(&qs->txq[TXQ_CTRL]) && | |
1663 | test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) { | |
1664 | qs->txq[TXQ_CTRL].restarts++; | |
1665 | tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk); | |
1666 | } | |
1667 | } | |
1668 | ||
1669 | /** | |
1670 | * rx_eth - process an ingress ethernet packet | |
1671 | * @adap: the adapter | |
1672 | * @rq: the response queue that received the packet | |
1673 | * @skb: the packet | |
1674 | * @pad: amount of padding at the start of the buffer | |
1675 | * | |
1676 | * Process an ingress ethernet packet and deliver it to the stack. | |
1677 | * The padding is 2 if the packet was delivered in an Rx buffer and 0 | |
1678 | * if it was immediate data in a response. | |
1679 | */ | |
1680 | static void rx_eth(struct adapter *adap, struct sge_rspq *rq, | |
1681 | struct sk_buff *skb, int pad) | |
1682 | { | |
1683 | struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad); | |
1684 | struct port_info *pi; | |
1685 | ||
4d22de3e | 1686 | skb_pull(skb, sizeof(*p) + pad); |
4d22de3e | 1687 | skb->dev->last_rx = jiffies; |
4c13eb66 | 1688 | skb->protocol = eth_type_trans(skb, adap->port[p->iff]); |
4d22de3e DLR |
1689 | pi = netdev_priv(skb->dev); |
1690 | if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff && | |
1691 | !p->fragment) { | |
1692 | rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; | |
1693 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1694 | } else | |
1695 | skb->ip_summed = CHECKSUM_NONE; | |
1696 | ||
1697 | if (unlikely(p->vlan_valid)) { | |
1698 | struct vlan_group *grp = pi->vlan_grp; | |
1699 | ||
1700 | rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++; | |
1701 | if (likely(grp)) | |
1702 | __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan), | |
1703 | rq->polling); | |
1704 | else | |
1705 | dev_kfree_skb_any(skb); | |
1706 | } else if (rq->polling) | |
1707 | netif_receive_skb(skb); | |
1708 | else | |
1709 | netif_rx(skb); | |
1710 | } | |
1711 | ||
e0994eb1 DLR |
1712 | #define SKB_DATA_SIZE 128 |
1713 | ||
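/*
 * skb_data_init - attach a free-list page chunk to a fresh skb
 *
 * Copies up to SKB_DATA_SIZE bytes of the packet into the skb's linear
 * data area; if the packet is larger, the original page is kept and
 * attached as the skb's first page fragment. Either way the caller's
 * page reference is consumed.
 */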
1714 | static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p, | |
1715 | unsigned int len) | |
1716 | { | |
1717 | skb->len = len; | |
1718 | if (len <= SKB_DATA_SIZE) { | |
1719 | memcpy(skb->data, p->va, len); | |
1720 | skb->tail += len; | |
1721 | put_page(p->frag.page); | |
1722 | } else { | |
1723 | memcpy(skb->data, p->va, SKB_DATA_SIZE); | |
1724 | skb_shinfo(skb)->frags[0].page = p->frag.page; | |
1725 | skb_shinfo(skb)->frags[0].page_offset = | |
1726 | p->frag.page_offset + SKB_DATA_SIZE; | |
1727 | skb_shinfo(skb)->frags[0].size = len - SKB_DATA_SIZE; | |
1728 | skb_shinfo(skb)->nr_frags = 1; | |
1729 | skb->data_len = len - SKB_DATA_SIZE; | |
1730 | skb->tail += SKB_DATA_SIZE; | |
1731 | skb->truesize += skb->data_len; | |
1732 | } | |
1733 | } | |
1734 | ||
1735 | /** | |
1736 | * get_packet - return the next ingress packet buffer from a free list | |
1737 | * @adap: the adapter that received the packet | |
1738 | * @fl: the SGE free list holding the packet | |
1739 | * @len: the packet length including any SGE padding | |
1740 | * @drop_thres: # of remaining buffers before we start dropping packets | |
1741 | * | |
1742 | * Get the next packet from a free list and complete setup of the | |
1743 | * sk_buff. If the packet is small we make a copy and recycle the | |
1744 | * original buffer, otherwise we use the original buffer itself. If a | |
1745 | * positive drop threshold is supplied packets are dropped and their | |
1746 | * buffers recycled if (a) the number of remaining buffers is under the | |
1747 | * threshold and the packet is too big to copy, or (b) the packet should | |
1748 | * be copied but there is no memory for the copy. | |
1749 | */ | |
1750 | static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl, | |
1751 | unsigned int len, unsigned int drop_thres) | |
1752 | { | |
1753 | struct sk_buff *skb = NULL; | |
1754 | struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; | |
1755 | ||
1756 | prefetch(sd->t.skb->data); | |
1757 | ||
1758 | if (len <= SGE_RX_COPY_THRES) { | |
1759 | skb = alloc_skb(len, GFP_ATOMIC); | |
1760 | if (likely(skb != NULL)) { | |
1761 | struct rx_desc *d = &fl->desc[fl->cidx]; | |
1762 | dma_addr_t mapping = | |
1763 | (dma_addr_t)((u64) be32_to_cpu(d->addr_hi) << 32 | | |
1764 | be32_to_cpu(d->addr_lo)); | |
1765 | ||
1766 | __skb_put(skb, len); | |
1767 | pci_dma_sync_single_for_cpu(adap->pdev, mapping, len, | |
1768 | PCI_DMA_FROMDEVICE); | |
1769 | memcpy(skb->data, sd->t.skb->data, len); | |
1770 | pci_dma_sync_single_for_device(adap->pdev, mapping, len, | |
1771 | PCI_DMA_FROMDEVICE); | |
1772 | } else if (!drop_thres) | |
1773 | goto use_orig_buf; | |
1774 | recycle: | |
1775 | recycle_rx_buf(adap, fl, fl->cidx); | |
1776 | return skb; | |
1777 | } | |
1778 | ||
1779 | if (unlikely(fl->credits < drop_thres)) | |
1780 | goto recycle; | |
1781 | ||
1782 | use_orig_buf: | |
1783 | pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr), | |
1784 | fl->buf_size, PCI_DMA_FROMDEVICE); | |
1785 | skb = sd->t.skb; | |
1786 | skb_put(skb, len); | |
1787 | __refill_fl(adap, fl); | |
1788 | return skb; | |
1789 | } | |
1790 | ||
4d22de3e DLR |
1791 | /** |
1792 | * handle_rsp_cntrl_info - handles control information in a response | |
1793 | * @qs: the queue set corresponding to the response | |
1794 | * @flags: the response control flags | |
4d22de3e DLR |
1795 | * |
1796 | * Handles the control information of an SGE response, such as GTS | |
1797 | * indications and completion credits for the queue set's Tx queues. | |
6195c71d | 1798 | * The HW coalesces credits; we don't do any extra SW coalescing. |
4d22de3e | 1799 | */ |
6195c71d | 1800 | static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags) |
4d22de3e DLR |
1801 | { |
1802 | unsigned int credits; | |
1803 | ||
1804 | #if USE_GTS | |
1805 | if (flags & F_RSPD_TXQ0_GTS) | |
1806 | clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags); | |
1807 | #endif | |
1808 | ||
4d22de3e DLR |
1809 | credits = G_RSPD_TXQ0_CR(flags); |
1810 | if (credits) | |
1811 | qs->txq[TXQ_ETH].processed += credits; | |
1812 | ||
6195c71d DLR |
1813 | credits = G_RSPD_TXQ2_CR(flags); |
1814 | if (credits) | |
1815 | qs->txq[TXQ_CTRL].processed += credits; | |
1816 | ||
4d22de3e DLR |
1817 | # if USE_GTS |
1818 | if (flags & F_RSPD_TXQ1_GTS) | |
1819 | clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags); | |
1820 | # endif | |
6195c71d DLR |
1821 | credits = G_RSPD_TXQ1_CR(flags); |
1822 | if (credits) | |
1823 | qs->txq[TXQ_OFLD].processed += credits; | |
4d22de3e DLR |
1824 | } |
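/*
 * Aside: the F_/G_ accessors used above follow the driver's standard
 * S_/M_/V_/G_ register-field macro pattern from the hardware headers.
 * A minimal sketch of that pattern (field position and width made up
 * purely for illustration; the real values live in t3_cpl.h):
 */
#define S_EXAMPLE_FIELD		3	/* starting bit of the field */
#define M_EXAMPLE_FIELD		0x7f	/* right-justified field mask */
#define V_EXAMPLE_FIELD(x)	((x) << S_EXAMPLE_FIELD)
#define G_EXAMPLE_FIELD(x)	(((x) >> S_EXAMPLE_FIELD) & M_EXAMPLE_FIELD)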
1825 | ||
1826 | /** | |
1827 | * check_ring_db - check if we need to ring any doorbells | |
1828 | * @adapter: the adapter | |
1829 | * @qs: the queue set whose Tx queues are to be examined | |
1830 | * @sleeping: indicates which Tx queue sent GTS | |
1831 | * | |
1832 | * Checks if some of a queue set's Tx queues need to ring their doorbells | |
1833 | * to resume transmission after idling while they still have unprocessed | |
1834 | * descriptors. | |
1835 | */ | |
1836 | static void check_ring_db(struct adapter *adap, struct sge_qset *qs, | |
1837 | unsigned int sleeping) | |
1838 | { | |
1839 | if (sleeping & F_RSPD_TXQ0_GTS) { | |
1840 | struct sge_txq *txq = &qs->txq[TXQ_ETH]; | |
1841 | ||
1842 | if (txq->cleaned + txq->in_use != txq->processed && | |
1843 | !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) { | |
1844 | set_bit(TXQ_RUNNING, &txq->flags); | |
1845 | t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | | |
1846 | V_EGRCNTX(txq->cntxt_id)); | |
1847 | } | |
1848 | } | |
1849 | ||
1850 | if (sleeping & F_RSPD_TXQ1_GTS) { | |
1851 | struct sge_txq *txq = &qs->txq[TXQ_OFLD]; | |
1852 | ||
1853 | if (txq->cleaned + txq->in_use != txq->processed && | |
1854 | !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) { | |
1855 | set_bit(TXQ_RUNNING, &txq->flags); | |
1856 | t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | | |
1857 | V_EGRCNTX(txq->cntxt_id)); | |
1858 | } | |
1859 | } | |
1860 | } | |
1861 | ||
1862 | /** | |
1863 | * is_new_response - check if a response is newly written | |
1864 | * @r: the response descriptor | |
1865 | * @q: the response queue | |
1866 | * | |
1867 | * Returns true if a response descriptor contains a yet unprocessed | |
1868 | * response. | |
1869 | */ | |
1870 | static inline int is_new_response(const struct rsp_desc *r, | |
1871 | const struct sge_rspq *q) | |
1872 | { | |
1873 | return (r->intr_gen & F_RSPD_GEN2) == q->gen; | |
1874 | } | |
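/*
 * Note on the check above: q->gen starts at 1 (see t3_sge_alloc_qset())
 * and is flipped each time cidx wraps around the ring, while each newly
 * written response is expected to carry the producer's current generation
 * in F_RSPD_GEN2. A descriptor whose generation disagrees with q->gen is
 * therefore left over from the previous trip around the ring and must not
 * be processed yet.
 */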
1875 | ||
1876 | #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS) | |
1877 | #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \ | |
1878 | V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \ | |
1879 | V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \ | |
1880 | V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR)) | |
1881 | ||
1882 | /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */ | |
1883 | #define NOMEM_INTR_DELAY 2500 | |
1884 | ||
1885 | /** | |
1886 | * process_responses - process responses from an SGE response queue | |
1887 | * @adap: the adapter | |
1888 | * @qs: the queue set to which the response queue belongs | |
1889 | * @budget: how many responses can be processed in this round | |
1890 | * | |
1891 | * Process responses from an SGE response queue up to the supplied budget. | |
1892 | * Responses include received packets as well as credits and other events | |
1893 | * for the queues that belong to the response queue's queue set. | |
1894 | * A negative budget is effectively unlimited. | |
1895 | * | |
1896 | * Additionally choose the interrupt holdoff time for the next interrupt | |
1897 | * on this queue. If the system is under memory shortage use a fairly | |
1898 | * long delay to help recovery. | |
1899 | */ | |
1900 | static int process_responses(struct adapter *adap, struct sge_qset *qs, | |
1901 | int budget) | |
1902 | { | |
1903 | struct sge_rspq *q = &qs->rspq; | |
1904 | struct rsp_desc *r = &q->desc[q->cidx]; | |
1905 | int budget_left = budget; | |
6195c71d | 1906 | unsigned int sleeping = 0; |
4d22de3e DLR |
1907 | struct sk_buff *offload_skbs[RX_BUNDLE_SIZE]; |
1908 | int ngathered = 0; | |
1909 | ||
1910 | q->next_holdoff = q->holdoff_tmr; | |
1911 | ||
1912 | while (likely(budget_left && is_new_response(r, q))) { | |
e0994eb1 | 1913 | int eth, ethpad = 2; |
4d22de3e DLR |
1914 | struct sk_buff *skb = NULL; |
1915 | u32 len, flags = ntohl(r->flags); | |
1916 | u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val; | |
1917 | ||
1918 | eth = r->rss_hdr.opcode == CPL_RX_PKT; | |
1919 | ||
1920 | if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) { | |
1921 | skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC); | |
1922 | if (!skb) | |
1923 | goto no_mem; | |
1924 | ||
1925 | memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE); | |
1926 | skb->data[0] = CPL_ASYNC_NOTIF; | |
1927 | rss_hi = htonl(CPL_ASYNC_NOTIF << 24); | |
1928 | q->async_notif++; | |
1929 | } else if (flags & F_RSPD_IMM_DATA_VALID) { | |
1930 | skb = get_imm_packet(r); | |
1931 | if (unlikely(!skb)) { | |
1932 | no_mem: | |
1933 | q->next_holdoff = NOMEM_INTR_DELAY; | |
1934 | q->nomem++; | |
1935 | /* consume one credit since we tried */ | |
1936 | budget_left--; | |
1937 | break; | |
1938 | } | |
1939 | q->imm_data++; | |
e0994eb1 | 1940 | ethpad = 0; |
4d22de3e | 1941 | } else if ((len = ntohl(r->len_cq)) != 0) { |
e0994eb1 DLR |
1942 | struct sge_fl *fl = |
1943 | (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; | |
1944 | ||
1945 | if (fl->buf_size == RX_PAGE_SIZE) { | |
1946 | struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; | |
1947 | struct sge_fl_page *p = &sd->t.page; | |
1948 | ||
1949 | prefetch(p->va); | |
1950 | prefetch(p->va + L1_CACHE_BYTES); | |
1951 | ||
1952 | __refill_fl(adap, fl); | |
1953 | ||
1954 | pci_unmap_single(adap->pdev, | |
1955 | pci_unmap_addr(sd, dma_addr), | |
1956 | fl->buf_size, | |
1957 | PCI_DMA_FROMDEVICE); | |
1958 | ||
1959 | if (eth) { | |
1960 | if (unlikely(fl->credits < | |
1961 | SGE_RX_DROP_THRES)) | |
1962 | goto eth_recycle; | |
1963 | ||
1964 | skb = alloc_skb(SKB_DATA_SIZE, | |
1965 | GFP_ATOMIC); | |
1966 | if (unlikely(!skb)) { | |
1967 | eth_recycle: | |
1968 | q->rx_drops++; | |
1969 | recycle_rx_buf(adap, fl, | |
1970 | fl->cidx); | |
1971 | goto eth_done; | |
1972 | } | |
1973 | } else { | |
1974 | skb = alloc_skb(SKB_DATA_SIZE, | |
1975 | GFP_ATOMIC); | |
1976 | if (unlikely(!skb)) | |
1977 | goto no_mem; | |
1978 | } | |
1979 | ||
1980 | skb_data_init(skb, p, G_RSPD_LEN(len)); | |
1981 | eth_done: | |
1982 | fl->credits--; | |
1983 | q->eth_pkts++; | |
1984 | } else { | |
1985 | fl->credits--; | |
1986 | skb = get_packet(adap, fl, G_RSPD_LEN(len), | |
1987 | eth ? SGE_RX_DROP_THRES : 0); | |
1988 | } | |
4d22de3e | 1989 | |
4d22de3e DLR |
1990 | if (++fl->cidx == fl->size) |
1991 | fl->cidx = 0; | |
1992 | } else | |
1993 | q->pure_rsps++; | |
1994 | ||
1995 | if (flags & RSPD_CTRL_MASK) { | |
1996 | sleeping |= flags & RSPD_GTS_MASK; | |
6195c71d | 1997 | handle_rsp_cntrl_info(qs, flags); |
4d22de3e DLR |
1998 | } |
1999 | ||
2000 | r++; | |
2001 | if (unlikely(++q->cidx == q->size)) { | |
2002 | q->cidx = 0; | |
2003 | q->gen ^= 1; | |
2004 | r = q->desc; | |
2005 | } | |
2006 | prefetch(r); | |
2007 | ||
2008 | if (++q->credits >= (q->size / 4)) { | |
2009 | refill_rspq(adap, q, q->credits); | |
2010 | q->credits = 0; | |
2011 | } | |
2012 | ||
e0994eb1 DLR |
2013 | if (skb) { |
2014 | /* Preserve the RSS info in csum & priority */ | |
2015 | skb->csum = rss_hi; | |
2016 | skb->priority = rss_lo; | |
2017 | ||
4d22de3e DLR |
2018 | if (eth) |
2019 | rx_eth(adap, q, skb, ethpad); | |
2020 | else { | |
e0994eb1 DLR |
2021 | if (unlikely(r->rss_hdr.opcode == |
2022 | CPL_TRACE_PKT)) | |
2023 | __skb_pull(skb, ethpad); | |
2024 | ||
2025 | ngathered = rx_offload(&adap->tdev, q, | |
2026 | skb, offload_skbs, | |
2027 | ngathered); | |
4d22de3e DLR |
2028 | } |
2029 | } | |
4d22de3e DLR |
2030 | --budget_left; |
2031 | } | |
2032 | ||
4d22de3e DLR |
2033 | deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); |
2034 | if (sleeping) | |
2035 | check_ring_db(adap, qs, sleeping); | |
2036 | ||
2037 | smp_mb(); /* commit Tx queue .processed updates */ | |
2038 | if (unlikely(qs->txq_stopped != 0)) | |
2039 | restart_tx(qs); | |
2040 | ||
2041 | budget -= budget_left; | |
2042 | return budget; | |
2043 | } | |
2044 | ||
2045 | static inline int is_pure_response(const struct rsp_desc *r) | |
2046 | { | |
2047 | u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID); | |
2048 | ||
2049 | return (n | r->len_cq) == 0; | |
2050 | } | |
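/*
 * Note: a response carries data only if it has the ASYNC_NOTIF or
 * IMM_DATA_VALID flag set or a non-zero len_cq (see process_responses()),
 * so OR-ing the three and comparing the result against zero classifies a
 * response in a single branch. len_cq needs no byte-swap here because
 * only whether it is zero matters.
 */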
2051 | ||
2052 | /** | |
2053 | * napi_rx_handler - the NAPI handler for Rx processing | |
2054 | * @dev: the net device | |
2055 | * @budget: how many packets we can process in this round | |
2056 | * | |
2057 | * Handler for new data events when using NAPI. | |
2058 | */ | |
2059 | static int napi_rx_handler(struct net_device *dev, int *budget) | |
2060 | { | |
2061 | struct adapter *adap = dev->priv; | |
2062 | struct sge_qset *qs = dev2qset(dev); | |
2063 | int effective_budget = min(*budget, dev->quota); | |
2064 | ||
2065 | int work_done = process_responses(adap, qs, effective_budget); | |
2066 | *budget -= work_done; | |
2067 | dev->quota -= work_done; | |
2068 | ||
2069 | if (work_done >= effective_budget) | |
2070 | return 1; | |
2071 | ||
2072 | netif_rx_complete(dev); | |
2073 | ||
2074 | /* | |
2075 | * Because we don't atomically flush the following write, it is | |
2076 | * possible in very rare cases for it to reach the device in a way | |
2077 | * that races with a new response being written plus an error interrupt | |
2078 | * causing the NAPI interrupt handler below to return unhandled status | |
2079 | * to the OS. To protect against this would require flushing the write | |
2080 | * and doing both the write and the flush with interrupts off. Way too | |
2081 | * expensive and unjustifiable given the rarity of the race. | |
2082 | * | |
2083 | * The race cannot happen at all with MSI-X. | |
2084 | */ | |
2085 | t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) | | |
2086 | V_NEWTIMER(qs->rspq.next_holdoff) | | |
2087 | V_NEWINDEX(qs->rspq.cidx)); | |
2088 | return 0; | |
2089 | } | |
2090 | ||
2091 | /* | |
2092 | * Returns true if the device is already scheduled for polling. | |
2093 | */ | |
2094 | static inline int napi_is_scheduled(struct net_device *dev) | |
2095 | { | |
2096 | return test_bit(__LINK_STATE_RX_SCHED, &dev->state); | |
2097 | } | |
2098 | ||
2099 | /** | |
2100 | * process_pure_responses - process pure responses from a response queue | |
2101 | * @adap: the adapter | |
2102 | * @qs: the queue set owning the response queue | |
2103 | * @r: the first pure response to process | |
2104 | * | |
2105 | * A simpler version of process_responses() that handles only pure (i.e., | |
2106 | * non data-carrying) responses. Such responses are too lightweight to | |
2107 | * justify calling a softirq under NAPI, so we handle them specially in | |
2108 | * the interrupt handler. The function is called with a pointer to a | |
2109 | * response, which the caller must ensure is a valid pure response. | |
2110 | * | |
2111 | * Returns 1 if it encounters a valid data-carrying response, 0 otherwise. | |
2112 | */ | |
2113 | static int process_pure_responses(struct adapter *adap, struct sge_qset *qs, | |
2114 | struct rsp_desc *r) | |
2115 | { | |
2116 | struct sge_rspq *q = &qs->rspq; | |
6195c71d | 2117 | unsigned int sleeping = 0; |
4d22de3e DLR |
2118 | |
2119 | do { | |
2120 | u32 flags = ntohl(r->flags); | |
2121 | ||
2122 | r++; | |
2123 | if (unlikely(++q->cidx == q->size)) { | |
2124 | q->cidx = 0; | |
2125 | q->gen ^= 1; | |
2126 | r = q->desc; | |
2127 | } | |
2128 | prefetch(r); | |
2129 | ||
2130 | if (flags & RSPD_CTRL_MASK) { | |
2131 | sleeping |= flags & RSPD_GTS_MASK; | |
6195c71d | 2132 | handle_rsp_cntrl_info(qs, flags); |
4d22de3e DLR |
2133 | } |
2134 | ||
2135 | q->pure_rsps++; | |
2136 | if (++q->credits >= (q->size / 4)) { | |
2137 | refill_rspq(adap, q, q->credits); | |
2138 | q->credits = 0; | |
2139 | } | |
2140 | } while (is_new_response(r, q) && is_pure_response(r)); | |
2141 | ||
4d22de3e DLR |
2142 | if (sleeping) |
2143 | check_ring_db(adap, qs, sleeping); | |
2144 | ||
2145 | smp_mb(); /* commit Tx queue .processed updates */ | |
2146 | if (unlikely(qs->txq_stopped != 0)) | |
2147 | restart_tx(qs); | |
2148 | ||
2149 | return is_new_response(r, q); | |
2150 | } | |
2151 | ||
2152 | /** | |
2153 | * handle_responses - decide what to do with new responses in NAPI mode | |
2154 | * @adap: the adapter | |
2155 | * @q: the response queue | |
2156 | * | |
2157 | * This is used by the NAPI interrupt handlers to decide what to do with | |
2158 | * new SGE responses. If there are no new responses it returns -1. If | |
2159 | * there are new responses and they are pure (i.e., non-data carrying) | |
2160 | * it handles them straight in hard interrupt context as they are very | |
2161 | * cheap and don't deliver any packets. Finally, if there are any data | |
2162 | * signaling responses it schedules the NAPI handler. Returns 1 if it | |
2163 | * schedules NAPI, 0 if all new responses were pure. | |
2164 | * | |
2165 | * The caller must ascertain NAPI is not already running. | |
2166 | */ | |
2167 | static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) | |
2168 | { | |
2169 | struct sge_qset *qs = rspq_to_qset(q); | |
2170 | struct rsp_desc *r = &q->desc[q->cidx]; | |
2171 | ||
2172 | if (!is_new_response(r, q)) | |
2173 | return -1; | |
2174 | if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) { | |
2175 | t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | | |
2176 | V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); | |
2177 | return 0; | |
2178 | } | |
2179 | if (likely(__netif_rx_schedule_prep(qs->netdev))) | |
2180 | __netif_rx_schedule(qs->netdev); | |
2181 | return 1; | |
2182 | } | |
2183 | ||
2184 | /* | |
2185 | * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case | |
2186 | * (i.e., response queue serviced in hard interrupt). | |
2187 | */ | |
2188 | irqreturn_t t3_sge_intr_msix(int irq, void *cookie) | |
2189 | { | |
2190 | struct sge_qset *qs = cookie; | |
2191 | struct adapter *adap = qs->netdev->priv; | |
2192 | struct sge_rspq *q = &qs->rspq; | |
2193 | ||
2194 | spin_lock(&q->lock); | |
2195 | if (process_responses(adap, qs, -1) == 0) | |
2196 | q->unhandled_irqs++; | |
2197 | t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | | |
2198 | V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); | |
2199 | spin_unlock(&q->lock); | |
2200 | return IRQ_HANDLED; | |
2201 | } | |
2202 | ||
2203 | /* | |
2204 | * The MSI-X interrupt handler for an SGE response queue for the NAPI case | |
2205 | * (i.e., response queue serviced by NAPI polling). | |
2206 | */ | |
2207 | irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie) | |
2208 | { | |
2209 | struct sge_qset *qs = cookie; | |
2210 | struct adapter *adap = qs->netdev->priv; | |
2211 | struct sge_rspq *q = &qs->rspq; | |
2212 | ||
2213 | spin_lock(&q->lock); | |
2214 | BUG_ON(napi_is_scheduled(qs->netdev)); | |
2215 | ||
2216 | if (handle_responses(adap, q) < 0) | |
2217 | q->unhandled_irqs++; | |
2218 | spin_unlock(&q->lock); | |
2219 | return IRQ_HANDLED; | |
2220 | } | |
2221 | ||
2222 | /* | |
2223 | * The non-NAPI MSI interrupt handler. This needs to handle data events from | |
2224 | * SGE response queues as well as error and other async events as they all use | |
2225 | * the same MSI vector. We use one SGE response queue per port in this mode | |
2226 | * and protect all response queues with queue 0's lock. | |
2227 | */ | |
2228 | static irqreturn_t t3_intr_msi(int irq, void *cookie) | |
2229 | { | |
2230 | int new_packets = 0; | |
2231 | struct adapter *adap = cookie; | |
2232 | struct sge_rspq *q = &adap->sge.qs[0].rspq; | |
2233 | ||
2234 | spin_lock(&q->lock); | |
2235 | ||
2236 | if (process_responses(adap, &adap->sge.qs[0], -1)) { | |
2237 | t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | | |
2238 | V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); | |
2239 | new_packets = 1; | |
2240 | } | |
2241 | ||
2242 | if (adap->params.nports == 2 && | |
2243 | process_responses(adap, &adap->sge.qs[1], -1)) { | |
2244 | struct sge_rspq *q1 = &adap->sge.qs[1].rspq; | |
2245 | ||
2246 | t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) | | |
2247 | V_NEWTIMER(q1->next_holdoff) | | |
2248 | V_NEWINDEX(q1->cidx)); | |
2249 | new_packets = 1; | |
2250 | } | |
2251 | ||
2252 | if (!new_packets && t3_slow_intr_handler(adap) == 0) | |
2253 | q->unhandled_irqs++; | |
2254 | ||
2255 | spin_unlock(&q->lock); | |
2256 | return IRQ_HANDLED; | |
2257 | } | |
2258 | ||
2259 | static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q) | |
2260 | { | |
2261 | if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) { | |
2262 | if (likely(__netif_rx_schedule_prep(dev))) | |
2263 | __netif_rx_schedule(dev); | |
2264 | return 1; | |
2265 | } | |
2266 | return 0; | |
2267 | } | |
2268 | ||
2269 | /* | |
2270 | * The MSI interrupt handler for the NAPI case (i.e., response queues serviced | |
2271 | * by NAPI polling). Handles data events from SGE response queues as well as | |
2272 | * error and other async events as they all use the same MSI vector. We use | |
2273 | * one SGE response queue per port in this mode and protect all response | |
2274 | * queues with queue 0's lock. | |
2275 | */ | |
2276 | irqreturn_t t3_intr_msi_napi(int irq, void *cookie) | |
2277 | { | |
2278 | int new_packets; | |
2279 | struct adapter *adap = cookie; | |
2280 | struct sge_rspq *q = &adap->sge.qs[0].rspq; | |
2281 | ||
2282 | spin_lock(&q->lock); | |
2283 | ||
2284 | new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q); | |
2285 | if (adap->params.nports == 2) | |
2286 | new_packets += rspq_check_napi(adap->sge.qs[1].netdev, | |
2287 | &adap->sge.qs[1].rspq); | |
2288 | if (!new_packets && t3_slow_intr_handler(adap) == 0) | |
2289 | q->unhandled_irqs++; | |
2290 | ||
2291 | spin_unlock(&q->lock); | |
2292 | return IRQ_HANDLED; | |
2293 | } | |
2294 | ||
2295 | /* | |
2296 | * A helper function that processes responses and issues GTS. | |
2297 | */ | |
2298 | static inline int process_responses_gts(struct adapter *adap, | |
2299 | struct sge_rspq *rq) | |
2300 | { | |
2301 | int work; | |
2302 | ||
2303 | work = process_responses(adap, rspq_to_qset(rq), -1); | |
2304 | t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) | | |
2305 | V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx)); | |
2306 | return work; | |
2307 | } | |
2308 | ||
2309 | /* | |
2310 | * The legacy INTx interrupt handler. This needs to handle data events from | |
2311 | * SGE response queues as well as error and other async events as they all use | |
2312 | * the same interrupt pin. We use one SGE response queue per port in this mode | |
2313 | * and protect all response queues with queue 0's lock. | |
2314 | */ | |
2315 | static irqreturn_t t3_intr(int irq, void *cookie) | |
2316 | { | |
2317 | int work_done, w0, w1; | |
2318 | struct adapter *adap = cookie; | |
2319 | struct sge_rspq *q0 = &adap->sge.qs[0].rspq; | |
2320 | struct sge_rspq *q1 = &adap->sge.qs[1].rspq; | |
2321 | ||
2322 | spin_lock(&q0->lock); | |
2323 | ||
2324 | w0 = is_new_response(&q0->desc[q0->cidx], q0); | |
2325 | w1 = adap->params.nports == 2 && | |
2326 | is_new_response(&q1->desc[q1->cidx], q1); | |
2327 | ||
2328 | if (likely(w0 | w1)) { | |
2329 | t3_write_reg(adap, A_PL_CLI, 0); | |
2330 | t3_read_reg(adap, A_PL_CLI); /* flush */ | |
2331 | ||
2332 | if (likely(w0)) | |
2333 | process_responses_gts(adap, q0); | |
2334 | ||
2335 | if (w1) | |
2336 | process_responses_gts(adap, q1); | |
2337 | ||
2338 | work_done = w0 | w1; | |
2339 | } else | |
2340 | work_done = t3_slow_intr_handler(adap); | |
2341 | ||
2342 | spin_unlock(&q0->lock); | |
2343 | return IRQ_RETVAL(work_done != 0); | |
2344 | } | |
2345 | ||
2346 | /* | |
2347 | * Interrupt handler for legacy INTx interrupts for T3B-based cards. | |
2348 | * Handles data events from SGE response queues as well as error and other | |
2349 | * async events as they all use the same interrupt pin. We use one SGE | |
2350 | * response queue per port in this mode and protect all response queues with | |
2351 | * queue 0's lock. | |
2352 | */ | |
2353 | static irqreturn_t t3b_intr(int irq, void *cookie) | |
2354 | { | |
2355 | u32 map; | |
2356 | struct adapter *adap = cookie; | |
2357 | struct sge_rspq *q0 = &adap->sge.qs[0].rspq; | |
2358 | ||
2359 | t3_write_reg(adap, A_PL_CLI, 0); | |
2360 | map = t3_read_reg(adap, A_SG_DATA_INTR); | |
2361 | ||
2362 | if (unlikely(!map)) /* shared interrupt, most likely */ | |
2363 | return IRQ_NONE; | |
2364 | ||
2365 | spin_lock(&q0->lock); | |
2366 | ||
2367 | if (unlikely(map & F_ERRINTR)) | |
2368 | t3_slow_intr_handler(adap); | |
2369 | ||
2370 | if (likely(map & 1)) | |
2371 | process_responses_gts(adap, q0); | |
2372 | ||
2373 | if (map & 2) | |
2374 | process_responses_gts(adap, &adap->sge.qs[1].rspq); | |
2375 | ||
2376 | spin_unlock(&q0->lock); | |
2377 | return IRQ_HANDLED; | |
2378 | } | |
2379 | ||
2380 | /* | |
2381 | * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards. | |
2382 | * Handles data events from SGE response queues as well as error and other | |
2383 | * async events as they all use the same interrupt pin. We use one SGE | |
2384 | * response queue per port in this mode and protect all response queues with | |
2385 | * queue 0's lock. | |
2386 | */ | |
2387 | static irqreturn_t t3b_intr_napi(int irq, void *cookie) | |
2388 | { | |
2389 | u32 map; | |
2390 | struct net_device *dev; | |
2391 | struct adapter *adap = cookie; | |
2392 | struct sge_rspq *q0 = &adap->sge.qs[0].rspq; | |
2393 | ||
2394 | t3_write_reg(adap, A_PL_CLI, 0); | |
2395 | map = t3_read_reg(adap, A_SG_DATA_INTR); | |
2396 | ||
2397 | if (unlikely(!map)) /* shared interrupt, most likely */ | |
2398 | return IRQ_NONE; | |
2399 | ||
2400 | spin_lock(&q0->lock); | |
2401 | ||
2402 | if (unlikely(map & F_ERRINTR)) | |
2403 | t3_slow_intr_handler(adap); | |
2404 | ||
2405 | if (likely(map & 1)) { | |
2406 | dev = adap->sge.qs[0].netdev; | |
2407 | ||
4d22de3e DLR |
2408 | if (likely(__netif_rx_schedule_prep(dev))) |
2409 | __netif_rx_schedule(dev); | |
2410 | } | |
2411 | if (map & 2) { | |
2412 | dev = adap->sge.qs[1].netdev; | |
2413 | ||
4d22de3e DLR |
2414 | if (likely(__netif_rx_schedule_prep(dev))) |
2415 | __netif_rx_schedule(dev); | |
2416 | } | |
2417 | ||
2418 | spin_unlock(&q0->lock); | |
2419 | return IRQ_HANDLED; | |
2420 | } | |
2421 | ||
2422 | /** | |
2423 | * t3_intr_handler - select the top-level interrupt handler | |
2424 | * @adap: the adapter | |
2425 | * @polling: whether using NAPI to service response queues | |
2426 | * | |
2427 | * Selects the top-level interrupt handler based on the type of interrupts | |
2428 | * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the | |
2429 | * response queues. | |
2430 | */ | |
2431 | intr_handler_t t3_intr_handler(struct adapter *adap, int polling) | |
2432 | { | |
2433 | if (adap->flags & USING_MSIX) | |
2434 | return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix; | |
2435 | if (adap->flags & USING_MSI) | |
2436 | return polling ? t3_intr_msi_napi : t3_intr_msi; | |
2437 | if (adap->params.rev > 0) | |
2438 | return polling ? t3b_intr_napi : t3b_intr; | |
2439 | return t3_intr; | |
2440 | } | |
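/*
 * Minimal usage sketch (assumed caller, not part of this file): setup
 * code would register the selected handler once the interrupt mode and
 * polling choice are known, along the lines of
 *
 *	err = request_irq(adap->pdev->irq,
 *			  t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
 *			  IRQF_SHARED, "cxgb3", adap);
 */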
2441 | ||
2442 | /** | |
2443 | * t3_sge_err_intr_handler - SGE async event interrupt handler | |
2444 | * @adapter: the adapter | |
2445 | * | |
2446 | * Interrupt handler for SGE asynchronous (non-data) events. | |
2447 | */ | |
2448 | void t3_sge_err_intr_handler(struct adapter *adapter) | |
2449 | { | |
2450 | unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE); | |
2451 | ||
2452 | if (status & F_RSPQCREDITOVERFOW) | |
2453 | CH_ALERT(adapter, "SGE response queue credit overflow\n"); | |
2454 | ||
2455 | if (status & F_RSPQDISABLED) { | |
2456 | v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS); | |
2457 | ||
2458 | CH_ALERT(adapter, | |
2459 | "packet delivered to disabled response queue " | |
2460 | "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff); | |
2461 | } | |
2462 | ||
2463 | t3_write_reg(adapter, A_SG_INT_CAUSE, status); | |
2464 | if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED)) | |
2465 | t3_fatal_err(adapter); | |
2466 | } | |
2467 | ||
2468 | /** | |
2469 | * sge_timer_cb - perform periodic maintenance of an SGE qset | |
2470 | * @data: the SGE queue set to maintain | |
2471 | * | |
2472 | * Runs periodically from a timer to perform maintenance of an SGE queue | |
2473 | * set. It performs two tasks: | |
2474 | * | |
2475 | * a) Cleans up any completed Tx descriptors that may still be pending. | |
2476 | * Normal descriptor cleanup happens when new packets are added to a Tx | |
2477 | * queue so this timer is relatively infrequent and does any cleanup only | |
2478 | * if the Tx queue has not seen any new packets in a while. We make a | |
2479 | * best effort attempt to reclaim descriptors, in that we don't wait | |
2480 | * around if we cannot get a queue's lock (which most likely is because | |
2481 | * someone else is queueing new packets and so will also handle the clean | |
2482 | * up). Since control queues use immediate data exclusively we don't | |
2483 | * bother cleaning them up here. | |
2484 | * | |
2485 | * b) Replenishes Rx queues that have run out due to memory shortage. | |
2486 | * Normally new Rx buffers are added when existing ones are consumed but | |
2487 | * when out of memory a queue can become empty. We try to add only a few | |
2488 | * buffers here, the queue will be replenished fully as these new buffers | |
2489 | * are used up if memory shortage has subsided. | |
2490 | */ | |
2491 | static void sge_timer_cb(unsigned long data) | |
2492 | { | |
2493 | spinlock_t *lock; | |
2494 | struct sge_qset *qs = (struct sge_qset *)data; | |
2495 | struct adapter *adap = qs->netdev->priv; | |
2496 | ||
2497 | if (spin_trylock(&qs->txq[TXQ_ETH].lock)) { | |
2498 | reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]); | |
2499 | spin_unlock(&qs->txq[TXQ_ETH].lock); | |
2500 | } | |
2501 | if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) { | |
2502 | reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]); | |
2503 | spin_unlock(&qs->txq[TXQ_OFLD].lock); | |
2504 | } | |
2505 | lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock : | |
e0994eb1 | 2506 | &adap->sge.qs[0].rspq.lock; |
4d22de3e DLR |
2507 | if (spin_trylock_irq(lock)) { |
2508 | if (!napi_is_scheduled(qs->netdev)) { | |
bae73f44 DLR |
2509 | u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS); |
2510 | ||
4d22de3e DLR |
2511 | if (qs->fl[0].credits < qs->fl[0].size) |
2512 | __refill_fl(adap, &qs->fl[0]); | |
2513 | if (qs->fl[1].credits < qs->fl[1].size) | |
2514 | __refill_fl(adap, &qs->fl[1]); | |
bae73f44 DLR |
2515 | |
2516 | if (status & (1 << qs->rspq.cntxt_id)) { | |
2517 | qs->rspq.starved++; | |
2518 | if (qs->rspq.credits) { | |
2519 | refill_rspq(adap, &qs->rspq, 1); | |
2520 | qs->rspq.credits--; | |
2521 | qs->rspq.restarted++; | |
e0994eb1 | 2522 | t3_write_reg(adap, A_SG_RSPQ_FL_STATUS, |
bae73f44 DLR |
2523 | 1 << qs->rspq.cntxt_id); |
2524 | } | |
2525 | } | |
4d22de3e DLR |
2526 | } |
2527 | spin_unlock_irq(lock); | |
2528 | } | |
2529 | mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); | |
2530 | } | |
2531 | ||
2532 | /** | |
2533 | * t3_update_qset_coalesce - update coalescing settings for a queue set | |
2534 | * @qs: the SGE queue set | |
2535 | * @p: new queue set parameters | |
2536 | * | |
2537 | * Update the coalescing settings for an SGE queue set. Nothing is done | |
2538 | * if the queue set is not initialized yet. | |
2539 | */ | |
2540 | void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p) | |
2541 | { | |
2542 | if (!qs->netdev) | |
2543 | return; | |
2544 | ||
2545 | qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */ | |
2546 | qs->rspq.polling = p->polling; | |
2547 | qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll; | |
2548 | } | |
2549 | ||
2550 | /** | |
2551 | * t3_sge_alloc_qset - initialize an SGE queue set | |
2552 | * @adapter: the adapter | |
2553 | * @id: the queue set id | |
2554 | * @nports: how many Ethernet ports will be using this queue set | |
2555 | * @irq_vec_idx: the IRQ vector index for response queue interrupts | |
2556 | * @p: configuration parameters for this queue set | |
2557 | * @ntxq: number of Tx queues for the queue set | |
2558 | * @netdev: net device associated with this queue set | |
2559 | * | |
2560 | * Allocate resources and initialize an SGE queue set. A queue set | |
2561 | * comprises a response queue, two Rx free-buffer queues, and up to 3 | |
2562 | * Tx queues. The Tx queues are assigned roles in the order Ethernet | |
2563 | * queue, offload queue, and control queue. | |
2564 | */ | |
2565 | int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, | |
2566 | int irq_vec_idx, const struct qset_params *p, | |
2567 | int ntxq, struct net_device *netdev) | |
2568 | { | |
2569 | int i, ret = -ENOMEM; | |
2570 | struct sge_qset *q = &adapter->sge.qs[id]; | |
2571 | ||
2572 | init_qset_cntxt(q, id); | |
2573 | init_timer(&q->tx_reclaim_timer); | |
2574 | q->tx_reclaim_timer.data = (unsigned long)q; | |
2575 | q->tx_reclaim_timer.function = sge_timer_cb; | |
2576 | ||
2577 | q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size, | |
2578 | sizeof(struct rx_desc), | |
2579 | sizeof(struct rx_sw_desc), | |
2580 | &q->fl[0].phys_addr, &q->fl[0].sdesc); | |
2581 | if (!q->fl[0].desc) | |
2582 | goto err; | |
2583 | ||
2584 | q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size, | |
2585 | sizeof(struct rx_desc), | |
2586 | sizeof(struct rx_sw_desc), | |
2587 | &q->fl[1].phys_addr, &q->fl[1].sdesc); | |
2588 | if (!q->fl[1].desc) | |
2589 | goto err; | |
2590 | ||
2591 | q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size, | |
2592 | sizeof(struct rsp_desc), 0, | |
2593 | &q->rspq.phys_addr, NULL); | |
2594 | if (!q->rspq.desc) | |
2595 | goto err; | |
2596 | ||
2597 | for (i = 0; i < ntxq; ++i) { | |
2598 | /* | |
2599 | * The control queue always uses immediate data so does not | |
2600 | * need to keep track of any sk_buffs. | |
2601 | */ | |
2602 | size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc); | |
2603 | ||
2604 | q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i], | |
2605 | sizeof(struct tx_desc), sz, | |
2606 | &q->txq[i].phys_addr, | |
2607 | &q->txq[i].sdesc); | |
2608 | if (!q->txq[i].desc) | |
2609 | goto err; | |
2610 | ||
2611 | q->txq[i].gen = 1; | |
2612 | q->txq[i].size = p->txq_size[i]; | |
2613 | spin_lock_init(&q->txq[i].lock); | |
2614 | skb_queue_head_init(&q->txq[i].sendq); | |
2615 | } | |
2616 | ||
2617 | tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq, | |
2618 | (unsigned long)q); | |
2619 | tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq, | |
2620 | (unsigned long)q); | |
2621 | ||
2622 | q->fl[0].gen = q->fl[1].gen = 1; | |
2623 | q->fl[0].size = p->fl_size; | |
2624 | q->fl[1].size = p->jumbo_size; | |
2625 | ||
2626 | q->rspq.gen = 1; | |
2627 | q->rspq.size = p->rspq_size; | |
2628 | spin_lock_init(&q->rspq.lock); | |
2629 | ||
2630 | q->txq[TXQ_ETH].stop_thres = nports * | |
2631 | flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3); | |
2632 | ||
8ac3ba68 | 2633 | if (!is_offload(adapter)) { |
e0994eb1 DLR |
2634 | #ifdef USE_RX_PAGE |
2635 | q->fl[0].buf_size = RX_PAGE_SIZE; | |
2636 | #else | |
4d22de3e DLR |
2637 | q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 + |
2638 | sizeof(struct cpl_rx_pkt); | |
e0994eb1 | 2639 | #endif |
4d22de3e DLR |
2640 | q->fl[1].buf_size = MAX_FRAME_SIZE + 2 + |
2641 | sizeof(struct cpl_rx_pkt); | |
2642 | } else { | |
e0994eb1 DLR |
2643 | #ifdef USE_RX_PAGE |
2644 | q->fl[0].buf_size = RX_PAGE_SIZE; | |
2645 | #else | |
4d22de3e DLR |
2646 | q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + |
2647 | sizeof(struct cpl_rx_data); | |
e0994eb1 | 2648 | #endif |
4d22de3e DLR |
2649 | q->fl[1].buf_size = (16 * 1024) - |
2650 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | |
2651 | } | |
2652 | ||
2653 | spin_lock(&adapter->sge.reg_lock); | |
2654 | ||
2655 | /* FL threshold comparison uses < */ | |
2656 | ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx, | |
2657 | q->rspq.phys_addr, q->rspq.size, | |
2658 | q->fl[0].buf_size, 1, 0); | |
2659 | if (ret) | |
2660 | goto err_unlock; | |
2661 | ||
2662 | for (i = 0; i < SGE_RXQ_PER_SET; ++i) { | |
2663 | ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0, | |
2664 | q->fl[i].phys_addr, q->fl[i].size, | |
2665 | q->fl[i].buf_size, p->cong_thres, 1, | |
2666 | 0); | |
2667 | if (ret) | |
2668 | goto err_unlock; | |
2669 | } | |
2670 | ||
2671 | ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS, | |
2672 | SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr, | |
2673 | q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token, | |
2674 | 1, 0); | |
2675 | if (ret) | |
2676 | goto err_unlock; | |
2677 | ||
2678 | if (ntxq > 1) { | |
2679 | ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id, | |
2680 | USE_GTS, SGE_CNTXT_OFLD, id, | |
2681 | q->txq[TXQ_OFLD].phys_addr, | |
2682 | q->txq[TXQ_OFLD].size, 0, 1, 0); | |
2683 | if (ret) | |
2684 | goto err_unlock; | |
2685 | } | |
2686 | ||
2687 | if (ntxq > 2) { | |
2688 | ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0, | |
2689 | SGE_CNTXT_CTRL, id, | |
2690 | q->txq[TXQ_CTRL].phys_addr, | |
2691 | q->txq[TXQ_CTRL].size, | |
2692 | q->txq[TXQ_CTRL].token, 1, 0); | |
2693 | if (ret) | |
2694 | goto err_unlock; | |
2695 | } | |
2696 | ||
2697 | spin_unlock(&adapter->sge.reg_lock); | |
2698 | q->netdev = netdev; | |
2699 | t3_update_qset_coalesce(q, p); | |
2700 | ||
2701 | /* | |
2702 | * We use atalk_ptr as a backpointer to a qset. In case a device is | |
2703 | * associated with multiple queue sets only the first one sets | |
2704 | * atalk_ptr. | |
2705 | */ | |
2706 | if (netdev->atalk_ptr == NULL) | |
2707 | netdev->atalk_ptr = q; | |
2708 | ||
2709 | refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL); | |
2710 | refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL); | |
2711 | refill_rspq(adapter, &q->rspq, q->rspq.size - 1); | |
2712 | ||
2713 | t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | | |
2714 | V_NEWTIMER(q->rspq.holdoff_tmr)); | |
2715 | ||
2716 | mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); | |
2717 | return 0; | |
2718 | ||
2719 | err_unlock: | |
2720 | spin_unlock(&adapter->sge.reg_lock); | |
2721 | err: | |
2722 | t3_free_qset(adapter, q); | |
2723 | return ret; | |
2724 | } | |
2725 | ||
2726 | /** | |
2727 | * t3_free_sge_resources - free SGE resources | |
2728 | * @adap: the adapter | |
2729 | * | |
2730 | * Frees resources used by the SGE queue sets. | |
2731 | */ | |
2732 | void t3_free_sge_resources(struct adapter *adap) | |
2733 | { | |
2734 | int i; | |
2735 | ||
2736 | for (i = 0; i < SGE_QSETS; ++i) | |
2737 | t3_free_qset(adap, &adap->sge.qs[i]); | |
2738 | } | |
2739 | ||
2740 | /** | |
2741 | * t3_sge_start - enable SGE | |
2742 | * @adap: the adapter | |
2743 | * | |
2744 | * Enables the SGE for DMAs. This is the last step in starting packet | |
2745 | * transfers. | |
2746 | */ | |
2747 | void t3_sge_start(struct adapter *adap) | |
2748 | { | |
2749 | t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE); | |
2750 | } | |
2751 | ||
2752 | /** | |
2753 | * t3_sge_stop - disable SGE operation | |
2754 | * @adap: the adapter | |
2755 | * | |
2756 | * Disables the DMA engine. This can be called in emergencies (e.g., | |
2757 | * from error interrupts) or from normal process context. In the latter | |
2758 | * case it also disables any pending queue restart tasklets. Note that | |
2759 | * if it is called in interrupt context it cannot disable the restart | |
2760 | * tasklets as it cannot wait, however the tasklets will have no effect | |
2761 | * since the doorbells are disabled and the driver will call this again | |
2762 | * later from process context, at which time the tasklets will be stopped | |
2763 | * if they are still running. | |
2764 | */ | |
2765 | void t3_sge_stop(struct adapter *adap) | |
2766 | { | |
2767 | t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0); | |
2768 | if (!in_interrupt()) { | |
2769 | int i; | |
2770 | ||
2771 | for (i = 0; i < SGE_QSETS; ++i) { | |
2772 | struct sge_qset *qs = &adap->sge.qs[i]; | |
2773 | ||
2774 | tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk); | |
2775 | tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk); | |
2776 | } | |
2777 | } | |
2778 | } | |
2779 | ||
2780 | /** | |
2781 | * t3_sge_init - initialize SGE | |
2782 | * @adap: the adapter | |
2783 | * @p: the SGE parameters | |
2784 | * | |
2785 | * Performs SGE initialization needed every time after a chip reset. | |
2786 | * We do not initialize any of the queue sets here; instead the driver | |
2787 | * top-level must request those individually. We also do not enable DMA | |
2788 | * here, that should be done after the queues have been set up. | |
2789 | */ | |
2790 | void t3_sge_init(struct adapter *adap, struct sge_params *p) | |
2791 | { | |
2792 | unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12); | |
2793 | ||
2794 | ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL | | |
2795 | F_CQCRDTCTRL | | |
2796 | V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS | | |
2797 | V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING; | |
2798 | #if SGE_NUM_GENBITS == 1 | |
2799 | ctrl |= F_EGRGENCTRL; | |
2800 | #endif | |
2801 | if (adap->params.rev > 0) { | |
2802 | if (!(adap->flags & (USING_MSIX | USING_MSI))) | |
2803 | ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ; | |
2804 | ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL; | |
2805 | } | |
2806 | t3_write_reg(adap, A_SG_CONTROL, ctrl); | |
2807 | t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) | | |
2808 | V_LORCQDRBTHRSH(512)); | |
2809 | t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10); | |
2810 | t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) | | |
6195c71d | 2811 | V_TIMEOUT(200 * core_ticks_per_usec(adap))); |
4d22de3e DLR |
2812 | t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000); |
2813 | t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256); | |
2814 | t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000); | |
2815 | t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256); | |
2816 | t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff)); | |
2817 | t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024); | |
2818 | } | |
2819 | ||
2820 | /** | |
2821 | * t3_sge_prep - one-time SGE initialization | |
2822 | * @adap: the associated adapter | |
2823 | * @p: SGE parameters | |
2824 | * | |
2825 | * Performs one-time initialization of SGE SW state. Includes determining | |
2826 | * defaults for the assorted SGE parameters, which admins can change until | |
2827 | * they are used to initialize the SGE. | |
2828 | */ | |
2829 | void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p) | |
2830 | { | |
2831 | int i; | |
2832 | ||
2833 | p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) - | |
2834 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | |
2835 | ||
2836 | for (i = 0; i < SGE_QSETS; ++i) { | |
2837 | struct qset_params *q = p->qset + i; | |
2838 | ||
2839 | q->polling = adap->params.rev > 0; | |
2840 | q->coalesce_usecs = 5; | |
2841 | q->rspq_size = 1024; | |
e0994eb1 | 2842 | q->fl_size = 1024; |
4d22de3e DLR |
2843 | q->jumbo_size = 512; |
2844 | q->txq_size[TXQ_ETH] = 1024; | |
2845 | q->txq_size[TXQ_OFLD] = 1024; | |
2846 | q->txq_size[TXQ_CTRL] = 256; | |
2847 | q->cong_thres = 0; | |
2848 | } | |
2849 | ||
2850 | spin_lock_init(&adap->sge.reg_lock); | |
2851 | } | |
2852 | ||
2853 | /** | |
2854 | * t3_get_desc - dump an SGE descriptor for debugging purposes | |
2855 | * @qs: the queue set | |
2856 | * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx) | |
2857 | * @idx: the descriptor index in the queue | |
2858 | * @data: where to dump the descriptor contents | |
2859 | * | |
2860 | * Dumps the contents of a HW descriptor of an SGE queue. Returns the | |
2861 | * size of the descriptor. | |
2862 | */ | |
2863 | int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx, | |
2864 | unsigned char *data) | |
2865 | { | |
2866 | if (qnum >= 6) | |
2867 | return -EINVAL; | |
2868 | ||
2869 | if (qnum < 3) { | |
2870 | if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size) | |
2871 | return -EINVAL; | |
2872 | memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc)); | |
2873 | return sizeof(struct tx_desc); | |
2874 | } | |
2875 | ||
2876 | if (qnum == 3) { | |
2877 | if (!qs->rspq.desc || idx >= qs->rspq.size) | |
2878 | return -EINVAL; | |
2879 | memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc)); | |
2880 | return sizeof(struct rsp_desc); | |
2881 | } | |
2882 | ||
2883 | qnum -= 4; | |
2884 | if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size) | |
2885 | return -EINVAL; | |
2886 | memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc)); | |
2887 | return sizeof(struct rx_desc); | |
2888 | } |
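/*
 * Example (hypothetical debug caller): dumping the first response-queue
 * descriptor of queue set 0, using qnum 3 for the response queue as
 * documented above:
 *
 *	unsigned char buf[sizeof(struct rsp_desc)];
 *	int len = t3_get_desc(&adap->sge.qs[0], 3, 0, buf);
 */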