1 /*****************************************************************************
2 * *
3 * File: sge.c *
4 * $Revision: 1.26 $ *
5 * $Date: 2005/06/21 18:29:48 $ *
6 * Description: *
7 * DMA engine. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, see <http://www.gnu.org/licenses/>. *
16 * *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
20 * *
21 * http://www.chelsio.com *
22 * *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
25 * *
26 * Maintainers: maintainers@chelsio.com *
27 * *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
34 * *
35 * History: *
36 * *
37 ****************************************************************************/
38
39 #include "common.h"
40
41 #include <linux/types.h>
42 #include <linux/errno.h>
43 #include <linux/pci.h>
44 #include <linux/ktime.h>
45 #include <linux/netdevice.h>
46 #include <linux/etherdevice.h>
47 #include <linux/if_vlan.h>
48 #include <linux/skbuff.h>
49 #include <linux/mm.h>
50 #include <linux/tcp.h>
51 #include <linux/ip.h>
52 #include <linux/in.h>
53 #include <linux/if_arp.h>
54 #include <linux/slab.h>
55 #include <linux/prefetch.h>
56
57 #include "cpl5_cmd.h"
58 #include "sge.h"
59 #include "regs.h"
60 #include "espi.h"
61
62 /* This belongs in if_ether.h */
63 #define ETH_P_CPL5 0xf
64
65 #define SGE_CMDQ_N 2
66 #define SGE_FREELQ_N 2
67 #define SGE_CMDQ0_E_N 1024
68 #define SGE_CMDQ1_E_N 128
69 #define SGE_FREEL_SIZE 4096
70 #define SGE_JUMBO_FREEL_SIZE 512
71 #define SGE_FREEL_REFILL_THRESH 16
72 #define SGE_RESPQ_E_N 1024
73 #define SGE_INTRTIMER_NRES 1000
74 #define SGE_RX_SM_BUF_SIZE 1536
75 #define SGE_TX_DESC_MAX_PLEN 16384
76
77 #define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
78
79 /*
80 * Period of the TX buffer reclaim timer. This timer does not need to run
81 * frequently as TX buffers are usually reclaimed by new TX packets.
82 */
83 #define TX_RECLAIM_PERIOD (HZ / 4)
84
85 #define M_CMD_LEN 0x7fffffff
86 #define V_CMD_LEN(v) (v)
87 #define G_CMD_LEN(v) ((v) & M_CMD_LEN)
88 #define V_CMD_GEN1(v) ((v) << 31)
89 #define V_CMD_GEN2(v) (v)
90 #define F_CMD_DATAVALID (1 << 1)
91 #define F_CMD_SOP (1 << 2)
92 #define V_CMD_EOP(v) ((v) << 3)
93
94 /*
95 * Command queue, receive buffer list, and response queue descriptors.
96 */
97 #if defined(__BIG_ENDIAN_BITFIELD)
98 struct cmdQ_e {
99 u32 addr_lo;
100 u32 len_gen;
101 u32 flags;
102 u32 addr_hi;
103 };
104
105 struct freelQ_e {
106 u32 addr_lo;
107 u32 len_gen;
108 u32 gen2;
109 u32 addr_hi;
110 };
111
112 struct respQ_e {
113 u32 Qsleeping : 4;
114 u32 Cmdq1CreditReturn : 5;
115 u32 Cmdq1DmaComplete : 5;
116 u32 Cmdq0CreditReturn : 5;
117 u32 Cmdq0DmaComplete : 5;
118 u32 FreelistQid : 2;
119 u32 CreditValid : 1;
120 u32 DataValid : 1;
121 u32 Offload : 1;
122 u32 Eop : 1;
123 u32 Sop : 1;
124 u32 GenerationBit : 1;
125 u32 BufferLength;
126 };
127 #elif defined(__LITTLE_ENDIAN_BITFIELD)
128 struct cmdQ_e {
129 u32 len_gen;
130 u32 addr_lo;
131 u32 addr_hi;
132 u32 flags;
133 };
134
135 struct freelQ_e {
136 u32 len_gen;
137 u32 addr_lo;
138 u32 addr_hi;
139 u32 gen2;
140 };
141
142 struct respQ_e {
143 u32 BufferLength;
144 u32 GenerationBit : 1;
145 u32 Sop : 1;
146 u32 Eop : 1;
147 u32 Offload : 1;
148 u32 DataValid : 1;
149 u32 CreditValid : 1;
150 u32 FreelistQid : 2;
151 u32 Cmdq0DmaComplete : 5;
152 u32 Cmdq0CreditReturn : 5;
153 u32 Cmdq1DmaComplete : 5;
154 u32 Cmdq1CreditReturn : 5;
155 u32 Qsleeping : 4;
156 };
157 #endif
158
159 /*
160 * SW Context Command and Freelist Queue Descriptors
161 */
162 struct cmdQ_ce {
163 struct sk_buff *skb;
164 DEFINE_DMA_UNMAP_ADDR(dma_addr);
165 DEFINE_DMA_UNMAP_LEN(dma_len);
166 };
167
168 struct freelQ_ce {
169 struct sk_buff *skb;
170 DEFINE_DMA_UNMAP_ADDR(dma_addr);
171 DEFINE_DMA_UNMAP_LEN(dma_len);
172 };
173
174 /*
175 * SW command, freelist and response rings
176 */
177 struct cmdQ {
178 unsigned long status; /* HW DMA fetch status */
179 unsigned int in_use; /* # of in-use command descriptors */
180 unsigned int size; /* # of descriptors */
181 unsigned int processed; /* total # of descs HW has processed */
182 unsigned int cleaned; /* total # of descs SW has reclaimed */
183 unsigned int stop_thres; /* SW TX queue suspend threshold */
184 u16 pidx; /* producer index (SW) */
185 u16 cidx; /* consumer index (HW) */
186 u8 genbit; /* current generation (=valid) bit */
187 u8 sop; /* is next entry start of packet? */
188 struct cmdQ_e *entries; /* HW command descriptor Q */
189 struct cmdQ_ce *centries; /* SW command context descriptor Q */
190 dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
191 spinlock_t lock; /* Lock to protect cmdQ enqueuing */
192 };
193
194 struct freelQ {
195 unsigned int credits; /* # of available RX buffers */
196 unsigned int size; /* free list capacity */
197 u16 pidx; /* producer index (SW) */
198 u16 cidx; /* consumer index (HW) */
199 u16 rx_buffer_size; /* Buffer size on this free list */
200 u16 dma_offset; /* DMA offset to align IP headers */
201 u16 recycleq_idx; /* skb recycle q to use */
202 u8 genbit; /* current generation (=valid) bit */
203 struct freelQ_e *entries; /* HW freelist descriptor Q */
204 struct freelQ_ce *centries; /* SW freelist context descriptor Q */
205 dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */
206 };
207
208 struct respQ {
209 unsigned int credits; /* credits to be returned to SGE */
210 unsigned int size; /* # of response Q descriptors */
211 u16 cidx; /* consumer index (SW) */
212 u8 genbit; /* current generation(=valid) bit */
213 struct respQ_e *entries; /* HW response descriptor Q */
214 dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */
215 };
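
/*
 * Illustrative sketch (editorial addition, not part of the original
 * driver): descriptor ownership is communicated purely through the
 * generation bits. SW flips genbit on every ring wrap, so an entry is
 * valid exactly when its generation field matches the ring's current
 * generation; cf. responses_pending() below.
 */
static inline int respQ_entry_valid(const struct respQ *q)
{
	return q->entries[q->cidx].GenerationBit == q->genbit;
}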
216
217 /* Bit flags for cmdQ.status */
218 enum {
219 CMDQ_STAT_RUNNING = 1, /* fetch engine is running */
220 CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */
221 };
222
223 /* T204 TX SW scheduler */
224
225 /* Per T204 TX port */
226 struct sched_port {
227 unsigned int avail; /* available bits - quota */
228 unsigned int drain_bits_per_1024ns; /* drain rate */
229 unsigned int speed; /* drain rate, mbps */
230 unsigned int mtu; /* mtu size */
231 struct sk_buff_head skbq; /* pending skbs */
232 };
233
234 /* Per T204 device */
235 struct sched {
236 ktime_t last_updated; /* last time quotas were computed */
237 unsigned int max_avail; /* max bits to be sent to any port */
238 unsigned int port; /* port index (round robin ports) */
239 unsigned int num; /* num skbs in per port queues */
240 struct sched_port p[MAX_NPORTS];
241 struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
242 };
243 static void restart_sched(unsigned long);
244
245
246 /*
247 * Main SGE data structure
248 *
249  * Interrupts are handled by a single CPU and it is likely that on an MP system
250  * the application is migrated to another CPU. In that scenario, we try to
251  * separate the RX (in irq context) and TX state in order to decrease memory
252  * contention.
253 */
254 struct sge {
255 struct adapter *adapter; /* adapter backpointer */
256 struct net_device *netdev; /* netdevice backpointer */
257 struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
258 struct respQ respQ; /* response Q */
259 unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
260 unsigned int rx_pkt_pad; /* RX padding for L2 packets */
261 unsigned int jumbo_fl; /* jumbo freelist Q index */
262 unsigned int intrtimer_nres; /* no-resource interrupt timer */
263 unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */
264 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
265 struct timer_list espibug_timer;
266 unsigned long espibug_timeout;
267 struct sk_buff *espibug_skb[MAX_NPORTS];
268 u32 sge_control; /* shadow value of sge control reg */
269 struct sge_intr_counts stats;
270 struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
271 struct sched *tx_sched;
272 struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
273 };
274
275 static const u8 ch_mac_addr[ETH_ALEN] = {
276 0x0, 0x7, 0x43, 0x0, 0x0, 0x0
277 };
278
279 /*
280 * stop tasklet and free all pending skb's
281 */
282 static void tx_sched_stop(struct sge *sge)
283 {
284 struct sched *s = sge->tx_sched;
285 int i;
286
287 tasklet_kill(&s->sched_tsk);
288
289 for (i = 0; i < MAX_NPORTS; i++)
290 		__skb_queue_purge(&s->p[i].skbq);
291 }
292
293 /*
294 * t1_sched_update_parms() is called when the MTU or link speed changes. It
295  * re-computes scheduler parameters to cope with the change.
296 */
297 unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
298 unsigned int mtu, unsigned int speed)
299 {
300 struct sched *s = sge->tx_sched;
301 struct sched_port *p = &s->p[port];
302 unsigned int max_avail_segs;
303
304 pr_debug("%s mtu=%d speed=%d\n", __func__, mtu, speed);
305 if (speed)
306 p->speed = speed;
307 if (mtu)
308 p->mtu = mtu;
309
310 if (speed || mtu) {
311 unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
312 do_div(drain, (p->mtu + 50) * 1000);
313 p->drain_bits_per_1024ns = (unsigned int) drain;
314
315 if (p->speed < 1000)
316 p->drain_bits_per_1024ns =
317 90 * p->drain_bits_per_1024ns / 100;
318 }
319
320 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
321 p->drain_bits_per_1024ns -= 16;
322 s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
323 max_avail_segs = max(1U, 4096 / (p->mtu - 40));
324 } else {
325 s->max_avail = 16384;
326 max_avail_segs = max(1U, 9000 / (p->mtu - 40));
327 }
328
329 pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
330 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
331 p->speed, s->max_avail, max_avail_segs,
332 p->drain_bits_per_1024ns);
333
334 return max_avail_segs * (p->mtu - 40);
335 }
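
/*
 * Worked example (illustrative): for a 1 Gbps port with a 1500 byte MTU,
 * drain = 1024 * 1000 * (1500 - 40) / ((1500 + 50) * 1000) ~= 964 per
 * 1024 ns, i.e. roughly line rate once the assumed 40 bytes of protocol
 * headers and ~50 bytes of per-frame wire overhead are factored out.
 */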
336
337 #if 0
338
339 /*
340  * t1_sched_set_max_avail_bytes() tells the scheduler the maximum amount of
341 * data that can be pushed per port.
342 */
343 void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
344 {
345 struct sched *s = sge->tx_sched;
346 unsigned int i;
347
348 s->max_avail = val;
349 for (i = 0; i < MAX_NPORTS; i++)
350 t1_sched_update_parms(sge, i, 0, 0);
351 }
352
353 /*
354 * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
355 * is draining.
356 */
357 void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
358 unsigned int val)
359 {
360 struct sched *s = sge->tx_sched;
361 struct sched_port *p = &s->p[port];
362 p->drain_bits_per_1024ns = val * 1024 / 1000;
363 t1_sched_update_parms(sge, port, 0, 0);
364 }
365
366 #endif /* 0 */
367
368 /*
369 * tx_sched_init() allocates resources and does basic initialization.
370 */
371 static int tx_sched_init(struct sge *sge)
372 {
373 struct sched *s;
374 int i;
375
376 	s = kzalloc(sizeof(struct sched), GFP_KERNEL);
377 if (!s)
378 return -ENOMEM;
379
380 pr_debug("tx_sched_init\n");
381 tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
382 sge->tx_sched = s;
383
384 for (i = 0; i < MAX_NPORTS; i++) {
385 skb_queue_head_init(&s->p[i].skbq);
386 t1_sched_update_parms(sge, i, 1500, 1000);
387 }
388
389 return 0;
390 }
391
392 /*
393  * sched_update_avail() computes the delta since the last time it was called
394  * and updates the per port quota (number of bits that can be sent to any
395  * port).
396 */
397 static inline int sched_update_avail(struct sge *sge)
398 {
399 struct sched *s = sge->tx_sched;
400 ktime_t now = ktime_get();
401 unsigned int i;
402 long long delta_time_ns;
403
404 delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));
405
406 pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
407 if (delta_time_ns < 15000)
408 return 0;
409
410 for (i = 0; i < MAX_NPORTS; i++) {
411 struct sched_port *p = &s->p[i];
412 unsigned int delta_avail;
413
414 delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
415 p->avail = min(p->avail + delta_avail, s->max_avail);
416 }
417
418 s->last_updated = now;
419
420 return 1;
421 }
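
/*
 * Worked example (illustrative, continuing the numbers above): with
 * drain_bits_per_1024ns ~= 964 and delta_time_ns = 15000, a port gains
 * (964 * 15000) >> 13 ~= 1765 units of quota, capped at s->max_avail.
 */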
422
423 /*
424 * sched_skb() is called from two different places. In the tx path, any
425 * packet generating load on an output port will call sched_skb()
426 * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
427 * context (skb == NULL).
428 * The scheduler only returns a skb (which will then be sent) if the
429 * length of the skb is <= the current quota of the output port.
430 */
431 static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
432 unsigned int credits)
433 {
434 struct sched *s = sge->tx_sched;
435 struct sk_buff_head *skbq;
436 unsigned int i, len, update = 1;
437
438 pr_debug("sched_skb %p\n", skb);
439 if (!skb) {
440 if (!s->num)
441 return NULL;
442 } else {
443 skbq = &s->p[skb->dev->if_port].skbq;
444 __skb_queue_tail(skbq, skb);
445 s->num++;
446 skb = NULL;
447 }
448
449 if (credits < MAX_SKB_FRAGS + 1)
450 goto out;
451
452 again:
453 for (i = 0; i < MAX_NPORTS; i++) {
454 s->port = (s->port + 1) & (MAX_NPORTS - 1);
455 skbq = &s->p[s->port].skbq;
456
457 skb = skb_peek(skbq);
458
459 if (!skb)
460 continue;
461
462 len = skb->len;
463 if (len <= s->p[s->port].avail) {
464 s->p[s->port].avail -= len;
465 s->num--;
466 __skb_unlink(skb, skbq);
467 goto out;
468 }
469 skb = NULL;
470 }
471
472 if (update-- && sched_update_avail(sge))
473 goto again;
474
475 out:
476 /* If there are more pending skbs, we use the hardware to schedule us
477 * again.
478 */
479 if (s->num && !skb) {
480 struct cmdQ *q = &sge->cmdQ[0];
481 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
482 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
483 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
484 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
485 }
486 }
487 pr_debug("sched_skb ret %p\n", skb);
488
489 return skb;
490 }
491
492 /*
493 * PIO to indicate that memory mapped Q contains valid descriptor(s).
494 */
495 static inline void doorbell_pio(struct adapter *adapter, u32 val)
496 {
497 wmb();
498 writel(val, adapter->regs + A_SG_DOORBELL);
499 }
500
501 /*
502 * Frees all RX buffers on the freelist Q. The caller must make sure that
503 * the SGE is turned off before calling this function.
504 */
505 static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
506 {
507 unsigned int cidx = q->cidx;
508
509 while (q->credits--) {
510 struct freelQ_ce *ce = &q->centries[cidx];
511
512 pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
513 dma_unmap_len(ce, dma_len),
514 PCI_DMA_FROMDEVICE);
515 dev_kfree_skb(ce->skb);
516 ce->skb = NULL;
517 if (++cidx == q->size)
518 cidx = 0;
519 }
520 }
521
522 /*
523 * Free RX free list and response queue resources.
524 */
525 static void free_rx_resources(struct sge *sge)
526 {
527 struct pci_dev *pdev = sge->adapter->pdev;
528 unsigned int size, i;
529
530 if (sge->respQ.entries) {
531 size = sizeof(struct respQ_e) * sge->respQ.size;
532 pci_free_consistent(pdev, size, sge->respQ.entries,
533 sge->respQ.dma_addr);
534 }
535
536 for (i = 0; i < SGE_FREELQ_N; i++) {
537 struct freelQ *q = &sge->freelQ[i];
538
539 if (q->centries) {
540 free_freelQ_buffers(pdev, q);
541 kfree(q->centries);
542 }
543 if (q->entries) {
544 size = sizeof(struct freelQ_e) * q->size;
545 pci_free_consistent(pdev, size, q->entries,
546 q->dma_addr);
547 }
548 }
549 }
550
551 /*
552 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
553 * response queue.
554 */
555 static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
556 {
557 struct pci_dev *pdev = sge->adapter->pdev;
558 unsigned int size, i;
559
560 for (i = 0; i < SGE_FREELQ_N; i++) {
561 struct freelQ *q = &sge->freelQ[i];
562
563 q->genbit = 1;
564 q->size = p->freelQ_size[i];
565 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
566 size = sizeof(struct freelQ_e) * q->size;
567 q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
568 if (!q->entries)
569 goto err_no_mem;
570
571 size = sizeof(struct freelQ_ce) * q->size;
572 q->centries = kzalloc(size, GFP_KERNEL);
573 if (!q->centries)
574 goto err_no_mem;
575 }
576
577 /*
578 * Calculate the buffer sizes for the two free lists. FL0 accommodates
579 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
580 * including all the sk_buff overhead.
581 *
582 * Note: For T2 FL0 and FL1 are reversed.
583 */
584 sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
585 sizeof(struct cpl_rx_data) +
586 sge->freelQ[!sge->jumbo_fl].dma_offset;
587
588 size = (16 * 1024) -
589 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
590
591 sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
592
593 /*
594 * Setup which skb recycle Q should be used when recycling buffers from
595 * each free list.
596 */
597 sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
598 sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
599
600 sge->respQ.genbit = 1;
601 sge->respQ.size = SGE_RESPQ_E_N;
602 sge->respQ.credits = 0;
603 size = sizeof(struct respQ_e) * sge->respQ.size;
604 sge->respQ.entries =
605 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
606 if (!sge->respQ.entries)
607 goto err_no_mem;
608 return 0;
609
610 err_no_mem:
611 free_rx_resources(sge);
612 return -ENOMEM;
613 }
614
615 /*
616 * Reclaims n TX descriptors and frees the buffers associated with them.
617 */
618 static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
619 {
620 struct cmdQ_ce *ce;
621 struct pci_dev *pdev = sge->adapter->pdev;
622 unsigned int cidx = q->cidx;
623
624 q->in_use -= n;
625 ce = &q->centries[cidx];
626 while (n--) {
627 if (likely(dma_unmap_len(ce, dma_len))) {
628 pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
629 dma_unmap_len(ce, dma_len),
630 PCI_DMA_TODEVICE);
631 if (q->sop)
632 q->sop = 0;
633 }
634 if (ce->skb) {
635 dev_kfree_skb_any(ce->skb);
636 q->sop = 1;
637 }
638 ce++;
639 if (++cidx == q->size) {
640 cidx = 0;
641 ce = q->centries;
642 }
643 }
644 q->cidx = cidx;
645 }
646
647 /*
648 * Free TX resources.
649 *
650 * Assumes that SGE is stopped and all interrupts are disabled.
651 */
652 static void free_tx_resources(struct sge *sge)
653 {
654 struct pci_dev *pdev = sge->adapter->pdev;
655 unsigned int size, i;
656
657 for (i = 0; i < SGE_CMDQ_N; i++) {
658 struct cmdQ *q = &sge->cmdQ[i];
659
660 if (q->centries) {
661 if (q->in_use)
662 free_cmdQ_buffers(sge, q, q->in_use);
663 kfree(q->centries);
664 }
665 if (q->entries) {
666 size = sizeof(struct cmdQ_e) * q->size;
667 pci_free_consistent(pdev, size, q->entries,
668 q->dma_addr);
669 }
670 }
671 }
672
673 /*
674 * Allocates basic TX resources, consisting of memory mapped command Qs.
675 */
676 static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
677 {
678 struct pci_dev *pdev = sge->adapter->pdev;
679 unsigned int size, i;
680
681 for (i = 0; i < SGE_CMDQ_N; i++) {
682 struct cmdQ *q = &sge->cmdQ[i];
683
684 q->genbit = 1;
685 q->sop = 1;
686 q->size = p->cmdQ_size[i];
687 q->in_use = 0;
688 q->status = 0;
689 q->processed = q->cleaned = 0;
690 q->stop_thres = 0;
691 spin_lock_init(&q->lock);
692 size = sizeof(struct cmdQ_e) * q->size;
693 q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
694 if (!q->entries)
695 goto err_no_mem;
696
697 size = sizeof(struct cmdQ_ce) * q->size;
698 q->centries = kzalloc(size, GFP_KERNEL);
699 if (!q->centries)
700 goto err_no_mem;
701 }
702
703 /*
704 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
705  * only. For queue 0 set the stop threshold so we can handle one more
706  * worst-case packet from each port, i.e. nports * (MAX_SKB_FRAGS + 1)
707  * descriptors. Queue 1 never suspends, so it needs no stop
708  * threshold.
709 */
710 sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
711 (MAX_SKB_FRAGS + 1);
712 return 0;
713
714 err_no_mem:
715 free_tx_resources(sge);
716 return -ENOMEM;
717 }
718
719 static inline void setup_ring_params(struct adapter *adapter, u64 addr,
720 u32 size, int base_reg_lo,
721 int base_reg_hi, int size_reg)
722 {
723 writel((u32)addr, adapter->regs + base_reg_lo);
724 writel(addr >> 32, adapter->regs + base_reg_hi);
725 writel(size, adapter->regs + size_reg);
726 }
727
728 /*
729 * Enable/disable VLAN acceleration.
730 */
731 void t1_vlan_mode(struct adapter *adapter, netdev_features_t features)
732 {
733 struct sge *sge = adapter->sge;
734
735 if (features & NETIF_F_HW_VLAN_CTAG_RX)
736 sge->sge_control |= F_VLAN_XTRACT;
737 else
738 sge->sge_control &= ~F_VLAN_XTRACT;
739 if (adapter->open_device_map) {
740 writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
741 readl(adapter->regs + A_SG_CONTROL); /* flush */
742 }
743 }
744
745 /*
746  * Programs the various SGE registers. The engine is not yet enabled,
747  * but sge->sge_control is set up and ready to go.
748 */
749 static void configure_sge(struct sge *sge, struct sge_params *p)
750 {
751 struct adapter *ap = sge->adapter;
752
753 writel(0, ap->regs + A_SG_CONTROL);
754 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
755 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
756 setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
757 A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
758 setup_ring_params(ap, sge->freelQ[0].dma_addr,
759 sge->freelQ[0].size, A_SG_FL0BASELWR,
760 A_SG_FL0BASEUPR, A_SG_FL0SIZE);
761 setup_ring_params(ap, sge->freelQ[1].dma_addr,
762 sge->freelQ[1].size, A_SG_FL1BASELWR,
763 A_SG_FL1BASEUPR, A_SG_FL1SIZE);
764
765 /* The threshold comparison uses <. */
766 writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
767
768 setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
769 A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
770 writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
771
772 sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
773 F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
774 V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
775 V_RX_PKT_OFFSET(sge->rx_pkt_pad);
776
777 #if defined(__BIG_ENDIAN_BITFIELD)
778 sge->sge_control |= F_ENABLE_BIG_ENDIAN;
779 #endif
780
781 /* Initialize no-resource timer */
782 sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
783
784 t1_sge_set_coalesce_params(sge, p);
785 }
786
787 /*
788 * Return the payload capacity of the jumbo free-list buffers.
789 */
790 static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
791 {
792 return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
793 sge->freelQ[sge->jumbo_fl].dma_offset -
794 sizeof(struct cpl_rx_data);
795 }
796
797 /*
798 * Frees all SGE related resources and the sge structure itself
799 */
800 void t1_sge_destroy(struct sge *sge)
801 {
802 int i;
803
804 for_each_port(sge->adapter, i)
805 free_percpu(sge->port_stats[i]);
806
807 kfree(sge->tx_sched);
808 free_tx_resources(sge);
809 free_rx_resources(sge);
810 kfree(sge);
811 }
812
813 /*
814 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
815 * context Q) until the Q is full or alloc_skb fails.
816 *
817 * It is possible that the generation bits already match, indicating that the
818 * buffer is already valid and nothing needs to be done. This happens when we
819 * copied a received buffer into a new sk_buff during the interrupt processing.
820 *
821 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
822 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
823 * aligned.
824 */
825 static void refill_free_list(struct sge *sge, struct freelQ *q)
826 {
827 struct pci_dev *pdev = sge->adapter->pdev;
828 struct freelQ_ce *ce = &q->centries[q->pidx];
829 struct freelQ_e *e = &q->entries[q->pidx];
830 unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
831
832 while (q->credits < q->size) {
833 struct sk_buff *skb;
834 dma_addr_t mapping;
835
836 skb = dev_alloc_skb(q->rx_buffer_size);
837 if (!skb)
838 break;
839
840 skb_reserve(skb, q->dma_offset);
841 mapping = pci_map_single(pdev, skb->data, dma_len,
842 PCI_DMA_FROMDEVICE);
843 skb_reserve(skb, sge->rx_pkt_pad);
844
845 ce->skb = skb;
846 dma_unmap_addr_set(ce, dma_addr, mapping);
847 dma_unmap_len_set(ce, dma_len, dma_len);
848 e->addr_lo = (u32)mapping;
849 e->addr_hi = (u64)mapping >> 32;
850 e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
851 wmb();
852 e->gen2 = V_CMD_GEN2(q->genbit);
853
854 e++;
855 ce++;
856 if (++q->pidx == q->size) {
857 q->pidx = 0;
858 q->genbit ^= 1;
859 ce = q->centries;
860 e = q->entries;
861 }
862 q->credits++;
863 }
864 }
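
/*
 * Note (editorial): exactly one of the two skb_reserve() calls above is
 * nonzero. If the SGE does not pad RX packets, q->dma_offset
 * (NET_IP_ALIGN) is reserved before mapping so that the 14-byte Ethernet
 * header ends on a 4-byte boundary and the IP header lands aligned; if
 * the SGE does pad, sge->rx_pkt_pad is reserved after mapping to skip
 * the hardware-inserted padding.
 */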
865
866 /*
867 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
868 * of both rings, we go into 'few interrupt mode' in order to give the system
869 * time to free up resources.
870 */
871 static void freelQs_empty(struct sge *sge)
872 {
873 struct adapter *adapter = sge->adapter;
874 u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
875 u32 irqholdoff_reg;
876
877 refill_free_list(sge, &sge->freelQ[0]);
878 refill_free_list(sge, &sge->freelQ[1]);
879
880 if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
881 sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
882 irq_reg |= F_FL_EXHAUSTED;
883 irqholdoff_reg = sge->fixed_intrtimer;
884 } else {
885 /* Clear the F_FL_EXHAUSTED interrupts for now */
886 irq_reg &= ~F_FL_EXHAUSTED;
887 irqholdoff_reg = sge->intrtimer_nres;
888 }
889 writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
890 writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
891
892 /* We reenable the Qs to force a freelist GTS interrupt later */
893 doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
894 }
895
896 #define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
897 #define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
898 #define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
899 F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
900
901 /*
902 * Disable SGE Interrupts
903 */
904 void t1_sge_intr_disable(struct sge *sge)
905 {
906 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
907
908 writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
909 writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
910 }
911
912 /*
913 * Enable SGE interrupts.
914 */
915 void t1_sge_intr_enable(struct sge *sge)
916 {
917 u32 en = SGE_INT_ENABLE;
918 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
919
920 if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
921 en &= ~F_PACKET_TOO_BIG;
922 writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
923 writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
924 }
925
926 /*
927 * Clear SGE interrupts.
928 */
929 void t1_sge_intr_clear(struct sge *sge)
930 {
931 writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
932 writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
933 }
934
935 /*
936 * SGE 'Error' interrupt handler
937 */
938 int t1_sge_intr_error_handler(struct sge *sge)
939 {
940 struct adapter *adapter = sge->adapter;
941 u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
942
943 if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
944 cause &= ~F_PACKET_TOO_BIG;
945 if (cause & F_RESPQ_EXHAUSTED)
946 sge->stats.respQ_empty++;
947 if (cause & F_RESPQ_OVERFLOW) {
948 sge->stats.respQ_overflow++;
949 pr_alert("%s: SGE response queue overflow\n",
950 adapter->name);
951 }
952 if (cause & F_FL_EXHAUSTED) {
953 sge->stats.freelistQ_empty++;
954 freelQs_empty(sge);
955 }
956 if (cause & F_PACKET_TOO_BIG) {
957 sge->stats.pkt_too_big++;
958 pr_alert("%s: SGE max packet size exceeded\n",
959 adapter->name);
960 }
961 if (cause & F_PACKET_MISMATCH) {
962 sge->stats.pkt_mismatch++;
963 pr_alert("%s: SGE packet mismatch\n", adapter->name);
964 }
965 if (cause & SGE_INT_FATAL)
966 t1_fatal_err(adapter);
967
968 writel(cause, adapter->regs + A_SG_INT_CAUSE);
969 return 0;
970 }
971
972 const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
973 {
974 return &sge->stats;
975 }
976
977 void t1_sge_get_port_stats(const struct sge *sge, int port,
978 struct sge_port_stats *ss)
979 {
980 int cpu;
981
982 memset(ss, 0, sizeof(*ss));
983 for_each_possible_cpu(cpu) {
984 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
985
986 ss->rx_cso_good += st->rx_cso_good;
987 ss->tx_cso += st->tx_cso;
988 ss->tx_tso += st->tx_tso;
989 ss->tx_need_hdrroom += st->tx_need_hdrroom;
990 ss->vlan_xtract += st->vlan_xtract;
991 ss->vlan_insert += st->vlan_insert;
992 }
993 }
994
995 /**
996 * recycle_fl_buf - recycle a free list buffer
997 * @fl: the free list
998 * @idx: index of buffer to recycle
999 *
1000 * Recycles the specified buffer on the given free list by adding it at
1001 * the next available slot on the list.
1002 */
1003 static void recycle_fl_buf(struct freelQ *fl, int idx)
1004 {
1005 struct freelQ_e *from = &fl->entries[idx];
1006 struct freelQ_e *to = &fl->entries[fl->pidx];
1007
1008 fl->centries[fl->pidx] = fl->centries[idx];
1009 to->addr_lo = from->addr_lo;
1010 to->addr_hi = from->addr_hi;
1011 to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
1012 wmb();
1013 to->gen2 = V_CMD_GEN2(fl->genbit);
1014 fl->credits++;
1015
1016 if (++fl->pidx == fl->size) {
1017 fl->pidx = 0;
1018 fl->genbit ^= 1;
1019 }
1020 }
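
/*
 * Note (editorial): the wmb() above makes sure the address and length
 * words reach memory before gen2 is written; gen2 acts as the commit
 * point, since the descriptor only becomes valid to the SGE once both
 * generation words match fl->genbit.
 */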
1021
1022 static int copybreak __read_mostly = 256;
1023 module_param(copybreak, int, 0);
1024 MODULE_PARM_DESC(copybreak, "Receive copy threshold");
1025
1026 /**
1027 * get_packet - return the next ingress packet buffer
1028 * @adapter: the adapter that received the packet
1029 * @fl: the SGE free list holding the packet
1030 * @len: the actual packet length, excluding any SGE padding
1031 *
1032 * Get the next packet from a free list and complete setup of the
1033 * sk_buff. If the packet is small we make a copy and recycle the
1034  *	original buffer, otherwise we use the original buffer itself. If the
1035  *	free list is running low (fewer than two credits) the packet is
1036  *	dropped and its buffer recycled instead of being handed up; if the
1037  *	packet should be copied but the copy allocation fails, we fall back
1038  *	to using the original buffer.
1039 */
1040 static inline struct sk_buff *get_packet(struct adapter *adapter,
1041 struct freelQ *fl, unsigned int len)
1042 {
1043 const struct freelQ_ce *ce = &fl->centries[fl->cidx];
1044 struct pci_dev *pdev = adapter->pdev;
1045 struct sk_buff *skb;
1046
1047 if (len < copybreak) {
1048 skb = napi_alloc_skb(&adapter->napi, len);
1049 if (!skb)
1050 goto use_orig_buf;
1051
1052 skb_put(skb, len);
1053 pci_dma_sync_single_for_cpu(pdev,
1054 dma_unmap_addr(ce, dma_addr),
1055 dma_unmap_len(ce, dma_len),
1056 PCI_DMA_FROMDEVICE);
1057 skb_copy_from_linear_data(ce->skb, skb->data, len);
1058 pci_dma_sync_single_for_device(pdev,
1059 dma_unmap_addr(ce, dma_addr),
1060 dma_unmap_len(ce, dma_len),
1061 PCI_DMA_FROMDEVICE);
1062 recycle_fl_buf(fl, fl->cidx);
1063 return skb;
1064 }
1065
1066 use_orig_buf:
1067 if (fl->credits < 2) {
1068 recycle_fl_buf(fl, fl->cidx);
1069 return NULL;
1070 }
1071
1072 pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
1073 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
1074 skb = ce->skb;
1075 prefetch(skb->data);
1076
1077 skb_put(skb, len);
1078 return skb;
1079 }
1080
1081 /**
1082 * unexpected_offload - handle an unexpected offload packet
1083 * @adapter: the adapter
1084 * @fl: the free list that received the packet
1085 *
1086 * Called when we receive an unexpected offload packet (e.g., the TOE
1087 * function is disabled or the card is a NIC). Prints a message and
1088 * recycles the buffer.
1089 */
1090 static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
1091 {
1092 struct freelQ_ce *ce = &fl->centries[fl->cidx];
1093 struct sk_buff *skb = ce->skb;
1094
1095 pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
1096 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
1097 pr_err("%s: unexpected offload packet, cmd %u\n",
1098 adapter->name, *skb->data);
1099 recycle_fl_buf(fl, fl->cidx);
1100 }
1101
1102 /*
1103 * T1/T2 SGE limits the maximum DMA size per TX descriptor to
1104 * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
1105 * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
1106 * Note that the *_large_page_tx_descs stuff will be optimized out when
1107 * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
1108 *
1109  * compute_large_page_tx_descs() computes how many additional descriptors are
1110 * required to break down the stack's request.
1111 */
1112 static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
1113 {
1114 unsigned int count = 0;
1115
1116 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
1117 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
1118 unsigned int i, len = skb_headlen(skb);
1119 while (len > SGE_TX_DESC_MAX_PLEN) {
1120 count++;
1121 len -= SGE_TX_DESC_MAX_PLEN;
1122 }
1123 for (i = 0; nfrags--; i++) {
1124 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1125 len = skb_frag_size(frag);
1126 while (len > SGE_TX_DESC_MAX_PLEN) {
1127 count++;
1128 len -= SGE_TX_DESC_MAX_PLEN;
1129 }
1130 }
1131 }
1132 return count;
1133 }
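
/*
 * Worked example (illustrative): with PAGE_SIZE = 64 KB and an skb whose
 * linear area is 40960 bytes, the loop peels off two full 16384-byte
 * chunks (leaving 8192 bytes), so two additional descriptors are counted
 * on top of the one the caller already accounts for.
 */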
1134
1135 /*
1136 * Write a cmdQ entry.
1137 *
1138 * Since this function writes the 'flags' field, it must not be used to
1139 * write the first cmdQ entry.
1140 */
1141 static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
1142 unsigned int len, unsigned int gen,
1143 unsigned int eop)
1144 {
1145 BUG_ON(len > SGE_TX_DESC_MAX_PLEN);
1146
1147 e->addr_lo = (u32)mapping;
1148 e->addr_hi = (u64)mapping >> 32;
1149 e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
1150 e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
1151 }
1152
1153 /*
1154 * See comment for previous function.
1155 *
1156  * write_large_page_tx_descs() writes additional SGE Tx descriptors if
1157 * *desc_len exceeds HW's capability.
1158 */
1159 static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
1160 struct cmdQ_e **e,
1161 struct cmdQ_ce **ce,
1162 unsigned int *gen,
1163 dma_addr_t *desc_mapping,
1164 unsigned int *desc_len,
1165 unsigned int nfrags,
1166 struct cmdQ *q)
1167 {
1168 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
1169 struct cmdQ_e *e1 = *e;
1170 struct cmdQ_ce *ce1 = *ce;
1171
1172 while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
1173 *desc_len -= SGE_TX_DESC_MAX_PLEN;
1174 write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
1175 *gen, nfrags == 0 && *desc_len == 0);
1176 ce1->skb = NULL;
1177 dma_unmap_len_set(ce1, dma_len, 0);
1178 *desc_mapping += SGE_TX_DESC_MAX_PLEN;
1179 if (*desc_len) {
1180 ce1++;
1181 e1++;
1182 if (++pidx == q->size) {
1183 pidx = 0;
1184 *gen ^= 1;
1185 ce1 = q->centries;
1186 e1 = q->entries;
1187 }
1188 }
1189 }
1190 *e = e1;
1191 *ce = ce1;
1192 }
1193 return pidx;
1194 }
1195
1196 /*
1197 * Write the command descriptors to transmit the given skb starting at
1198 * descriptor pidx with the given generation.
1199 */
1200 static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
1201 unsigned int pidx, unsigned int gen,
1202 struct cmdQ *q)
1203 {
1204 dma_addr_t mapping, desc_mapping;
1205 struct cmdQ_e *e, *e1;
1206 struct cmdQ_ce *ce;
1207 unsigned int i, flags, first_desc_len, desc_len,
1208 nfrags = skb_shinfo(skb)->nr_frags;
1209
1210 e = e1 = &q->entries[pidx];
1211 ce = &q->centries[pidx];
1212
1213 mapping = pci_map_single(adapter->pdev, skb->data,
1214 skb_headlen(skb), PCI_DMA_TODEVICE);
1215
1216 desc_mapping = mapping;
1217 desc_len = skb_headlen(skb);
1218
1219 flags = F_CMD_DATAVALID | F_CMD_SOP |
1220 V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
1221 V_CMD_GEN2(gen);
1222 first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
1223 desc_len : SGE_TX_DESC_MAX_PLEN;
1224 e->addr_lo = (u32)desc_mapping;
1225 e->addr_hi = (u64)desc_mapping >> 32;
1226 e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
1227 ce->skb = NULL;
1228 dma_unmap_len_set(ce, dma_len, 0);
1229
1230 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
1231 desc_len > SGE_TX_DESC_MAX_PLEN) {
1232 desc_mapping += first_desc_len;
1233 desc_len -= first_desc_len;
1234 e1++;
1235 ce++;
1236 if (++pidx == q->size) {
1237 pidx = 0;
1238 gen ^= 1;
1239 e1 = q->entries;
1240 ce = q->centries;
1241 }
1242 pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
1243 &desc_mapping, &desc_len,
1244 nfrags, q);
1245
1246 if (likely(desc_len))
1247 write_tx_desc(e1, desc_mapping, desc_len, gen,
1248 nfrags == 0);
1249 }
1250
1251 ce->skb = NULL;
1252 dma_unmap_addr_set(ce, dma_addr, mapping);
1253 dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
1254
1255 for (i = 0; nfrags--; i++) {
1256 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1257 e1++;
1258 ce++;
1259 if (++pidx == q->size) {
1260 pidx = 0;
1261 gen ^= 1;
1262 e1 = q->entries;
1263 ce = q->centries;
1264 }
1265
1266 mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0,
1267 skb_frag_size(frag), DMA_TO_DEVICE);
1268 desc_mapping = mapping;
1269 desc_len = skb_frag_size(frag);
1270
1271 pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
1272 &desc_mapping, &desc_len,
1273 nfrags, q);
1274 if (likely(desc_len))
1275 write_tx_desc(e1, desc_mapping, desc_len, gen,
1276 nfrags == 0);
1277 ce->skb = NULL;
1278 dma_unmap_addr_set(ce, dma_addr, mapping);
1279 dma_unmap_len_set(ce, dma_len, skb_frag_size(frag));
1280 }
1281 ce->skb = skb;
1282 wmb();
1283 e->flags = flags;
1284 }
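
/*
 * Example (illustrative): an skb with a linear area and two fragments
 * occupies three descriptors, SOP set on the first and EOP on the last.
 * The SOP descriptor's flags word is deliberately written only after the
 * wmb() above, so the hardware never observes a partially built chain.
 */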
1285
1286 /*
1287 * Clean up completed Tx buffers.
1288 */
1289 static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
1290 {
1291 unsigned int reclaim = q->processed - q->cleaned;
1292
1293 if (reclaim) {
1294 pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
1295 q->processed, q->cleaned);
1296 free_cmdQ_buffers(sge, q, reclaim);
1297 q->cleaned += reclaim;
1298 }
1299 }
1300
1301 /*
1302 * Called from tasklet. Checks the scheduler for any
1303 * pending skbs that can be sent.
1304 */
1305 static void restart_sched(unsigned long arg)
1306 {
1307 struct sge *sge = (struct sge *) arg;
1308 struct adapter *adapter = sge->adapter;
1309 struct cmdQ *q = &sge->cmdQ[0];
1310 struct sk_buff *skb;
1311 unsigned int credits, queued_skb = 0;
1312
1313 spin_lock(&q->lock);
1314 reclaim_completed_tx(sge, q);
1315
1316 credits = q->size - q->in_use;
1317 pr_debug("restart_sched credits=%d\n", credits);
1318 while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
1319 unsigned int genbit, pidx, count;
1320 count = 1 + skb_shinfo(skb)->nr_frags;
1321 count += compute_large_page_tx_descs(skb);
1322 q->in_use += count;
1323 genbit = q->genbit;
1324 pidx = q->pidx;
1325 q->pidx += count;
1326 if (q->pidx >= q->size) {
1327 q->pidx -= q->size;
1328 q->genbit ^= 1;
1329 }
1330 write_tx_descs(adapter, skb, pidx, genbit, q);
1331 credits = q->size - q->in_use;
1332 queued_skb = 1;
1333 }
1334
1335 if (queued_skb) {
1336 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1337 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
1338 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1339 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1340 }
1341 }
1342 spin_unlock(&q->lock);
1343 }
1344
1345 /**
1346 * sge_rx - process an ingress ethernet packet
1347 * @sge: the sge structure
1348 * @fl: the free list that contains the packet buffer
1349 * @len: the packet length
1350 *
1351  *	Process an ingress Ethernet packet and deliver it to the stack.
1352 */
1353 static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1354 {
1355 struct sk_buff *skb;
1356 const struct cpl_rx_pkt *p;
1357 struct adapter *adapter = sge->adapter;
1358 struct sge_port_stats *st;
1359 struct net_device *dev;
1360
1361 skb = get_packet(adapter, fl, len - sge->rx_pkt_pad);
1362 if (unlikely(!skb)) {
1363 sge->stats.rx_drops++;
1364 return;
1365 }
1366
1367 p = (const struct cpl_rx_pkt *) skb->data;
1368 if (p->iff >= adapter->params.nports) {
1369 kfree_skb(skb);
1370 return;
1371 }
1372 __skb_pull(skb, sizeof(*p));
1373
1374 st = this_cpu_ptr(sge->port_stats[p->iff]);
1375 dev = adapter->port[p->iff].dev;
1376
1377 skb->protocol = eth_type_trans(skb, dev);
1378 if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
1379 skb->protocol == htons(ETH_P_IP) &&
1380 (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
1381 ++st->rx_cso_good;
1382 skb->ip_summed = CHECKSUM_UNNECESSARY;
1383 } else
1384 skb_checksum_none_assert(skb);
1385
1386 if (p->vlan_valid) {
1387 st->vlan_xtract++;
1388 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
1389 }
1390 netif_receive_skb(skb);
1391 }
1392
1393 /*
1394 * Returns true if a command queue has enough available descriptors that
1395 * we can resume Tx operation after temporarily disabling its packet queue.
1396 */
1397 static inline int enough_free_Tx_descs(const struct cmdQ *q)
1398 {
1399 unsigned int r = q->processed - q->cleaned;
1400
1401 return q->in_use - r < (q->size >> 1);
1402 }
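
/*
 * Worked example (illustrative): with q->size = 1024 the queue is
 * considered ready to resume once fewer than 512 descriptors remain
 * outstanding, counting descriptors the HW has processed but SW has not
 * yet reclaimed as free.
 */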
1403
1404 /*
1405 * Called when sufficient space has become available in the SGE command queues
1406 * after the Tx packet schedulers have been suspended to restart the Tx path.
1407 */
1408 static void restart_tx_queues(struct sge *sge)
1409 {
1410 struct adapter *adap = sge->adapter;
1411 int i;
1412
1413 if (!enough_free_Tx_descs(&sge->cmdQ[0]))
1414 return;
1415
1416 for_each_port(adap, i) {
1417 struct net_device *nd = adap->port[i].dev;
1418
1419 if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
1420 netif_running(nd)) {
1421 sge->stats.cmdQ_restarted[2]++;
1422 netif_wake_queue(nd);
1423 }
1424 }
1425 }
1426
1427 /*
1428 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
1429 * information.
1430 */
1431 static unsigned int update_tx_info(struct adapter *adapter,
1432 unsigned int flags,
1433 unsigned int pr0)
1434 {
1435 struct sge *sge = adapter->sge;
1436 struct cmdQ *cmdq = &sge->cmdQ[0];
1437
1438 cmdq->processed += pr0;
1439 if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
1440 freelQs_empty(sge);
1441 flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
1442 }
1443 if (flags & F_CMDQ0_ENABLE) {
1444 clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1445
1446 if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
1447 !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
1448 set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1449 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1450 }
1451 if (sge->tx_sched)
1452 tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
1453
1454 flags &= ~F_CMDQ0_ENABLE;
1455 }
1456
1457 if (unlikely(sge->stopped_tx_queues != 0))
1458 restart_tx_queues(sge);
1459
1460 return flags;
1461 }
1462
1463 /*
1464 * Process SGE responses, up to the supplied budget. Returns the number of
1465 * responses processed. A negative budget is effectively unlimited.
1466 */
1467 static int process_responses(struct adapter *adapter, int budget)
1468 {
1469 struct sge *sge = adapter->sge;
1470 struct respQ *q = &sge->respQ;
1471 struct respQ_e *e = &q->entries[q->cidx];
1472 int done = 0;
1473 unsigned int flags = 0;
1474 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1475
1476 while (done < budget && e->GenerationBit == q->genbit) {
1477 flags |= e->Qsleeping;
1478
1479 cmdq_processed[0] += e->Cmdq0CreditReturn;
1480 cmdq_processed[1] += e->Cmdq1CreditReturn;
1481
1482 /* We batch updates to the TX side to avoid cacheline
1483 * ping-pong of TX state information on MP where the sender
1484 * might run on a different CPU than this function...
1485 */
1486 if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
1487 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1488 cmdq_processed[0] = 0;
1489 }
1490
1491 if (unlikely(cmdq_processed[1] > 16)) {
1492 sge->cmdQ[1].processed += cmdq_processed[1];
1493 cmdq_processed[1] = 0;
1494 }
1495
1496 if (likely(e->DataValid)) {
1497 struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1498
1499 BUG_ON(!e->Sop || !e->Eop);
1500 if (unlikely(e->Offload))
1501 unexpected_offload(adapter, fl);
1502 else
1503 sge_rx(sge, fl, e->BufferLength);
1504
1505 ++done;
1506
1507 /*
1508 * Note: this depends on each packet consuming a
1509 * single free-list buffer; cf. the BUG above.
1510 */
1511 if (++fl->cidx == fl->size)
1512 fl->cidx = 0;
1513 prefetch(fl->centries[fl->cidx].skb);
1514
1515 if (unlikely(--fl->credits <
1516 fl->size - SGE_FREEL_REFILL_THRESH))
1517 refill_free_list(sge, fl);
1518 } else
1519 sge->stats.pure_rsps++;
1520
1521 e++;
1522 if (unlikely(++q->cidx == q->size)) {
1523 q->cidx = 0;
1524 q->genbit ^= 1;
1525 e = q->entries;
1526 }
1527 prefetch(e);
1528
1529 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1530 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1531 q->credits = 0;
1532 }
1533 }
1534
1535 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1536 sge->cmdQ[1].processed += cmdq_processed[1];
1537
1538 return done;
1539 }
1540
1541 static inline int responses_pending(const struct adapter *adapter)
1542 {
1543 const struct respQ *Q = &adapter->sge->respQ;
1544 const struct respQ_e *e = &Q->entries[Q->cidx];
1545
1546 return e->GenerationBit == Q->genbit;
1547 }
1548
1549 /*
1550 * A simpler version of process_responses() that handles only pure (i.e.,
1551  * non data-carrying) responses. Such responses are too lightweight to justify
1552  * calling a softirq when using NAPI, so we handle them specially in hard
1553  * interrupt context. The caller must ensure that at least one valid response
1554  * is pending. Returns 1 if it encounters a valid data-carrying response,
1555  * 0 otherwise.
1556 */
1557 static int process_pure_responses(struct adapter *adapter)
1558 {
1559 struct sge *sge = adapter->sge;
1560 struct respQ *q = &sge->respQ;
1561 struct respQ_e *e = &q->entries[q->cidx];
1562 const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1563 unsigned int flags = 0;
1564 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1565
1566 prefetch(fl->centries[fl->cidx].skb);
1567 if (e->DataValid)
1568 return 1;
1569
1570 do {
1571 flags |= e->Qsleeping;
1572
1573 cmdq_processed[0] += e->Cmdq0CreditReturn;
1574 cmdq_processed[1] += e->Cmdq1CreditReturn;
1575
1576 e++;
1577 if (unlikely(++q->cidx == q->size)) {
1578 q->cidx = 0;
1579 q->genbit ^= 1;
1580 e = q->entries;
1581 }
1582 prefetch(e);
1583
1584 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1585 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1586 q->credits = 0;
1587 }
1588 sge->stats.pure_rsps++;
1589 } while (e->GenerationBit == q->genbit && !e->DataValid);
1590
1591 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1592 sge->cmdQ[1].processed += cmdq_processed[1];
1593
1594 return e->GenerationBit == q->genbit;
1595 }
1596
1597 /*
1598 * Handler for new data events when using NAPI. This does not need any locking
1599 * or protection from interrupts as data interrupts are off at this point and
1600 * other adapter interrupts do not interfere.
1601 */
1602 int t1_poll(struct napi_struct *napi, int budget)
1603 {
1604 struct adapter *adapter = container_of(napi, struct adapter, napi);
1605 int work_done = process_responses(adapter, budget);
1606
1607 if (likely(work_done < budget)) {
1608 napi_complete_done(napi, work_done);
1609 writel(adapter->sge->respQ.cidx,
1610 adapter->regs + A_SG_SLEEPING);
1611 }
1612 return work_done;
1613 }
1614
1615 irqreturn_t t1_interrupt(int irq, void *data)
1616 {
1617 struct adapter *adapter = data;
1618 struct sge *sge = adapter->sge;
1619 int handled;
1620
1621 if (likely(responses_pending(adapter))) {
1622 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1623
1624 if (napi_schedule_prep(&adapter->napi)) {
1625 if (process_pure_responses(adapter))
1626 __napi_schedule(&adapter->napi);
1627 else {
1628 /* no data, no NAPI needed */
1629 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1630 /* undo schedule_prep */
1631 napi_enable(&adapter->napi);
1632 }
1633 }
1634 return IRQ_HANDLED;
1635 }
1636
1637 spin_lock(&adapter->async_lock);
1638 handled = t1_slow_intr_handler(adapter);
1639 spin_unlock(&adapter->async_lock);
1640
1641 if (!handled)
1642 sge->stats.unhandled_irqs++;
1643
1644 return IRQ_RETVAL(handled != 0);
1645 }
1646
1647 /*
1648 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
1649 *
1650 * The code figures out how many entries the sk_buff will require in the
1651 * cmdQ and updates the cmdQ data structure with the state once the enqueue
1652  * has completed. Then it doesn't access the global structure anymore, but
1653 * uses the corresponding fields on the stack. In conjunction with a spinlock
1654 * around that code, we can make the function reentrant without holding the
1655 * lock when we actually enqueue (which might be expensive, especially on
1656 * architectures with IO MMUs).
1657 *
1658 * This runs with softirqs disabled.
1659 */
1660 static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1661 unsigned int qid, struct net_device *dev)
1662 {
1663 struct sge *sge = adapter->sge;
1664 struct cmdQ *q = &sge->cmdQ[qid];
1665 unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
1666
1667 spin_lock(&q->lock);
1668
1669 reclaim_completed_tx(sge, q);
1670
1671 pidx = q->pidx;
1672 credits = q->size - q->in_use;
1673 count = 1 + skb_shinfo(skb)->nr_frags;
1674 count += compute_large_page_tx_descs(skb);
1675
1676 /* Ethernet packet */
1677 if (unlikely(credits < count)) {
1678 if (!netif_queue_stopped(dev)) {
1679 netif_stop_queue(dev);
1680 set_bit(dev->if_port, &sge->stopped_tx_queues);
1681 sge->stats.cmdQ_full[2]++;
1682 pr_err("%s: Tx ring full while queue awake!\n",
1683 adapter->name);
1684 }
1685 spin_unlock(&q->lock);
1686 return NETDEV_TX_BUSY;
1687 }
1688
1689 if (unlikely(credits - count < q->stop_thres)) {
1690 netif_stop_queue(dev);
1691 set_bit(dev->if_port, &sge->stopped_tx_queues);
1692 sge->stats.cmdQ_full[2]++;
1693 }
1694
1695 /* T204 cmdQ0 skbs that are destined for a certain port have to go
1696 * through the scheduler.
1697 */
1698 if (sge->tx_sched && !qid && skb->dev) {
1699 use_sched:
1700 use_sched_skb = 1;
1701 /* Note that the scheduler might return a different skb than
1702 * the one passed in.
1703 */
1704 skb = sched_skb(sge, skb, credits);
1705 if (!skb) {
1706 spin_unlock(&q->lock);
1707 return NETDEV_TX_OK;
1708 }
1709 pidx = q->pidx;
1710 count = 1 + skb_shinfo(skb)->nr_frags;
1711 count += compute_large_page_tx_descs(skb);
1712 }
1713
1714 q->in_use += count;
1715 genbit = q->genbit;
1716 pidx = q->pidx;
1717 q->pidx += count;
1718 if (q->pidx >= q->size) {
1719 q->pidx -= q->size;
1720 q->genbit ^= 1;
1721 }
1722 spin_unlock(&q->lock);
1723
1724 write_tx_descs(adapter, skb, pidx, genbit, q);
1725
1726 /*
1727 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
1728 * the doorbell if the Q is asleep. There is a natural race, where
1729 * the hardware is going to sleep just after we checked, however,
1730 * then the interrupt handler will detect the outstanding TX packet
1731 * and ring the doorbell for us.
1732 */
1733 if (qid)
1734 doorbell_pio(adapter, F_CMDQ1_ENABLE);
1735 else {
1736 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1737 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
1738 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1739 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1740 }
1741 }
1742
1743 if (use_sched_skb) {
1744 if (spin_trylock(&q->lock)) {
1745 credits = q->size - q->in_use;
1746 skb = NULL;
1747 goto use_sched;
1748 }
1749 }
1750 return NETDEV_TX_OK;
1751 }
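
/*
 * A minimal sketch of the reservation pattern used by t1_sge_tx() above
 * (hypothetical helper, not part of the driver): ring slots are claimed
 * under q->lock, while the potentially expensive descriptor writes happen
 * outside it. Reentrancy is safe because each caller owns its private
 * [pidx, pidx + count) span once the claim is made.
 */
static inline unsigned int claim_cmdQ_span(struct cmdQ *q, unsigned int count,
					   unsigned int *genbit)
{
	unsigned int pidx;

	/* Caller holds q->lock. */
	q->in_use += count;
	*genbit = q->genbit;
	pidx = q->pidx;
	q->pidx += count;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->genbit ^= 1;		/* flip generation on wrap */
	}
	return pidx;
}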
1752
1753 #define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
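
/*
 * Worked example (illustrative): MK_ETH_TYPE_MSS(1, 1460) packs the MSS
 * into the low 14 bits and the type above them:
 * (1460 & 0x3FFF) | (1 << 14) = 0x5B4 | 0x4000 = 0x45B4.
 */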
1754
1755 /*
1756 * eth_hdr_len - return the length of an Ethernet header
1757 * @data: pointer to the start of the Ethernet header
1758 *
1759 * Returns the length of an Ethernet header, including optional VLAN tag.
1760 */
1761 static inline int eth_hdr_len(const void *data)
1762 {
1763 const struct ethhdr *e = data;
1764
1765 return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
1766 }
1767
1768 /*
1769 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
1770 */
1771 netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1772 {
1773 struct adapter *adapter = dev->ml_priv;
1774 struct sge *sge = adapter->sge;
1775 struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
1776 struct cpl_tx_pkt *cpl;
1777 struct sk_buff *orig_skb = skb;
1778 int ret;
1779
1780 if (skb->protocol == htons(ETH_P_CPL5))
1781 goto send;
1782
1783 /*
1784 * We are using a non-standard hard_header_len.
1785 * Allocate more header room in the rare cases it is not big enough.
1786 */
1787 if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
1788 skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
1789 ++st->tx_need_hdrroom;
1790 dev_kfree_skb_any(orig_skb);
1791 if (!skb)
1792 return NETDEV_TX_OK;
1793 }
1794
1795 if (skb_shinfo(skb)->gso_size) {
1796 int eth_type;
1797 struct cpl_tx_pkt_lso *hdr;
1798
1799 ++st->tx_tso;
1800
1801 eth_type = skb_network_offset(skb) == ETH_HLEN ?
1802 CPL_ETH_II : CPL_ETH_II_VLAN;
1803
1804 hdr = skb_push(skb, sizeof(*hdr));
1805 hdr->opcode = CPL_TX_PKT_LSO;
1806 hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
1807 hdr->ip_hdr_words = ip_hdr(skb)->ihl;
1808 hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
1809 hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
1810 skb_shinfo(skb)->gso_size));
1811 hdr->len = htonl(skb->len - sizeof(*hdr));
1812 cpl = (struct cpl_tx_pkt *)hdr;
1813 } else {
1814 /*
1815 * Packets shorter than ETH_HLEN can break the MAC, drop them
1816 * early. Also, we may get oversized packets because some
1817 * parts of the kernel don't handle our unusual hard_header_len
1818 * right, drop those too.
1819 */
1820 if (unlikely(skb->len < ETH_HLEN ||
1821 skb->len > dev->mtu + eth_hdr_len(skb->data))) {
1822 			netdev_dbg(dev, "packet size %d hdr %d mtu %d\n",
1823 skb->len, eth_hdr_len(skb->data), dev->mtu);
1824 dev_kfree_skb_any(skb);
1825 return NETDEV_TX_OK;
1826 }
1827
1828 if (skb->ip_summed == CHECKSUM_PARTIAL &&
1829 ip_hdr(skb)->protocol == IPPROTO_UDP) {
1830 if (unlikely(skb_checksum_help(skb))) {
1831 netdev_dbg(dev, "unable to do udp checksum\n");
1832 dev_kfree_skb_any(skb);
1833 return NETDEV_TX_OK;
1834 }
1835 }
1836
1837 		/* We assume this catches the gratuitous ARP and use it later
1838 		 * to flush out stuck ESPI packets...
1839 */
1840 if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
1841 if (skb->protocol == htons(ETH_P_ARP) &&
1842 arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
1843 adapter->sge->espibug_skb[dev->if_port] = skb;
1844 /* We want to re-use this skb later. We
1845 * simply bump the reference count and it
1846 * will not be freed...
1847 */
1848 skb = skb_get(skb);
1849 }
1850 }
1851
1852 cpl = __skb_push(skb, sizeof(*cpl));
1853 cpl->opcode = CPL_TX_PKT;
1854 cpl->ip_csum_dis = 1; /* SW calculates IP csum */
1855 cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
1856 /* the length field isn't used so don't bother setting it */
1857
1858 st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
1859 }
1860 cpl->iff = dev->if_port;
1861
1862 if (skb_vlan_tag_present(skb)) {
1863 cpl->vlan_valid = 1;
1864 cpl->vlan = htons(skb_vlan_tag_get(skb));
1865 st->vlan_insert++;
1866 } else
1867 cpl->vlan_valid = 0;
1868
1869 send:
1870 ret = t1_sge_tx(skb, adapter, 0, dev);
1871
1872 /* If transmit busy, and we reallocated skb's due to headroom limit,
1873 	 * then silently discard it to avoid a leak.
1874 */
1875 if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
1876 dev_kfree_skb_any(skb);
1877 ret = NETDEV_TX_OK;
1878 }
1879 return ret;
1880 }
1881
1882 /*
1883 * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
1884 */
1885 static void sge_tx_reclaim_cb(unsigned long data)
1886 {
1887 int i;
1888 struct sge *sge = (struct sge *)data;
1889
1890 for (i = 0; i < SGE_CMDQ_N; ++i) {
1891 struct cmdQ *q = &sge->cmdQ[i];
1892
1893 if (!spin_trylock(&q->lock))
1894 continue;
1895
1896 reclaim_completed_tx(sge, q);
1897 if (i == 0 && q->in_use) { /* flush pending credits */
1898 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
1899 }
1900 spin_unlock(&q->lock);
1901 }
1902 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1903 }
1904
1905 /*
1906 * Propagate changes of the SGE coalescing parameters to the HW.
1907 */
1908 int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
1909 {
1910 sge->fixed_intrtimer = p->rx_coalesce_usecs *
1911 core_ticks_per_usec(sge->adapter);
1912 writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
1913 return 0;
1914 }
1915
1916 /*
1917 * Allocates both RX and TX resources and configures the SGE. However,
1918 * the hardware is not enabled yet.
1919 */
1920 int t1_sge_configure(struct sge *sge, struct sge_params *p)
1921 {
1922 if (alloc_rx_resources(sge, p))
1923 return -ENOMEM;
1924 if (alloc_tx_resources(sge, p)) {
1925 free_rx_resources(sge);
1926 return -ENOMEM;
1927 }
1928 configure_sge(sge, p);
1929
1930 /*
1931 * Now that we have sized the free lists calculate the payload
1932 * capacity of the large buffers. Other parts of the driver use
1933 * this to set the max offload coalescing size so that RX packets
1934 * do not overflow our large buffers.
1935 */
1936 p->large_buf_capacity = jumbo_payload_capacity(sge);
1937 return 0;
1938 }
1939
1940 /*
1941 * Disables the DMA engine.
1942 */
1943 void t1_sge_stop(struct sge *sge)
1944 {
1945 int i;
1946 writel(0, sge->adapter->regs + A_SG_CONTROL);
1947 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1948
1949 if (is_T2(sge->adapter))
1950 del_timer_sync(&sge->espibug_timer);
1951
1952 del_timer_sync(&sge->tx_reclaim_timer);
1953 if (sge->tx_sched)
1954 tx_sched_stop(sge);
1955
1956 for (i = 0; i < MAX_NPORTS; i++)
1957 kfree_skb(sge->espibug_skb[i]);
1958 }
1959
1960 /*
1961 * Enables the DMA engine.
1962 */
1963 void t1_sge_start(struct sge *sge)
1964 {
1965 refill_free_list(sge, &sge->freelQ[0]);
1966 refill_free_list(sge, &sge->freelQ[1]);
1967
1968 writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
1969 doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
1970 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1971
1972 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1973
1974 if (is_T2(sge->adapter))
1975 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
1976 }
1977
1978 /*
1979  * Callback for the T2 ESPI 'stuck packet feature' workaround
1980 */
1981 static void espibug_workaround_t204(unsigned long data)
1982 {
1983 struct adapter *adapter = (struct adapter *)data;
1984 struct sge *sge = adapter->sge;
1985 unsigned int nports = adapter->params.nports;
1986 u32 seop[MAX_NPORTS];
1987
1988 if (adapter->open_device_map & PORT_MASK) {
1989 int i;
1990
1991 if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
1992 return;
1993
1994 for (i = 0; i < nports; i++) {
1995 struct sk_buff *skb = sge->espibug_skb[i];
1996
1997 if (!netif_running(adapter->port[i].dev) ||
1998 netif_queue_stopped(adapter->port[i].dev) ||
1999 !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
2000 continue;
2001
2002 if (!skb->cb[0]) {
2003 skb_copy_to_linear_data_offset(skb,
2004 sizeof(struct cpl_tx_pkt),
2005 ch_mac_addr,
2006 ETH_ALEN);
2007 skb_copy_to_linear_data_offset(skb,
2008 skb->len - 10,
2009 ch_mac_addr,
2010 ETH_ALEN);
2011 skb->cb[0] = 0xff;
2012 }
2013
2014 /* bump the reference count to avoid freeing of
2015 * the skb once the DMA has completed.
2016 */
2017 skb = skb_get(skb);
2018 t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
2019 }
2020 }
2021 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2022 }
2023
2024 static void espibug_workaround(unsigned long data)
2025 {
2026 struct adapter *adapter = (struct adapter *)data;
2027 struct sge *sge = adapter->sge;
2028
2029 if (netif_running(adapter->port[0].dev)) {
2030 struct sk_buff *skb = sge->espibug_skb[0];
2031 u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
2032
2033 if ((seop & 0xfff0fff) == 0xfff && skb) {
2034 if (!skb->cb[0]) {
2035 skb_copy_to_linear_data_offset(skb,
2036 sizeof(struct cpl_tx_pkt),
2037 ch_mac_addr,
2038 ETH_ALEN);
2039 skb_copy_to_linear_data_offset(skb,
2040 skb->len - 10,
2041 ch_mac_addr,
2042 ETH_ALEN);
2043 skb->cb[0] = 0xff;
2044 }
2045
2046 /* bump the reference count to avoid freeing of the
2047 * skb once the DMA has completed.
2048 */
2049 skb = skb_get(skb);
2050 t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
2051 }
2052 }
2053 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2054 }
2055
2056 /*
2057 * Creates a t1_sge structure and returns suggested resource parameters.
2058 */
2059 struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p)
2060 {
2061 struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
2062 int i;
2063
2064 if (!sge)
2065 return NULL;
2066
2067 sge->adapter = adapter;
2068 sge->netdev = adapter->port[0].dev;
2069 sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
2070 sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
2071
2072 for_each_port(adapter, i) {
2073 sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
2074 if (!sge->port_stats[i])
2075 goto nomem_port;
2076 }
2077
2078 init_timer(&sge->tx_reclaim_timer);
2079 sge->tx_reclaim_timer.data = (unsigned long)sge;
2080 sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
2081
2082 if (is_T2(sge->adapter)) {
2083 init_timer(&sge->espibug_timer);
2084
2085 if (adapter->params.nports > 1) {
2086 tx_sched_init(sge);
2087 sge->espibug_timer.function = espibug_workaround_t204;
2088 } else
2089 sge->espibug_timer.function = espibug_workaround;
2090 sge->espibug_timer.data = (unsigned long)sge->adapter;
2091
2092 sge->espibug_timeout = 1;
2093 /* for T204, every 10ms */
2094 if (adapter->params.nports > 1)
2095 sge->espibug_timeout = HZ/100;
2096 }
2097
2098
2099 p->cmdQ_size[0] = SGE_CMDQ0_E_N;
2100 p->cmdQ_size[1] = SGE_CMDQ1_E_N;
2101 p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
2102 p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
2103 if (sge->tx_sched) {
2104 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
2105 p->rx_coalesce_usecs = 15;
2106 else
2107 p->rx_coalesce_usecs = 50;
2108 } else
2109 p->rx_coalesce_usecs = 50;
2110
2111 p->coalesce_enable = 0;
2112 p->sample_interval_usecs = 0;
2113
2114 return sge;
2115 nomem_port:
2116 while (i >= 0) {
2117 free_percpu(sge->port_stats[i]);
2118 --i;
2119 }
2120 kfree(sge);
2121 return NULL;
2122
2123 }