]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blame - drivers/net/chelsio/sge.c
chelsio: the return statement is not a function
[mirror_ubuntu-focal-kernel.git] / drivers / net / chelsio / sge.c
CommitLineData
8199d3a7
CL
1/*****************************************************************************
2 * *
3 * File: sge.c *
559fb51b
SB
4 * $Revision: 1.26 $ *
5 * $Date: 2005/06/21 18:29:48 $ *
8199d3a7
CL
6 * Description: *
7 * DMA engine. *
8 * part of the Chelsio 10Gb Ethernet Driver. *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License, version 2, as *
12 * published by the Free Software Foundation. *
13 * *
14 * You should have received a copy of the GNU General Public License along *
15 * with this program; if not, write to the Free Software Foundation, Inc., *
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * *
22 * http://www.chelsio.com *
23 * *
24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
25 * All rights reserved. *
26 * *
27 * Maintainers: maintainers@chelsio.com *
28 * *
29 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
30 * Tina Yang <tainay@chelsio.com> *
31 * Felix Marti <felix@chelsio.com> *
32 * Scott Bardone <sbardone@chelsio.com> *
33 * Kurt Ottaway <kottaway@chelsio.com> *
34 * Frank DiMambro <frank@chelsio.com> *
35 * *
36 * History: *
37 * *
38 ****************************************************************************/
39
40#include "common.h"
41
8199d3a7
CL
42#include <linux/types.h>
43#include <linux/errno.h>
44#include <linux/pci.h>
f1d3d38a 45#include <linux/ktime.h>
8199d3a7
CL
46#include <linux/netdevice.h>
47#include <linux/etherdevice.h>
48#include <linux/if_vlan.h>
49#include <linux/skbuff.h>
50#include <linux/init.h>
51#include <linux/mm.h>
f1d3d38a 52#include <linux/tcp.h>
8199d3a7
CL
53#include <linux/ip.h>
54#include <linux/in.h>
55#include <linux/if_arp.h>
56
57#include "cpl5_cmd.h"
58#include "sge.h"
59#include "regs.h"
60#include "espi.h"
61
f1d3d38a
SH
62/* This belongs in if_ether.h */
63#define ETH_P_CPL5 0xf
8199d3a7
CL
64
65#define SGE_CMDQ_N 2
66#define SGE_FREELQ_N 2
559fb51b 67#define SGE_CMDQ0_E_N 1024
8199d3a7
CL
68#define SGE_CMDQ1_E_N 128
69#define SGE_FREEL_SIZE 4096
70#define SGE_JUMBO_FREEL_SIZE 512
71#define SGE_FREEL_REFILL_THRESH 16
72#define SGE_RESPQ_E_N 1024
559fb51b
SB
73#define SGE_INTRTIMER_NRES 1000
74#define SGE_RX_COPY_THRES 256
8199d3a7 75#define SGE_RX_SM_BUF_SIZE 1536
f1d3d38a 76#define SGE_TX_DESC_MAX_PLEN 16384
8199d3a7 77
559fb51b
SB
78# define SGE_RX_DROP_THRES 2
79
80#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
81
82/*
83 * Period of the TX buffer reclaim timer. This timer does not need to run
84 * frequently as TX buffers are usually reclaimed by new TX packets.
85 */
86#define TX_RECLAIM_PERIOD (HZ / 4)
8199d3a7 87
8199d3a7 88#ifndef NET_IP_ALIGN
559fb51b 89# define NET_IP_ALIGN 2
8199d3a7
CL
90#endif
91
559fb51b
SB
92#define M_CMD_LEN 0x7fffffff
93#define V_CMD_LEN(v) (v)
94#define G_CMD_LEN(v) ((v) & M_CMD_LEN)
95#define V_CMD_GEN1(v) ((v) << 31)
96#define V_CMD_GEN2(v) (v)
97#define F_CMD_DATAVALID (1 << 1)
98#define F_CMD_SOP (1 << 2)
99#define V_CMD_EOP(v) ((v) << 3)
100
8199d3a7 101/*
559fb51b 102 * Command queue, receive buffer list, and response queue descriptors.
8199d3a7
CL
103 */
104#if defined(__BIG_ENDIAN_BITFIELD)
105struct cmdQ_e {
559fb51b
SB
106 u32 addr_lo;
107 u32 len_gen;
108 u32 flags;
109 u32 addr_hi;
8199d3a7
CL
110};
111
112struct freelQ_e {
559fb51b
SB
113 u32 addr_lo;
114 u32 len_gen;
115 u32 gen2;
116 u32 addr_hi;
8199d3a7
CL
117};
118
119struct respQ_e {
120 u32 Qsleeping : 4;
121 u32 Cmdq1CreditReturn : 5;
122 u32 Cmdq1DmaComplete : 5;
123 u32 Cmdq0CreditReturn : 5;
124 u32 Cmdq0DmaComplete : 5;
125 u32 FreelistQid : 2;
126 u32 CreditValid : 1;
127 u32 DataValid : 1;
128 u32 Offload : 1;
129 u32 Eop : 1;
130 u32 Sop : 1;
131 u32 GenerationBit : 1;
132 u32 BufferLength;
133};
8199d3a7
CL
134#elif defined(__LITTLE_ENDIAN_BITFIELD)
135struct cmdQ_e {
559fb51b
SB
136 u32 len_gen;
137 u32 addr_lo;
138 u32 addr_hi;
139 u32 flags;
8199d3a7
CL
140};
141
142struct freelQ_e {
559fb51b
SB
143 u32 len_gen;
144 u32 addr_lo;
145 u32 addr_hi;
146 u32 gen2;
8199d3a7
CL
147};
148
149struct respQ_e {
150 u32 BufferLength;
151 u32 GenerationBit : 1;
152 u32 Sop : 1;
153 u32 Eop : 1;
154 u32 Offload : 1;
155 u32 DataValid : 1;
156 u32 CreditValid : 1;
157 u32 FreelistQid : 2;
158 u32 Cmdq0DmaComplete : 5;
159 u32 Cmdq0CreditReturn : 5;
160 u32 Cmdq1DmaComplete : 5;
161 u32 Cmdq1CreditReturn : 5;
162 u32 Qsleeping : 4;
163} ;
164#endif
165
166/*
167 * SW Context Command and Freelist Queue Descriptors
168 */
169struct cmdQ_ce {
170 struct sk_buff *skb;
171 DECLARE_PCI_UNMAP_ADDR(dma_addr);
172 DECLARE_PCI_UNMAP_LEN(dma_len);
8199d3a7
CL
173};
174
175struct freelQ_ce {
176 struct sk_buff *skb;
177 DECLARE_PCI_UNMAP_ADDR(dma_addr);
178 DECLARE_PCI_UNMAP_LEN(dma_len);
179};
180
181/*
559fb51b 182 * SW command, freelist and response rings
8199d3a7
CL
183 */
184struct cmdQ {
559fb51b
SB
185 unsigned long status; /* HW DMA fetch status */
186 unsigned int in_use; /* # of in-use command descriptors */
187 unsigned int size; /* # of descriptors */
f1d3d38a
SH
188 unsigned int processed; /* total # of descs HW has processed */
189 unsigned int cleaned; /* total # of descs SW has reclaimed */
190 unsigned int stop_thres; /* SW TX queue suspend threshold */
559fb51b
SB
191 u16 pidx; /* producer index (SW) */
192 u16 cidx; /* consumer index (HW) */
193 u8 genbit; /* current generation (=valid) bit */
f1d3d38a 194 u8 sop; /* is next entry start of packet? */
559fb51b
SB
195 struct cmdQ_e *entries; /* HW command descriptor Q */
196 struct cmdQ_ce *centries; /* SW command context descriptor Q */
559fb51b 197 dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
f1d3d38a 198 spinlock_t lock; /* Lock to protect cmdQ enqueuing */
8199d3a7
CL
199};
200
201struct freelQ {
559fb51b
SB
202 unsigned int credits; /* # of available RX buffers */
203 unsigned int size; /* free list capacity */
204 u16 pidx; /* producer index (SW) */
205 u16 cidx; /* consumer index (HW) */
8199d3a7 206 u16 rx_buffer_size; /* Buffer size on this free list */
f1d3d38a
SH
207 u16 dma_offset; /* DMA offset to align IP headers */
208 u16 recycleq_idx; /* skb recycle q to use */
559fb51b
SB
209 u8 genbit; /* current generation (=valid) bit */
210 struct freelQ_e *entries; /* HW freelist descriptor Q */
211 struct freelQ_ce *centries; /* SW freelist context descriptor Q */
212 dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */
8199d3a7
CL
213};
214
215struct respQ {
559fb51b
SB
216 unsigned int credits; /* credits to be returned to SGE */
217 unsigned int size; /* # of response Q descriptors */
218 u16 cidx; /* consumer index (SW) */
219 u8 genbit; /* current generation(=valid) bit */
8199d3a7 220 struct respQ_e *entries; /* HW response descriptor Q */
559fb51b
SB
221 dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */
222};
223
224/* Bit flags for cmdQ.status */
225enum {
226 CMDQ_STAT_RUNNING = 1, /* fetch engine is running */
227 CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */
8199d3a7
CL
228};
229
f1d3d38a
SH
230/* T204 TX SW scheduler */
231
232/* Per T204 TX port */
233struct sched_port {
234 unsigned int avail; /* available bits - quota */
235 unsigned int drain_bits_per_1024ns; /* drain rate */
236 unsigned int speed; /* drain rate, mbps */
237 unsigned int mtu; /* mtu size */
238 struct sk_buff_head skbq; /* pending skbs */
239};
240
241/* Per T204 device */
242struct sched {
243 ktime_t last_updated; /* last time quotas were computed */
244 unsigned int max_avail; /* max bits to be sent to any port */
245 unsigned int port; /* port index (round robin ports) */
246 unsigned int num; /* num skbs in per port queues */
247 struct sched_port p[MAX_NPORTS];
248 struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
249};
250static void restart_sched(unsigned long);
251
252
8199d3a7
CL
253/*
254 * Main SGE data structure
255 *
256 * Interrupts are handled by a single CPU and it is likely that on a MP system
257 * the application is migrated to another CPU. In that scenario, we try to
258 * seperate the RX(in irq context) and TX state in order to decrease memory
259 * contention.
260 */
261struct sge {
262 struct adapter *adapter; /* adapter backpointer */
559fb51b
SB
263 struct net_device *netdev; /* netdevice backpointer */
264 struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
265 struct respQ respQ; /* response Q */
266 unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
8199d3a7
CL
267 unsigned int rx_pkt_pad; /* RX padding for L2 packets */
268 unsigned int jumbo_fl; /* jumbo freelist Q index */
559fb51b 269 unsigned int intrtimer_nres; /* no-resource interrupt timer */
f1d3d38a 270 unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */
559fb51b
SB
271 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
272 struct timer_list espibug_timer;
f1d3d38a
SH
273 unsigned long espibug_timeout;
274 struct sk_buff *espibug_skb[MAX_NPORTS];
559fb51b
SB
275 u32 sge_control; /* shadow value of sge control reg */
276 struct sge_intr_counts stats;
56f643c2 277 struct sge_port_stats *port_stats[MAX_NPORTS];
f1d3d38a 278 struct sched *tx_sched;
559fb51b 279 struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
8199d3a7
CL
280};
281
f1d3d38a
SH
282/*
283 * stop tasklet and free all pending skb's
284 */
285static void tx_sched_stop(struct sge *sge)
286{
287 struct sched *s = sge->tx_sched;
288 int i;
289
290 tasklet_kill(&s->sched_tsk);
291
292 for (i = 0; i < MAX_NPORTS; i++)
293 __skb_queue_purge(&s->p[s->port].skbq);
294}
295
296/*
297 * t1_sched_update_parms() is called when the MTU or link speed changes. It
298 * re-computes scheduler parameters to scope with the change.
299 */
300unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
301 unsigned int mtu, unsigned int speed)
302{
303 struct sched *s = sge->tx_sched;
304 struct sched_port *p = &s->p[port];
305 unsigned int max_avail_segs;
306
307 pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed);
308 if (speed)
309 p->speed = speed;
310 if (mtu)
311 p->mtu = mtu;
312
313 if (speed || mtu) {
314 unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
315 do_div(drain, (p->mtu + 50) * 1000);
316 p->drain_bits_per_1024ns = (unsigned int) drain;
317
318 if (p->speed < 1000)
319 p->drain_bits_per_1024ns =
320 90 * p->drain_bits_per_1024ns / 100;
321 }
322
323 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
324 p->drain_bits_per_1024ns -= 16;
325 s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
326 max_avail_segs = max(1U, 4096 / (p->mtu - 40));
327 } else {
328 s->max_avail = 16384;
329 max_avail_segs = max(1U, 9000 / (p->mtu - 40));
330 }
331
332 pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
333 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
334 p->speed, s->max_avail, max_avail_segs,
335 p->drain_bits_per_1024ns);
336
337 return max_avail_segs * (p->mtu - 40);
338}
339
340/*
341 * t1_sched_max_avail_bytes() tells the scheduler the maximum amount of
342 * data that can be pushed per port.
343 */
344void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
345{
346 struct sched *s = sge->tx_sched;
347 unsigned int i;
348
349 s->max_avail = val;
350 for (i = 0; i < MAX_NPORTS; i++)
351 t1_sched_update_parms(sge, i, 0, 0);
352}
353
354/*
355 * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
356 * is draining.
357 */
358void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
359 unsigned int val)
360{
361 struct sched *s = sge->tx_sched;
362 struct sched_port *p = &s->p[port];
363 p->drain_bits_per_1024ns = val * 1024 / 1000;
364 t1_sched_update_parms(sge, port, 0, 0);
365}
366
367
368/*
369 * get_clock() implements a ns clock (see ktime_get)
370 */
371static inline ktime_t get_clock(void)
372{
373 struct timespec ts;
374
375 ktime_get_ts(&ts);
376 return timespec_to_ktime(ts);
377}
378
379/*
380 * tx_sched_init() allocates resources and does basic initialization.
381 */
382static int tx_sched_init(struct sge *sge)
383{
384 struct sched *s;
385 int i;
386
387 s = kzalloc(sizeof (struct sched), GFP_KERNEL);
388 if (!s)
389 return -ENOMEM;
390
391 pr_debug("tx_sched_init\n");
392 tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
393 sge->tx_sched = s;
394
395 for (i = 0; i < MAX_NPORTS; i++) {
396 skb_queue_head_init(&s->p[i].skbq);
397 t1_sched_update_parms(sge, i, 1500, 1000);
398 }
399
400 return 0;
401}
402
403/*
404 * sched_update_avail() computes the delta since the last time it was called
405 * and updates the per port quota (number of bits that can be sent to the any
406 * port).
407 */
408static inline int sched_update_avail(struct sge *sge)
409{
410 struct sched *s = sge->tx_sched;
411 ktime_t now = get_clock();
412 unsigned int i;
413 long long delta_time_ns;
414
415 delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));
416
417 pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
418 if (delta_time_ns < 15000)
419 return 0;
420
421 for (i = 0; i < MAX_NPORTS; i++) {
422 struct sched_port *p = &s->p[i];
423 unsigned int delta_avail;
424
425 delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
426 p->avail = min(p->avail + delta_avail, s->max_avail);
427 }
428
429 s->last_updated = now;
430
431 return 1;
432}
433
434/*
435 * sched_skb() is called from two different places. In the tx path, any
436 * packet generating load on an output port will call sched_skb()
437 * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
438 * context (skb == NULL).
439 * The scheduler only returns a skb (which will then be sent) if the
440 * length of the skb is <= the current quota of the output port.
441 */
442static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
443 unsigned int credits)
444{
445 struct sched *s = sge->tx_sched;
446 struct sk_buff_head *skbq;
447 unsigned int i, len, update = 1;
448
449 pr_debug("sched_skb %p\n", skb);
450 if (!skb) {
451 if (!s->num)
452 return NULL;
453 } else {
454 skbq = &s->p[skb->dev->if_port].skbq;
455 __skb_queue_tail(skbq, skb);
456 s->num++;
457 skb = NULL;
458 }
459
460 if (credits < MAX_SKB_FRAGS + 1)
461 goto out;
462
463 again:
464 for (i = 0; i < MAX_NPORTS; i++) {
465 s->port = ++s->port & (MAX_NPORTS - 1);
466 skbq = &s->p[s->port].skbq;
467
468 skb = skb_peek(skbq);
469
470 if (!skb)
471 continue;
472
473 len = skb->len;
474 if (len <= s->p[s->port].avail) {
475 s->p[s->port].avail -= len;
476 s->num--;
477 __skb_unlink(skb, skbq);
478 goto out;
479 }
480 skb = NULL;
481 }
482
483 if (update-- && sched_update_avail(sge))
484 goto again;
485
486 out:
487 /* If there are more pending skbs, we use the hardware to schedule us
488 * again.
489 */
490 if (s->num && !skb) {
491 struct cmdQ *q = &sge->cmdQ[0];
492 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
493 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
494 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
495 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
496 }
497 }
498 pr_debug("sched_skb ret %p\n", skb);
499
500 return skb;
501}
502
8199d3a7
CL
503/*
504 * PIO to indicate that memory mapped Q contains valid descriptor(s).
505 */
559fb51b 506static inline void doorbell_pio(struct adapter *adapter, u32 val)
8199d3a7
CL
507{
508 wmb();
559fb51b 509 writel(val, adapter->regs + A_SG_DOORBELL);
8199d3a7
CL
510}
511
512/*
513 * Frees all RX buffers on the freelist Q. The caller must make sure that
514 * the SGE is turned off before calling this function.
515 */
559fb51b 516static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
8199d3a7 517{
559fb51b 518 unsigned int cidx = q->cidx;
8199d3a7 519
559fb51b
SB
520 while (q->credits--) {
521 struct freelQ_ce *ce = &q->centries[cidx];
8199d3a7
CL
522
523 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
524 pci_unmap_len(ce, dma_len),
525 PCI_DMA_FROMDEVICE);
526 dev_kfree_skb(ce->skb);
527 ce->skb = NULL;
559fb51b 528 if (++cidx == q->size)
8199d3a7
CL
529 cidx = 0;
530 }
531}
532
533/*
534 * Free RX free list and response queue resources.
535 */
536static void free_rx_resources(struct sge *sge)
537{
538 struct pci_dev *pdev = sge->adapter->pdev;
539 unsigned int size, i;
540
541 if (sge->respQ.entries) {
559fb51b 542 size = sizeof(struct respQ_e) * sge->respQ.size;
8199d3a7
CL
543 pci_free_consistent(pdev, size, sge->respQ.entries,
544 sge->respQ.dma_addr);
545 }
546
547 for (i = 0; i < SGE_FREELQ_N; i++) {
559fb51b 548 struct freelQ *q = &sge->freelQ[i];
8199d3a7 549
559fb51b
SB
550 if (q->centries) {
551 free_freelQ_buffers(pdev, q);
552 kfree(q->centries);
8199d3a7 553 }
559fb51b
SB
554 if (q->entries) {
555 size = sizeof(struct freelQ_e) * q->size;
556 pci_free_consistent(pdev, size, q->entries,
557 q->dma_addr);
8199d3a7
CL
558 }
559 }
560}
561
562/*
563 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
559fb51b 564 * response queue.
8199d3a7
CL
565 */
566static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
567{
568 struct pci_dev *pdev = sge->adapter->pdev;
569 unsigned int size, i;
570
571 for (i = 0; i < SGE_FREELQ_N; i++) {
559fb51b
SB
572 struct freelQ *q = &sge->freelQ[i];
573
574 q->genbit = 1;
575 q->size = p->freelQ_size[i];
576 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
577 size = sizeof(struct freelQ_e) * q->size;
578 q->entries = (struct freelQ_e *)
579 pci_alloc_consistent(pdev, size, &q->dma_addr);
580 if (!q->entries)
8199d3a7 581 goto err_no_mem;
559fb51b
SB
582 memset(q->entries, 0, size);
583 size = sizeof(struct freelQ_ce) * q->size;
cbee9f91 584 q->centries = kzalloc(size, GFP_KERNEL);
559fb51b 585 if (!q->centries)
8199d3a7
CL
586 goto err_no_mem;
587 }
588
589 /*
590 * Calculate the buffer sizes for the two free lists. FL0 accommodates
591 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
592 * including all the sk_buff overhead.
593 *
594 * Note: For T2 FL0 and FL1 are reversed.
595 */
596 sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
597 sizeof(struct cpl_rx_data) +
598 sge->freelQ[!sge->jumbo_fl].dma_offset;
f1d3d38a
SH
599
600 size = (16 * 1024) -
601 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
602
603 sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
8199d3a7 604
559fb51b
SB
605 /*
606 * Setup which skb recycle Q should be used when recycling buffers from
607 * each free list.
608 */
609 sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
610 sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
611
8199d3a7 612 sge->respQ.genbit = 1;
559fb51b
SB
613 sge->respQ.size = SGE_RESPQ_E_N;
614 sge->respQ.credits = 0;
615 size = sizeof(struct respQ_e) * sge->respQ.size;
8199d3a7
CL
616 sge->respQ.entries = (struct respQ_e *)
617 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
618 if (!sge->respQ.entries)
619 goto err_no_mem;
620 memset(sge->respQ.entries, 0, size);
621 return 0;
622
623err_no_mem:
624 free_rx_resources(sge);
625 return -ENOMEM;
626}
627
628/*
559fb51b 629 * Reclaims n TX descriptors and frees the buffers associated with them.
8199d3a7 630 */
559fb51b 631static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
8199d3a7 632{
559fb51b 633 struct cmdQ_ce *ce;
8199d3a7 634 struct pci_dev *pdev = sge->adapter->pdev;
559fb51b 635 unsigned int cidx = q->cidx;
8199d3a7 636
559fb51b
SB
637 q->in_use -= n;
638 ce = &q->centries[cidx];
639 while (n--) {
f1d3d38a
SH
640 if (q->sop) {
641 if (likely(pci_unmap_len(ce, dma_len))) {
642 pci_unmap_single(pdev,
643 pci_unmap_addr(ce, dma_addr),
644 pci_unmap_len(ce, dma_len),
645 PCI_DMA_TODEVICE);
646 q->sop = 0;
647 }
648 } else {
649 if (likely(pci_unmap_len(ce, dma_len))) {
650 pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
651 pci_unmap_len(ce, dma_len),
652 PCI_DMA_TODEVICE);
653 }
654 }
559fb51b 655 if (ce->skb) {
f1d3d38a 656 dev_kfree_skb_any(ce->skb);
559fb51b
SB
657 q->sop = 1;
658 }
8199d3a7 659 ce++;
559fb51b 660 if (++cidx == q->size) {
8199d3a7 661 cidx = 0;
559fb51b 662 ce = q->centries;
8199d3a7
CL
663 }
664 }
559fb51b 665 q->cidx = cidx;
8199d3a7
CL
666}
667
668/*
669 * Free TX resources.
670 *
671 * Assumes that SGE is stopped and all interrupts are disabled.
672 */
673static void free_tx_resources(struct sge *sge)
674{
675 struct pci_dev *pdev = sge->adapter->pdev;
676 unsigned int size, i;
677
678 for (i = 0; i < SGE_CMDQ_N; i++) {
559fb51b 679 struct cmdQ *q = &sge->cmdQ[i];
8199d3a7 680
559fb51b
SB
681 if (q->centries) {
682 if (q->in_use)
683 free_cmdQ_buffers(sge, q, q->in_use);
684 kfree(q->centries);
8199d3a7 685 }
559fb51b
SB
686 if (q->entries) {
687 size = sizeof(struct cmdQ_e) * q->size;
688 pci_free_consistent(pdev, size, q->entries,
689 q->dma_addr);
8199d3a7
CL
690 }
691 }
692}
693
694/*
695 * Allocates basic TX resources, consisting of memory mapped command Qs.
696 */
697static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
698{
699 struct pci_dev *pdev = sge->adapter->pdev;
700 unsigned int size, i;
701
702 for (i = 0; i < SGE_CMDQ_N; i++) {
559fb51b
SB
703 struct cmdQ *q = &sge->cmdQ[i];
704
705 q->genbit = 1;
706 q->sop = 1;
707 q->size = p->cmdQ_size[i];
708 q->in_use = 0;
709 q->status = 0;
710 q->processed = q->cleaned = 0;
711 q->stop_thres = 0;
712 spin_lock_init(&q->lock);
713 size = sizeof(struct cmdQ_e) * q->size;
714 q->entries = (struct cmdQ_e *)
715 pci_alloc_consistent(pdev, size, &q->dma_addr);
716 if (!q->entries)
8199d3a7 717 goto err_no_mem;
559fb51b
SB
718 memset(q->entries, 0, size);
719 size = sizeof(struct cmdQ_ce) * q->size;
cbee9f91 720 q->centries = kzalloc(size, GFP_KERNEL);
559fb51b 721 if (!q->centries)
8199d3a7
CL
722 goto err_no_mem;
723 }
724
559fb51b
SB
725 /*
726 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
727 * only. For queue 0 set the stop threshold so we can handle one more
728 * packet from each port, plus reserve an additional 24 entries for
729 * Ethernet packets only. Queue 1 never suspends nor do we reserve
730 * space for Ethernet packets.
731 */
732 sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
733 (MAX_SKB_FRAGS + 1);
8199d3a7
CL
734 return 0;
735
736err_no_mem:
737 free_tx_resources(sge);
738 return -ENOMEM;
739}
740
741static inline void setup_ring_params(struct adapter *adapter, u64 addr,
742 u32 size, int base_reg_lo,
743 int base_reg_hi, int size_reg)
744{
559fb51b
SB
745 writel((u32)addr, adapter->regs + base_reg_lo);
746 writel(addr >> 32, adapter->regs + base_reg_hi);
747 writel(size, adapter->regs + size_reg);
8199d3a7
CL
748}
749
750/*
751 * Enable/disable VLAN acceleration.
752 */
753void t1_set_vlan_accel(struct adapter *adapter, int on_off)
754{
755 struct sge *sge = adapter->sge;
756
757 sge->sge_control &= ~F_VLAN_XTRACT;
758 if (on_off)
759 sge->sge_control |= F_VLAN_XTRACT;
760 if (adapter->open_device_map) {
559fb51b 761 writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
f1d3d38a 762 readl(adapter->regs + A_SG_CONTROL); /* flush */
8199d3a7
CL
763 }
764}
765
8199d3a7
CL
766/*
767 * Programs the various SGE registers. However, the engine is not yet enabled,
768 * but sge->sge_control is setup and ready to go.
769 */
770static void configure_sge(struct sge *sge, struct sge_params *p)
771{
772 struct adapter *ap = sge->adapter;
559fb51b
SB
773
774 writel(0, ap->regs + A_SG_CONTROL);
775 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
8199d3a7 776 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
559fb51b 777 setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
8199d3a7
CL
778 A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
779 setup_ring_params(ap, sge->freelQ[0].dma_addr,
559fb51b 780 sge->freelQ[0].size, A_SG_FL0BASELWR,
8199d3a7
CL
781 A_SG_FL0BASEUPR, A_SG_FL0SIZE);
782 setup_ring_params(ap, sge->freelQ[1].dma_addr,
559fb51b 783 sge->freelQ[1].size, A_SG_FL1BASELWR,
8199d3a7
CL
784 A_SG_FL1BASEUPR, A_SG_FL1SIZE);
785
786 /* The threshold comparison uses <. */
559fb51b 787 writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
8199d3a7 788
559fb51b
SB
789 setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
790 A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
791 writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
8199d3a7
CL
792
793 sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
794 F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
795 V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
796 V_RX_PKT_OFFSET(sge->rx_pkt_pad);
797
798#if defined(__BIG_ENDIAN_BITFIELD)
799 sge->sge_control |= F_ENABLE_BIG_ENDIAN;
800#endif
801
559fb51b
SB
802 /* Initialize no-resource timer */
803 sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
804
805 t1_sge_set_coalesce_params(sge, p);
8199d3a7
CL
806}
807
808/*
809 * Return the payload capacity of the jumbo free-list buffers.
810 */
811static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
812{
813 return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
559fb51b
SB
814 sge->freelQ[sge->jumbo_fl].dma_offset -
815 sizeof(struct cpl_rx_data);
8199d3a7
CL
816}
817
818/*
819 * Frees all SGE related resources and the sge structure itself
820 */
821void t1_sge_destroy(struct sge *sge)
822{
56f643c2
SH
823 int i;
824
825 for_each_port(sge->adapter, i)
826 free_percpu(sge->port_stats[i]);
827
f1d3d38a 828 kfree(sge->tx_sched);
8199d3a7
CL
829 free_tx_resources(sge);
830 free_rx_resources(sge);
831 kfree(sge);
832}
833
834/*
835 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
836 * context Q) until the Q is full or alloc_skb fails.
837 *
838 * It is possible that the generation bits already match, indicating that the
839 * buffer is already valid and nothing needs to be done. This happens when we
840 * copied a received buffer into a new sk_buff during the interrupt processing.
841 *
842 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
843 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
844 * aligned.
845 */
559fb51b 846static void refill_free_list(struct sge *sge, struct freelQ *q)
8199d3a7
CL
847{
848 struct pci_dev *pdev = sge->adapter->pdev;
559fb51b
SB
849 struct freelQ_ce *ce = &q->centries[q->pidx];
850 struct freelQ_e *e = &q->entries[q->pidx];
851 unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
8199d3a7
CL
852
853
559fb51b
SB
854 while (q->credits < q->size) {
855 struct sk_buff *skb;
856 dma_addr_t mapping;
8199d3a7 857
559fb51b
SB
858 skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
859 if (!skb)
860 break;
861
862 skb_reserve(skb, q->dma_offset);
863 mapping = pci_map_single(pdev, skb->data, dma_len,
864 PCI_DMA_FROMDEVICE);
865 ce->skb = skb;
866 pci_unmap_addr_set(ce, dma_addr, mapping);
867 pci_unmap_len_set(ce, dma_len, dma_len);
868 e->addr_lo = (u32)mapping;
869 e->addr_hi = (u64)mapping >> 32;
870 e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
871 wmb();
872 e->gen2 = V_CMD_GEN2(q->genbit);
8199d3a7
CL
873
874 e++;
875 ce++;
559fb51b
SB
876 if (++q->pidx == q->size) {
877 q->pidx = 0;
878 q->genbit ^= 1;
879 ce = q->centries;
880 e = q->entries;
8199d3a7 881 }
559fb51b 882 q->credits++;
8199d3a7
CL
883 }
884
885}
886
887/*
559fb51b
SB
888 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
889 * of both rings, we go into 'few interrupt mode' in order to give the system
890 * time to free up resources.
8199d3a7
CL
891 */
892static void freelQs_empty(struct sge *sge)
893{
559fb51b
SB
894 struct adapter *adapter = sge->adapter;
895 u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
8199d3a7
CL
896 u32 irqholdoff_reg;
897
898 refill_free_list(sge, &sge->freelQ[0]);
899 refill_free_list(sge, &sge->freelQ[1]);
900
559fb51b
SB
901 if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
902 sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
8199d3a7 903 irq_reg |= F_FL_EXHAUSTED;
559fb51b 904 irqholdoff_reg = sge->fixed_intrtimer;
8199d3a7
CL
905 } else {
906 /* Clear the F_FL_EXHAUSTED interrupts for now */
907 irq_reg &= ~F_FL_EXHAUSTED;
908 irqholdoff_reg = sge->intrtimer_nres;
909 }
559fb51b
SB
910 writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
911 writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
8199d3a7
CL
912
913 /* We reenable the Qs to force a freelist GTS interrupt later */
559fb51b 914 doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
8199d3a7
CL
915}
916
917#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
918#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
919#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
920 F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
921
922/*
923 * Disable SGE Interrupts
924 */
925void t1_sge_intr_disable(struct sge *sge)
926{
559fb51b 927 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
8199d3a7 928
559fb51b
SB
929 writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
930 writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
8199d3a7
CL
931}
932
933/*
934 * Enable SGE interrupts.
935 */
936void t1_sge_intr_enable(struct sge *sge)
937{
938 u32 en = SGE_INT_ENABLE;
559fb51b 939 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
8199d3a7
CL
940
941 if (sge->adapter->flags & TSO_CAPABLE)
942 en &= ~F_PACKET_TOO_BIG;
559fb51b
SB
943 writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
944 writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
8199d3a7
CL
945}
946
947/*
948 * Clear SGE interrupts.
949 */
950void t1_sge_intr_clear(struct sge *sge)
951{
559fb51b
SB
952 writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
953 writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
8199d3a7
CL
954}
955
956/*
957 * SGE 'Error' interrupt handler
958 */
959int t1_sge_intr_error_handler(struct sge *sge)
960{
961 struct adapter *adapter = sge->adapter;
559fb51b 962 u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
8199d3a7
CL
963
964 if (adapter->flags & TSO_CAPABLE)
965 cause &= ~F_PACKET_TOO_BIG;
966 if (cause & F_RESPQ_EXHAUSTED)
559fb51b 967 sge->stats.respQ_empty++;
8199d3a7 968 if (cause & F_RESPQ_OVERFLOW) {
559fb51b 969 sge->stats.respQ_overflow++;
8199d3a7
CL
970 CH_ALERT("%s: SGE response queue overflow\n",
971 adapter->name);
972 }
973 if (cause & F_FL_EXHAUSTED) {
559fb51b 974 sge->stats.freelistQ_empty++;
8199d3a7
CL
975 freelQs_empty(sge);
976 }
977 if (cause & F_PACKET_TOO_BIG) {
559fb51b 978 sge->stats.pkt_too_big++;
8199d3a7
CL
979 CH_ALERT("%s: SGE max packet size exceeded\n",
980 adapter->name);
981 }
982 if (cause & F_PACKET_MISMATCH) {
559fb51b 983 sge->stats.pkt_mismatch++;
8199d3a7
CL
984 CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
985 }
986 if (cause & SGE_INT_FATAL)
987 t1_fatal_err(adapter);
988
559fb51b 989 writel(cause, adapter->regs + A_SG_INT_CAUSE);
8199d3a7
CL
990 return 0;
991}
992
56f643c2 993const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
559fb51b
SB
994{
995 return &sge->stats;
996}
997
56f643c2
SH
998void t1_sge_get_port_stats(const struct sge *sge, int port,
999 struct sge_port_stats *ss)
559fb51b 1000{
56f643c2
SH
1001 int cpu;
1002
1003 memset(ss, 0, sizeof(*ss));
1004 for_each_possible_cpu(cpu) {
1005 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
1006
1007 ss->rx_packets += st->rx_packets;
1008 ss->rx_cso_good += st->rx_cso_good;
1009 ss->tx_packets += st->tx_packets;
1010 ss->tx_cso += st->tx_cso;
1011 ss->tx_tso += st->tx_tso;
1012 ss->vlan_xtract += st->vlan_xtract;
1013 ss->vlan_insert += st->vlan_insert;
1014 }
559fb51b
SB
1015}
1016
1017/**
1018 * recycle_fl_buf - recycle a free list buffer
1019 * @fl: the free list
1020 * @idx: index of buffer to recycle
8199d3a7 1021 *
559fb51b
SB
1022 * Recycles the specified buffer on the given free list by adding it at
1023 * the next available slot on the list.
8199d3a7 1024 */
559fb51b 1025static void recycle_fl_buf(struct freelQ *fl, int idx)
8199d3a7 1026{
559fb51b
SB
1027 struct freelQ_e *from = &fl->entries[idx];
1028 struct freelQ_e *to = &fl->entries[fl->pidx];
8199d3a7 1029
559fb51b
SB
1030 fl->centries[fl->pidx] = fl->centries[idx];
1031 to->addr_lo = from->addr_lo;
1032 to->addr_hi = from->addr_hi;
1033 to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
1034 wmb();
1035 to->gen2 = V_CMD_GEN2(fl->genbit);
1036 fl->credits++;
8199d3a7 1037
559fb51b
SB
1038 if (++fl->pidx == fl->size) {
1039 fl->pidx = 0;
1040 fl->genbit ^= 1;
8199d3a7 1041 }
559fb51b 1042}
8199d3a7 1043
559fb51b
SB
1044/**
1045 * get_packet - return the next ingress packet buffer
1046 * @pdev: the PCI device that received the packet
1047 * @fl: the SGE free list holding the packet
1048 * @len: the actual packet length, excluding any SGE padding
1049 * @dma_pad: padding at beginning of buffer left by SGE DMA
1050 * @skb_pad: padding to be used if the packet is copied
1051 * @copy_thres: length threshold under which a packet should be copied
1052 * @drop_thres: # of remaining buffers before we start dropping packets
1053 *
1054 * Get the next packet from a free list and complete setup of the
1055 * sk_buff. If the packet is small we make a copy and recycle the
1056 * original buffer, otherwise we use the original buffer itself. If a
1057 * positive drop threshold is supplied packets are dropped and their
1058 * buffers recycled if (a) the number of remaining buffers is under the
1059 * threshold and the packet is too big to copy, or (b) the packet should
1060 * be copied but there is no memory for the copy.
1061 */
1062static inline struct sk_buff *get_packet(struct pci_dev *pdev,
1063 struct freelQ *fl, unsigned int len,
1064 int dma_pad, int skb_pad,
1065 unsigned int copy_thres,
1066 unsigned int drop_thres)
1067{
1068 struct sk_buff *skb;
1069 struct freelQ_ce *ce = &fl->centries[fl->cidx];
1070
1071 if (len < copy_thres) {
1072 skb = alloc_skb(len + skb_pad, GFP_ATOMIC);
1073 if (likely(skb != NULL)) {
1074 skb_reserve(skb, skb_pad);
1075 skb_put(skb, len);
1076 pci_dma_sync_single_for_cpu(pdev,
1077 pci_unmap_addr(ce, dma_addr),
1078 pci_unmap_len(ce, dma_len),
1079 PCI_DMA_FROMDEVICE);
1080 memcpy(skb->data, ce->skb->data + dma_pad, len);
1081 pci_dma_sync_single_for_device(pdev,
1082 pci_unmap_addr(ce, dma_addr),
1083 pci_unmap_len(ce, dma_len),
1084 PCI_DMA_FROMDEVICE);
1085 } else if (!drop_thres)
1086 goto use_orig_buf;
8199d3a7 1087
559fb51b
SB
1088 recycle_fl_buf(fl, fl->cidx);
1089 return skb;
8199d3a7
CL
1090 }
1091
559fb51b
SB
1092 if (fl->credits < drop_thres) {
1093 recycle_fl_buf(fl, fl->cidx);
1094 return NULL;
1095 }
8199d3a7 1096
559fb51b
SB
1097use_orig_buf:
1098 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
1099 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
1100 skb = ce->skb;
1101 skb_reserve(skb, dma_pad);
1102 skb_put(skb, len);
1103 return skb;
1104}
8199d3a7 1105
559fb51b
SB
1106/**
1107 * unexpected_offload - handle an unexpected offload packet
1108 * @adapter: the adapter
1109 * @fl: the free list that received the packet
1110 *
1111 * Called when we receive an unexpected offload packet (e.g., the TOE
1112 * function is disabled or the card is a NIC). Prints a message and
1113 * recycles the buffer.
1114 */
1115static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
1116{
1117 struct freelQ_ce *ce = &fl->centries[fl->cidx];
1118 struct sk_buff *skb = ce->skb;
1119
1120 pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
1121 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
1122 CH_ERR("%s: unexpected offload packet, cmd %u\n",
1123 adapter->name, *skb->data);
1124 recycle_fl_buf(fl, fl->cidx);
8199d3a7
CL
1125}
1126
f1d3d38a
SH
1127/*
1128 * T1/T2 SGE limits the maximum DMA size per TX descriptor to
1129 * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
1130 * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
1131 * Note that the *_large_page_tx_descs stuff will be optimized out when
1132 * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
1133 *
1134 * compute_large_page_descs() computes how many additional descriptors are
1135 * required to break down the stack's request.
1136 */
1137static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
1138{
1139 unsigned int count = 0;
1140 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
1141 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
1142 unsigned int i, len = skb->len - skb->data_len;
1143 while (len > SGE_TX_DESC_MAX_PLEN) {
1144 count++;
1145 len -= SGE_TX_DESC_MAX_PLEN;
1146 }
1147 for (i = 0; nfrags--; i++) {
1148 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1149 len = frag->size;
1150 while (len > SGE_TX_DESC_MAX_PLEN) {
1151 count++;
1152 len -= SGE_TX_DESC_MAX_PLEN;
1153 }
1154 }
1155 }
1156 return count;
1157}
1158
1159/*
1160 * Write a cmdQ entry.
1161 *
1162 * Since this function writes the 'flags' field, it must not be used to
1163 * write the first cmdQ entry.
1164 */
1165static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
1166 unsigned int len, unsigned int gen,
1167 unsigned int eop)
1168{
1169 if (unlikely(len > SGE_TX_DESC_MAX_PLEN))
1170 BUG();
1171 e->addr_lo = (u32)mapping;
1172 e->addr_hi = (u64)mapping >> 32;
1173 e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
1174 e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
1175}
1176
1177/*
1178 * See comment for previous function.
1179 *
1180 * write_tx_descs_large_page() writes additional SGE tx descriptors if
1181 * *desc_len exceeds HW's capability.
1182 */
1183static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
1184 struct cmdQ_e **e,
1185 struct cmdQ_ce **ce,
1186 unsigned int *gen,
1187 dma_addr_t *desc_mapping,
1188 unsigned int *desc_len,
1189 unsigned int nfrags,
1190 struct cmdQ *q)
1191{
1192 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
1193 struct cmdQ_e *e1 = *e;
1194 struct cmdQ_ce *ce1 = *ce;
1195
1196 while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
1197 *desc_len -= SGE_TX_DESC_MAX_PLEN;
1198 write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
1199 *gen, nfrags == 0 && *desc_len == 0);
1200 ce1->skb = NULL;
1201 pci_unmap_len_set(ce1, dma_len, 0);
1202 *desc_mapping += SGE_TX_DESC_MAX_PLEN;
1203 if (*desc_len) {
1204 ce1++;
1205 e1++;
1206 if (++pidx == q->size) {
1207 pidx = 0;
1208 *gen ^= 1;
1209 ce1 = q->centries;
1210 e1 = q->entries;
1211 }
1212 }
1213 }
1214 *e = e1;
1215 *ce = ce1;
1216 }
1217 return pidx;
1218}
1219
8199d3a7 1220/*
559fb51b
SB
1221 * Write the command descriptors to transmit the given skb starting at
1222 * descriptor pidx with the given generation.
8199d3a7 1223 */
559fb51b
SB
1224static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
1225 unsigned int pidx, unsigned int gen,
1226 struct cmdQ *q)
8199d3a7 1227{
f1d3d38a 1228 dma_addr_t mapping, desc_mapping;
559fb51b
SB
1229 struct cmdQ_e *e, *e1;
1230 struct cmdQ_ce *ce;
f1d3d38a
SH
1231 unsigned int i, flags, first_desc_len, desc_len,
1232 nfrags = skb_shinfo(skb)->nr_frags;
559fb51b 1233
f1d3d38a 1234 e = e1 = &q->entries[pidx];
559fb51b 1235 ce = &q->centries[pidx];
f1d3d38a
SH
1236
1237 mapping = pci_map_single(adapter->pdev, skb->data,
1238 skb->len - skb->data_len, PCI_DMA_TODEVICE);
1239
1240 desc_mapping = mapping;
1241 desc_len = skb->len - skb->data_len;
1242
1243 flags = F_CMD_DATAVALID | F_CMD_SOP |
1244 V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
1245 V_CMD_GEN2(gen);
1246 first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
1247 desc_len : SGE_TX_DESC_MAX_PLEN;
1248 e->addr_lo = (u32)desc_mapping;
1249 e->addr_hi = (u64)desc_mapping >> 32;
1250 e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
1251 ce->skb = NULL;
1252 pci_unmap_len_set(ce, dma_len, 0);
1253
1254 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
1255 desc_len > SGE_TX_DESC_MAX_PLEN) {
1256 desc_mapping += first_desc_len;
1257 desc_len -= first_desc_len;
1258 e1++;
1259 ce++;
1260 if (++pidx == q->size) {
1261 pidx = 0;
1262 gen ^= 1;
1263 e1 = q->entries;
1264 ce = q->centries;
1265 }
1266 pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
1267 &desc_mapping, &desc_len,
1268 nfrags, q);
1269
1270 if (likely(desc_len))
1271 write_tx_desc(e1, desc_mapping, desc_len, gen,
1272 nfrags == 0);
1273 }
1274
559fb51b
SB
1275 ce->skb = NULL;
1276 pci_unmap_addr_set(ce, dma_addr, mapping);
1277 pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
8199d3a7 1278
f1d3d38a 1279 for (i = 0; nfrags--; i++) {
559fb51b 1280 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
559fb51b 1281 e1++;
f1d3d38a 1282 ce++;
559fb51b
SB
1283 if (++pidx == q->size) {
1284 pidx = 0;
1285 gen ^= 1;
559fb51b 1286 e1 = q->entries;
f1d3d38a 1287 ce = q->centries;
8199d3a7 1288 }
8199d3a7 1289
559fb51b
SB
1290 mapping = pci_map_page(adapter->pdev, frag->page,
1291 frag->page_offset, frag->size,
1292 PCI_DMA_TODEVICE);
f1d3d38a
SH
1293 desc_mapping = mapping;
1294 desc_len = frag->size;
1295
1296 pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
1297 &desc_mapping, &desc_len,
1298 nfrags, q);
1299 if (likely(desc_len))
1300 write_tx_desc(e1, desc_mapping, desc_len, gen,
1301 nfrags == 0);
559fb51b
SB
1302 ce->skb = NULL;
1303 pci_unmap_addr_set(ce, dma_addr, mapping);
1304 pci_unmap_len_set(ce, dma_len, frag->size);
8199d3a7 1305 }
559fb51b
SB
1306 ce->skb = skb;
1307 wmb();
1308 e->flags = flags;
1309}
8199d3a7 1310
559fb51b
SB
1311/*
1312 * Clean up completed Tx buffers.
1313 */
1314static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
1315{
1316 unsigned int reclaim = q->processed - q->cleaned;
8199d3a7 1317
559fb51b 1318 if (reclaim) {
f1d3d38a
SH
1319 pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
1320 q->processed, q->cleaned);
559fb51b
SB
1321 free_cmdQ_buffers(sge, q, reclaim);
1322 q->cleaned += reclaim;
8199d3a7 1323 }
559fb51b 1324}
8199d3a7 1325
f1d3d38a
SH
1326/*
1327 * Called from tasklet. Checks the scheduler for any
1328 * pending skbs that can be sent.
1329 */
1330static void restart_sched(unsigned long arg)
1331{
1332 struct sge *sge = (struct sge *) arg;
1333 struct adapter *adapter = sge->adapter;
1334 struct cmdQ *q = &sge->cmdQ[0];
1335 struct sk_buff *skb;
1336 unsigned int credits, queued_skb = 0;
1337
1338 spin_lock(&q->lock);
1339 reclaim_completed_tx(sge, q);
1340
1341 credits = q->size - q->in_use;
1342 pr_debug("restart_sched credits=%d\n", credits);
1343 while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
1344 unsigned int genbit, pidx, count;
1345 count = 1 + skb_shinfo(skb)->nr_frags;
1346 count += compute_large_page_tx_descs(skb);
1347 q->in_use += count;
1348 genbit = q->genbit;
1349 pidx = q->pidx;
1350 q->pidx += count;
1351 if (q->pidx >= q->size) {
1352 q->pidx -= q->size;
1353 q->genbit ^= 1;
1354 }
1355 write_tx_descs(adapter, skb, pidx, genbit, q);
1356 credits = q->size - q->in_use;
1357 queued_skb = 1;
1358 }
1359
1360 if (queued_skb) {
1361 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1362 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
1363 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1364 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1365 }
1366 }
1367 spin_unlock(&q->lock);
1368}
8199d3a7 1369
559fb51b
SB
1370/**
1371 * sge_rx - process an ingress ethernet packet
1372 * @sge: the sge structure
1373 * @fl: the free list that contains the packet buffer
1374 * @len: the packet length
8199d3a7 1375 *
559fb51b 1376 * Process an ingress ethernet pakcet and deliver it to the stack.
8199d3a7 1377 */
559fb51b 1378static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
8199d3a7 1379{
559fb51b
SB
1380 struct sk_buff *skb;
1381 struct cpl_rx_pkt *p;
1382 struct adapter *adapter = sge->adapter;
56f643c2 1383 struct sge_port_stats *st;
8199d3a7 1384
559fb51b
SB
1385 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad,
1386 sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
1387 SGE_RX_DROP_THRES);
56f643c2
SH
1388 if (unlikely(!skb)) {
1389 sge->stats.rx_drops++;
559fb51b 1390 return 0;
8199d3a7 1391 }
559fb51b
SB
1392
1393 p = (struct cpl_rx_pkt *)skb->data;
1394 skb_pull(skb, sizeof(*p));
f1d3d38a
SH
1395 if (p->iff >= adapter->params.nports) {
1396 kfree_skb(skb);
1397 return 0;
1398 }
1399
56f643c2 1400 skb->dev = adapter->port[p->iff].dev;
559fb51b 1401 skb->dev->last_rx = jiffies;
56f643c2
SH
1402 st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
1403 st->rx_packets++;
1404
559fb51b
SB
1405 skb->protocol = eth_type_trans(skb, skb->dev);
1406 if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
1407 skb->protocol == htons(ETH_P_IP) &&
1408 (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
56f643c2 1409 ++st->rx_cso_good;
559fb51b
SB
1410 skb->ip_summed = CHECKSUM_UNNECESSARY;
1411 } else
1412 skb->ip_summed = CHECKSUM_NONE;
1413
1414 if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
56f643c2 1415 st->vlan_xtract++;
7fe26a60 1416#ifdef CONFIG_CHELSIO_T1_NAPI
559fb51b
SB
1417 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
1418 ntohs(p->vlan));
7fe26a60 1419#else
559fb51b
SB
1420 vlan_hwaccel_rx(skb, adapter->vlan_grp,
1421 ntohs(p->vlan));
7fe26a60
SH
1422#endif
1423 } else {
1424#ifdef CONFIG_CHELSIO_T1_NAPI
559fb51b 1425 netif_receive_skb(skb);
7fe26a60 1426#else
559fb51b 1427 netif_rx(skb);
7fe26a60
SH
1428#endif
1429 }
559fb51b 1430 return 0;
8199d3a7
CL
1431}
1432
1433/*
559fb51b 1434 * Returns true if a command queue has enough available descriptors that
8199d3a7
CL
1435 * we can resume Tx operation after temporarily disabling its packet queue.
1436 */
559fb51b 1437static inline int enough_free_Tx_descs(const struct cmdQ *q)
8199d3a7 1438{
559fb51b
SB
1439 unsigned int r = q->processed - q->cleaned;
1440
1441 return q->in_use - r < (q->size >> 1);
8199d3a7
CL
1442}
1443
1444/*
559fb51b
SB
1445 * Called when sufficient space has become available in the SGE command queues
1446 * after the Tx packet schedulers have been suspended to restart the Tx path.
8199d3a7 1447 */
559fb51b 1448static void restart_tx_queues(struct sge *sge)
8199d3a7 1449{
559fb51b 1450 struct adapter *adap = sge->adapter;
8199d3a7 1451
559fb51b
SB
1452 if (enough_free_Tx_descs(&sge->cmdQ[0])) {
1453 int i;
1454
1455 for_each_port(adap, i) {
1456 struct net_device *nd = adap->port[i].dev;
1457
1458 if (test_and_clear_bit(nd->if_port,
1459 &sge->stopped_tx_queues) &&
1460 netif_running(nd)) {
232a347a 1461 sge->stats.cmdQ_restarted[2]++;
559fb51b
SB
1462 netif_wake_queue(nd);
1463 }
1464 }
1465 }
1466}
1467
1468/*
1469 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
1470 * information.
1471 */
1472static unsigned int update_tx_info(struct adapter *adapter,
1473 unsigned int flags,
1474 unsigned int pr0)
1475{
1476 struct sge *sge = adapter->sge;
1477 struct cmdQ *cmdq = &sge->cmdQ[0];
8199d3a7 1478
559fb51b 1479 cmdq->processed += pr0;
f1d3d38a
SH
1480 if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
1481 freelQs_empty(sge);
1482 flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
1483 }
559fb51b
SB
1484 if (flags & F_CMDQ0_ENABLE) {
1485 clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
f1d3d38a 1486
559fb51b
SB
1487 if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
1488 !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
1489 set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
1490 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1491 }
f1d3d38a
SH
1492 if (sge->tx_sched)
1493 tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
1494
1495 flags &= ~F_CMDQ0_ENABLE;
559fb51b 1496 }
f1d3d38a 1497
559fb51b
SB
1498 if (unlikely(sge->stopped_tx_queues != 0))
1499 restart_tx_queues(sge);
8199d3a7 1500
559fb51b
SB
1501 return flags;
1502}
8199d3a7 1503
559fb51b
SB
1504/*
1505 * Process SGE responses, up to the supplied budget. Returns the number of
1506 * responses processed. A negative budget is effectively unlimited.
1507 */
1508static int process_responses(struct adapter *adapter, int budget)
1509{
1510 struct sge *sge = adapter->sge;
1511 struct respQ *q = &sge->respQ;
1512 struct respQ_e *e = &q->entries[q->cidx];
1513 int budget_left = budget;
1514 unsigned int flags = 0;
1515 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1516
1517
1518 while (likely(budget_left && e->GenerationBit == q->genbit)) {
1519 flags |= e->Qsleeping;
1520
1521 cmdq_processed[0] += e->Cmdq0CreditReturn;
1522 cmdq_processed[1] += e->Cmdq1CreditReturn;
1523
1524 /* We batch updates to the TX side to avoid cacheline
1525 * ping-pong of TX state information on MP where the sender
1526 * might run on a different CPU than this function...
1527 */
1528 if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) {
1529 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1530 cmdq_processed[0] = 0;
1531 }
1532 if (unlikely(cmdq_processed[1] > 16)) {
1533 sge->cmdQ[1].processed += cmdq_processed[1];
1534 cmdq_processed[1] = 0;
8199d3a7
CL
1535 }
1536 if (likely(e->DataValid)) {
559fb51b
SB
1537 struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1538
5d9428de 1539 BUG_ON(!e->Sop || !e->Eop);
559fb51b
SB
1540 if (unlikely(e->Offload))
1541 unexpected_offload(adapter, fl);
1542 else
1543 sge_rx(sge, fl, e->BufferLength);
1544
1545 /*
1546 * Note: this depends on each packet consuming a
1547 * single free-list buffer; cf. the BUG above.
1548 */
1549 if (++fl->cidx == fl->size)
1550 fl->cidx = 0;
1551 if (unlikely(--fl->credits <
1552 fl->size - SGE_FREEL_REFILL_THRESH))
1553 refill_free_list(sge, fl);
1554 } else
1555 sge->stats.pure_rsps++;
8199d3a7 1556
8199d3a7 1557 e++;
559fb51b
SB
1558 if (unlikely(++q->cidx == q->size)) {
1559 q->cidx = 0;
1560 q->genbit ^= 1;
1561 e = q->entries;
1562 }
1563 prefetch(e);
1564
1565 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1566 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1567 q->credits = 0;
8199d3a7 1568 }
559fb51b 1569 --budget_left;
8199d3a7
CL
1570 }
1571
559fb51b
SB
1572 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1573 sge->cmdQ[1].processed += cmdq_processed[1];
8199d3a7 1574
559fb51b
SB
1575 budget -= budget_left;
1576 return budget;
1577}
8199d3a7 1578
7fe26a60 1579#ifdef CONFIG_CHELSIO_T1_NAPI
559fb51b
SB
1580/*
1581 * A simpler version of process_responses() that handles only pure (i.e.,
1582 * non data-carrying) responses. Such respones are too light-weight to justify
1583 * calling a softirq when using NAPI, so we handle them specially in hard
1584 * interrupt context. The function is called with a pointer to a response,
1585 * which the caller must ensure is a valid pure response. Returns 1 if it
1586 * encounters a valid data-carrying response, 0 otherwise.
1587 */
1588static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
1589{
1590 struct sge *sge = adapter->sge;
1591 struct respQ *q = &sge->respQ;
1592 unsigned int flags = 0;
1593 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
8199d3a7 1594
559fb51b
SB
1595 do {
1596 flags |= e->Qsleeping;
8199d3a7 1597
559fb51b
SB
1598 cmdq_processed[0] += e->Cmdq0CreditReturn;
1599 cmdq_processed[1] += e->Cmdq1CreditReturn;
1600
1601 e++;
1602 if (unlikely(++q->cidx == q->size)) {
1603 q->cidx = 0;
1604 q->genbit ^= 1;
1605 e = q->entries;
8199d3a7 1606 }
559fb51b 1607 prefetch(e);
8199d3a7 1608
559fb51b
SB
1609 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
1610 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1611 q->credits = 0;
8199d3a7 1612 }
559fb51b
SB
1613 sge->stats.pure_rsps++;
1614 } while (e->GenerationBit == q->genbit && !e->DataValid);
8199d3a7 1615
559fb51b
SB
1616 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1617 sge->cmdQ[1].processed += cmdq_processed[1];
8199d3a7 1618
559fb51b 1619 return e->GenerationBit == q->genbit;
8199d3a7
CL
1620}
1621
1622/*
559fb51b
SB
1623 * Handler for new data events when using NAPI. This does not need any locking
1624 * or protection from interrupts as data interrupts are off at this point and
1625 * other adapter interrupts do not interfere.
8199d3a7 1626 */
7fe26a60 1627int t1_poll(struct net_device *dev, int *budget)
8199d3a7 1628{
559fb51b
SB
1629 struct adapter *adapter = dev->priv;
1630 int effective_budget = min(*budget, dev->quota);
559fb51b 1631 int work_done = process_responses(adapter, effective_budget);
7fe26a60 1632
559fb51b
SB
1633 *budget -= work_done;
1634 dev->quota -= work_done;
8199d3a7 1635
559fb51b
SB
1636 if (work_done >= effective_budget)
1637 return 1;
1638
7fe26a60 1639 spin_lock_irq(&adapter->async_lock);
559fb51b 1640 __netif_rx_complete(dev);
559fb51b 1641 writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
7fe26a60
SH
1642 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
1643 adapter->regs + A_PL_ENABLE);
1644 spin_unlock_irq(&adapter->async_lock);
8199d3a7 1645
7fe26a60 1646 return 0;
559fb51b 1647}
8199d3a7 1648
559fb51b
SB
1649/*
1650 * NAPI version of the main interrupt handler.
1651 */
7fe26a60 1652irqreturn_t t1_interrupt(int irq, void *data)
559fb51b 1653{
559fb51b 1654 struct adapter *adapter = data;
7fe26a60 1655 struct net_device *dev = adapter->sge->netdev;
559fb51b 1656 struct sge *sge = adapter->sge;
7fe26a60
SH
1657 u32 cause;
1658 int handled = 0;
8199d3a7 1659
7fe26a60
SH
1660 cause = readl(adapter->regs + A_PL_CAUSE);
1661 if (cause == 0 || cause == ~0)
1662 return IRQ_NONE;
559fb51b
SB
1663
1664 spin_lock(&adapter->async_lock);
7fe26a60
SH
1665 if (cause & F_PL_INTR_SGE_DATA) {
1666 struct respQ *q = &adapter->sge->respQ;
559fb51b
SB
1667 struct respQ_e *e = &q->entries[q->cidx];
1668
7fe26a60
SH
1669 handled = 1;
1670 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1671
1672 if (e->GenerationBit == q->genbit &&
1673 __netif_rx_schedule_prep(dev)) {
1674 if (e->DataValid || process_pure_responses(adapter, e)) {
1675 /* mask off data IRQ */
1676 writel(adapter->slow_intr_mask,
1677 adapter->regs + A_PL_ENABLE);
1678 __netif_rx_schedule(sge->netdev);
1679 goto unlock;
1680 }
1681 /* no data, no NAPI needed */
1682 netif_poll_enable(dev);
1683
1684 }
1685 writel(q->cidx, adapter->regs + A_SG_SLEEPING);
1686 } else
1687 handled = t1_slow_intr_handler(adapter);
1688
559fb51b
SB
1689 if (!handled)
1690 sge->stats.unhandled_irqs++;
7fe26a60 1691unlock:
559fb51b
SB
1692 spin_unlock(&adapter->async_lock);
1693 return IRQ_RETVAL(handled != 0);
1694}
8199d3a7 1695
7fe26a60 1696#else
559fb51b
SB
1697/*
1698 * Main interrupt handler, optimized assuming that we took a 'DATA'
1699 * interrupt.
1700 *
1701 * 1. Clear the interrupt
1702 * 2. Loop while we find valid descriptors and process them; accumulate
1703 * information that can be processed after the loop
1704 * 3. Tell the SGE at which index we stopped processing descriptors
1705 * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
1706 * outstanding TX buffers waiting, replenish RX buffers, potentially
1707 * reenable upper layers if they were turned off due to lack of TX
1708 * resources which are available again.
1709 * 5. If we took an interrupt, but no valid respQ descriptors was found we
1710 * let the slow_intr_handler run and do error handling.
1711 */
7fe26a60 1712irqreturn_t t1_interrupt(int irq, void *cookie)
559fb51b
SB
1713{
1714 int work_done;
1715 struct respQ_e *e;
1716 struct adapter *adapter = cookie;
1717 struct respQ *Q = &adapter->sge->respQ;
8199d3a7 1718
559fb51b
SB
1719 spin_lock(&adapter->async_lock);
1720 e = &Q->entries[Q->cidx];
1721 prefetch(e);
8199d3a7 1722
559fb51b 1723 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
8199d3a7 1724
559fb51b
SB
1725 if (likely(e->GenerationBit == Q->genbit))
1726 work_done = process_responses(adapter, -1);
1727 else
1728 work_done = t1_slow_intr_handler(adapter);
8199d3a7 1729
559fb51b
SB
1730 /*
1731 * The unconditional clearing of the PL_CAUSE above may have raced
1732 * with DMA completion and the corresponding generation of a response
1733 * to cause us to miss the resulting data interrupt. The next write
1734 * is also unconditional to recover the missed interrupt and render
1735 * this race harmless.
1736 */
1737 writel(Q->cidx, adapter->regs + A_SG_SLEEPING);
1738
1739 if (!work_done)
1740 adapter->sge->stats.unhandled_irqs++;
1741 spin_unlock(&adapter->async_lock);
1742 return IRQ_RETVAL(work_done != 0);
1743}
7fe26a60 1744#endif
559fb51b
SB
1745
1746/*
1747 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
1748 *
1749 * The code figures out how many entries the sk_buff will require in the
1750 * cmdQ and updates the cmdQ data structure with the state once the enqueue
1751 * has complete. Then, it doesn't access the global structure anymore, but
1752 * uses the corresponding fields on the stack. In conjuction with a spinlock
1753 * around that code, we can make the function reentrant without holding the
1754 * lock when we actually enqueue (which might be expensive, especially on
1755 * architectures with IO MMUs).
1756 *
1757 * This runs with softirqs disabled.
1758 */
aa84505f
SH
1759static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1760 unsigned int qid, struct net_device *dev)
559fb51b
SB
1761{
1762 struct sge *sge = adapter->sge;
1763 struct cmdQ *q = &sge->cmdQ[qid];
f1d3d38a 1764 unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
559fb51b 1765
cabdfb37
SH
1766 if (!spin_trylock(&q->lock))
1767 return NETDEV_TX_LOCKED;
1768
559fb51b
SB
1769 reclaim_completed_tx(sge, q);
1770
1771 pidx = q->pidx;
1772 credits = q->size - q->in_use;
1773 count = 1 + skb_shinfo(skb)->nr_frags;
f1d3d38a 1774 count += compute_large_page_tx_descs(skb);
559fb51b 1775
f1d3d38a
SH
1776 /* Ethernet packet */
1777 if (unlikely(credits < count)) {
1778 if (!netif_queue_stopped(dev)) {
559fb51b
SB
1779 netif_stop_queue(dev);
1780 set_bit(dev->if_port, &sge->stopped_tx_queues);
232a347a 1781 sge->stats.cmdQ_full[2]++;
f1d3d38a
SH
1782 CH_ERR("%s: Tx ring full while queue awake!\n",
1783 adapter->name);
8199d3a7 1784 }
f1d3d38a
SH
1785 spin_unlock(&q->lock);
1786 return NETDEV_TX_BUSY;
1787 }
1788
1789 if (unlikely(credits - count < q->stop_thres)) {
1790 netif_stop_queue(dev);
1791 set_bit(dev->if_port, &sge->stopped_tx_queues);
1792 sge->stats.cmdQ_full[2]++;
1793 }
1794
1795 /* T204 cmdQ0 skbs that are destined for a certain port have to go
1796 * through the scheduler.
1797 */
1798 if (sge->tx_sched && !qid && skb->dev) {
1799 use_sched:
1800 use_sched_skb = 1;
1801 /* Note that the scheduler might return a different skb than
1802 * the one passed in.
1803 */
1804 skb = sched_skb(sge, skb, credits);
1805 if (!skb) {
1806 spin_unlock(&q->lock);
1807 return NETDEV_TX_OK;
559fb51b 1808 }
f1d3d38a
SH
1809 pidx = q->pidx;
1810 count = 1 + skb_shinfo(skb)->nr_frags;
1811 count += compute_large_page_tx_descs(skb);
559fb51b 1812 }
f1d3d38a 1813
559fb51b
SB
1814 q->in_use += count;
1815 genbit = q->genbit;
f1d3d38a 1816 pidx = q->pidx;
559fb51b
SB
1817 q->pidx += count;
1818 if (q->pidx >= q->size) {
1819 q->pidx -= q->size;
1820 q->genbit ^= 1;
8199d3a7 1821 }
559fb51b 1822 spin_unlock(&q->lock);
8199d3a7 1823
559fb51b 1824 write_tx_descs(adapter, skb, pidx, genbit, q);
8199d3a7
CL
1825
1826 /*
1827 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring
 1828 * the doorbell if the Q is asleep. There is a natural race: the hardware
 1829 * may go to sleep just after we check; in that case, however, the
 1830 * interrupt handler will detect the outstanding TX packet and ring the
 1831 * doorbell for us.
1832 */
559fb51b
SB
1833 if (qid)
1834 doorbell_pio(adapter, F_CMDQ1_ENABLE);
1835 else {
1836 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1837 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
1838 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
1839 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
1840 }
8199d3a7 1841 }
f1d3d38a
SH
1842
1843 if (use_sched_skb) {
1844 if (spin_trylock(&q->lock)) {
1845 credits = q->size - q->in_use;
1846 skb = NULL;
1847 goto use_sched;
1848 }
1849 }
aa84505f 1850 return NETDEV_TX_OK;
8199d3a7
CL
1851}
1852
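/*
 * Illustrative sketch, not part of the driver: the "reserve ring state under
 * the lock, build descriptors outside the lock" pattern that t1_sge_tx uses
 * above. All names here (tx_ring, reserve_and_fill, fill_descs) are
 * hypothetical, and a pthread mutex stands in for the cmdQ spinlock.
 */
#include <pthread.h>

struct tx_ring {
	pthread_mutex_t lock;
	unsigned int size, in_use, pidx, genbit;
};

static int reserve_and_fill(struct tx_ring *q, unsigned int count,
			    void (*fill_descs)(unsigned int pidx,
					       unsigned int genbit,
					       unsigned int count))
{
	unsigned int pidx, genbit;

	pthread_mutex_lock(&q->lock);
	if (q->size - q->in_use < count) {	/* not enough free entries */
		pthread_mutex_unlock(&q->lock);
		return -1;
	}
	q->in_use += count;			/* reserve our slots */
	pidx = q->pidx;				/* remember where they start */
	genbit = q->genbit;
	q->pidx += count;
	if (q->pidx >= q->size) {		/* wrapped: flip the generation */
		q->pidx -= q->size;
		q->genbit ^= 1;
	}
	pthread_mutex_unlock(&q->lock);

	/* Slow work (DMA mapping, descriptor writes) runs without the lock. */
	fill_descs(pidx, genbit, count);
	return 0;
}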
1853#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
1854
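/*
 * Illustrative sketch, not part of the driver: MK_ETH_TYPE_MSS keeps the MSS
 * in the low 14 bits and the Ethernet framing type in the bits above them.
 * The concrete values below (type 2, MSS 1460) are examples only; the real
 * type codes come from CPL_ETH_II / CPL_ETH_II_VLAN. Uses the userspace
 * assert() just to show the arithmetic.
 */
#include <assert.h>

static void mk_eth_type_mss_example(void)
{
	unsigned int packed = MK_ETH_TYPE_MSS(2, 1460);

	assert((packed & 0x3FFF) == 1460);	/* MSS lives in bits 0..13 */
	assert((packed >> 14) == 2);		/* framing type in bits 14+ */
	assert(packed == 0x85B4);
}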
559fb51b
SB
1855/*
1856 * eth_hdr_len - return the length of an Ethernet header
1857 * @data: pointer to the start of the Ethernet header
1858 *
1859 * Returns the length of an Ethernet header, including optional VLAN tag.
1860 */
1861static inline int eth_hdr_len(const void *data)
1862{
1863 const struct ethhdr *e = data;
1864
1865 return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
1866}
1867
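/*
 * Illustrative sketch, not part of the driver: the frame sizes accepted by
 * the oversize check in t1_start_xmit below, assuming an MTU of 1500.
 * eth_hdr_len() returns ETH_HLEN (14) for an untagged frame and
 * VLAN_ETH_HLEN (18) for a single 802.1Q tag; the FCS is not counted since
 * it is not part of skb->len.
 */
#include <assert.h>

static void max_frame_len_example(void)
{
	unsigned int mtu = 1500;

	assert(mtu + ETH_HLEN == 1514);		/* untagged frame */
	assert(mtu + VLAN_ETH_HLEN == 1518);	/* single 802.1Q tag */
}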
8199d3a7
CL
1868/*
1869 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
1870 */
1871int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1872{
1873 struct adapter *adapter = dev->priv;
559fb51b 1874 struct sge *sge = adapter->sge;
56f643c2 1875 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], smp_processor_id());
8199d3a7 1876 struct cpl_tx_pkt *cpl;
cabdfb37
SH
1877 struct sk_buff *orig_skb = skb;
1878 int ret;
8199d3a7 1879
f1d3d38a
SH
1880 if (skb->protocol == htons(ETH_P_CPL5))
1881 goto send;
1882
1883 if (skb_shinfo(skb)->gso_size) {
8199d3a7
CL
1884 int eth_type;
1885 struct cpl_tx_pkt_lso *hdr;
1886
56f643c2 1887 ++st->tx_tso;
559fb51b 1888
8199d3a7
CL
1889 eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
1890 CPL_ETH_II : CPL_ETH_II_VLAN;
1891
1892 hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
1893 hdr->opcode = CPL_TX_PKT_LSO;
1894 hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
1895 hdr->ip_hdr_words = skb->nh.iph->ihl;
1896 hdr->tcp_hdr_words = skb->h.th->doff;
1897 hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
f1d3d38a 1898 skb_shinfo(skb)->gso_size));
8199d3a7
CL
1899 hdr->len = htonl(skb->len - sizeof(*hdr));
1900 cpl = (struct cpl_tx_pkt *)hdr;
f1d3d38a 1901 } else {
8199d3a7 1902 /*
559fb51b
SB
 1903 * Packets shorter than ETH_HLEN can break the MAC, so drop them
 1904 * early. We may also get oversized packets because some parts of
 1905 * the kernel don't handle our unusual hard_header_len correctly;
 1906 * drop those too.
8199d3a7 1907 */
559fb51b
SB
1908 if (unlikely(skb->len < ETH_HLEN ||
1909 skb->len > dev->mtu + eth_hdr_len(skb->data))) {
f1d3d38a
SH
 1910 pr_debug("%s: packet size %d hdr %d mtu %d\n", dev->name,
1911 skb->len, eth_hdr_len(skb->data), dev->mtu);
559fb51b 1912 dev_kfree_skb_any(skb);
aa84505f 1913 return NETDEV_TX_OK;
559fb51b
SB
1914 }
1915
1916 /*
1917 * We are using a non-standard hard_header_len and some kernel
1918 * components, such as pktgen, do not handle it right.
1919 * Complain when this happens but try to fix things up.
1920 */
f1d3d38a 1921 if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
f1d3d38a
SH
1922 pr_debug("%s: headroom %d header_len %d\n", dev->name,
1923 skb_headroom(skb), dev->hard_header_len);
1924
559fb51b
SB
1925 if (net_ratelimit())
1926 printk(KERN_ERR "%s: inadequate headroom in "
1927 "Tx packet\n", dev->name);
1928 skb = skb_realloc_headroom(skb, sizeof(*cpl));
1929 dev_kfree_skb_any(orig_skb);
1930 if (!skb)
aa84505f 1931 return NETDEV_TX_OK;
559fb51b 1932 }
8199d3a7
CL
1933
1934 if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
84fa7933 1935 skb->ip_summed == CHECKSUM_PARTIAL &&
f1d3d38a 1936 skb->nh.iph->protocol == IPPROTO_UDP) {
84fa7933 1937 if (unlikely(skb_checksum_help(skb))) {
f1d3d38a 1938 pr_debug("%s: unable to do udp checksum\n", dev->name);
559fb51b 1939 dev_kfree_skb_any(skb);
aa84505f 1940 return NETDEV_TX_OK;
559fb51b 1941 }
f1d3d38a 1942 }
8199d3a7 1943
559fb51b
SB
 1944 /* We assume this catches the gratuitous ARP and we'll re-use it
 1945 * later to flush out stuck ESPI packets...
f1d3d38a
SH
1946 */
 1947 if (unlikely(!adapter->sge->espibug_skb[dev->if_port])) {
8199d3a7 1948 if (skb->protocol == htons(ETH_P_ARP) &&
559fb51b 1949 skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
f1d3d38a 1950 adapter->sge->espibug_skb[dev->if_port] = skb;
559fb51b
SB
1951 /* We want to re-use this skb later. We
1952 * simply bump the reference count and it
1953 * will not be freed...
1954 */
1955 skb = skb_get(skb);
1956 }
8199d3a7 1957 }
559fb51b
SB
1958
1959 cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
8199d3a7
CL
1960 cpl->opcode = CPL_TX_PKT;
1961 cpl->ip_csum_dis = 1; /* SW calculates IP csum */
84fa7933 1962 cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
8199d3a7 1963 /* the length field isn't used so don't bother setting it */
559fb51b 1964
84fa7933 1965 st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
8199d3a7
CL
1966 }
1967 cpl->iff = dev->if_port;
1968
1969#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1970 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
1971 cpl->vlan_valid = 1;
1972 cpl->vlan = htons(vlan_tx_tag_get(skb));
559fb51b 1973 st->vlan_insert++;
8199d3a7
CL
1974 } else
1975#endif
1976 cpl->vlan_valid = 0;
1977
f1d3d38a 1978send:
56f643c2 1979 st->tx_packets++;
8199d3a7 1980 dev->trans_start = jiffies;
cabdfb37
SH
1981 ret = t1_sge_tx(skb, adapter, 0, dev);
1982
1983 /* If transmit busy, and we reallocated skb's due to headroom limit,
1984 * then silently discard to avoid leak.
1985 */
1986 if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
1987 dev_kfree_skb_any(skb);
1988 ret = NETDEV_TX_OK;
1989 }
1990 return ret;
559fb51b 1991}
8199d3a7 1992
559fb51b
SB
1993/*
1994 * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
1995 */
1996static void sge_tx_reclaim_cb(unsigned long data)
1997{
1998 int i;
1999 struct sge *sge = (struct sge *)data;
2000
2001 for (i = 0; i < SGE_CMDQ_N; ++i) {
2002 struct cmdQ *q = &sge->cmdQ[i];
2003
2004 if (!spin_trylock(&q->lock))
2005 continue;
8199d3a7 2006
559fb51b 2007 reclaim_completed_tx(sge, q);
f1d3d38a
SH
2008 if (i == 0 && q->in_use) { /* flush pending credits */
2009 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
2010 }
559fb51b
SB
2011 spin_unlock(&q->lock);
2012 }
2013 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2014}
2015
2016/*
2017 * Propagate changes of the SGE coalescing parameters to the HW.
2018 */
2019int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
2020{
559fb51b
SB
2021 sge->fixed_intrtimer = p->rx_coalesce_usecs *
2022 core_ticks_per_usec(sge->adapter);
2023 writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
8199d3a7
CL
2024 return 0;
2025}
2026
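/*
 * Illustrative sketch, not part of the driver: the SG_INTRTIMER register is
 * programmed in core clock ticks, so the microsecond value from ethtool is
 * scaled by core_ticks_per_usec() as above. The 125 MHz core clock below is
 * an assumed example value, not a statement about actual Terminator silicon;
 * 50 usecs is the driver's default rx_coalesce_usecs.
 */
#include <assert.h>

static void coalesce_ticks_example(void)
{
	unsigned int core_ticks_per_usec = 125;	/* 125 MHz, assumed */
	unsigned int rx_coalesce_usecs = 50;	/* driver default */

	assert(rx_coalesce_usecs * core_ticks_per_usec == 6250);
}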
559fb51b
SB
2027/*
2028 * Allocates both RX and TX resources and configures the SGE. However,
2029 * the hardware is not enabled yet.
2030 */
2031int t1_sge_configure(struct sge *sge, struct sge_params *p)
8199d3a7 2032{
559fb51b
SB
2033 if (alloc_rx_resources(sge, p))
2034 return -ENOMEM;
2035 if (alloc_tx_resources(sge, p)) {
2036 free_rx_resources(sge);
2037 return -ENOMEM;
2038 }
2039 configure_sge(sge, p);
2040
2041 /*
 2042 * Now that we have sized the free lists, calculate the payload
2043 * capacity of the large buffers. Other parts of the driver use
2044 * this to set the max offload coalescing size so that RX packets
2045 * do not overflow our large buffers.
2046 */
2047 p->large_buf_capacity = jumbo_payload_capacity(sge);
2048 return 0;
2049}
8199d3a7 2050
559fb51b
SB
2051/*
2052 * Disables the DMA engine.
2053 */
2054void t1_sge_stop(struct sge *sge)
2055{
f1d3d38a 2056 int i;
559fb51b 2057 writel(0, sge->adapter->regs + A_SG_CONTROL);
f1d3d38a
SH
2058 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
2059
559fb51b
SB
2060 if (is_T2(sge->adapter))
2061 del_timer_sync(&sge->espibug_timer);
f1d3d38a 2062
559fb51b 2063 del_timer_sync(&sge->tx_reclaim_timer);
f1d3d38a
SH
2064 if (sge->tx_sched)
2065 tx_sched_stop(sge);
2066
2067 for (i = 0; i < MAX_NPORTS; i++)
2068 if (sge->espibug_skb[i])
2069 kfree_skb(sge->espibug_skb[i]);
8199d3a7
CL
2070}
2071
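/*
 * Illustrative sketch, not part of the driver: the posted-write flush used
 * by t1_sge_stop()/t1_sge_start() above. Reading the register back forces
 * the preceding posted PCI write to actually reach the device before we
 * continue (e.g. before tearing down timers). In the driver the argument
 * would be sge->adapter->regs + A_SG_CONTROL.
 */
static inline void disable_and_flush(void __iomem *reg)
{
	writel(0, reg);		/* ask the engine to stop (posted write) */
	readl(reg);		/* read back to flush the posted write */
}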
559fb51b
SB
2072/*
2073 * Enables the DMA engine.
2074 */
2075void t1_sge_start(struct sge *sge)
8199d3a7 2076{
559fb51b
SB
2077 refill_free_list(sge, &sge->freelQ[0]);
2078 refill_free_list(sge, &sge->freelQ[1]);
2079
2080 writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
2081 doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
f1d3d38a 2082 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
559fb51b
SB
2083
2084 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2085
f1d3d38a 2086 if (is_T2(sge->adapter))
559fb51b
SB
2087 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2088}
2089
2090/*
 2091 * Callback for the T2 ESPI 'stuck packet feature' workaround.
2092 */
f1d3d38a 2093static void espibug_workaround_t204(unsigned long data)
559fb51b
SB
2094{
2095 struct adapter *adapter = (struct adapter *)data;
8199d3a7 2096 struct sge *sge = adapter->sge;
f1d3d38a
SH
2097 unsigned int nports = adapter->params.nports;
2098 u32 seop[MAX_NPORTS];
8199d3a7 2099
f1d3d38a
SH
2100 if (adapter->open_device_map & PORT_MASK) {
2101 int i;
 2102 if (t1_espi_get_mon_t204(adapter, seop, 0) < 0) {
2103 return;
2104 }
2105 for (i = 0; i < nports; i++) {
2106 struct sk_buff *skb = sge->espibug_skb[i];
 2107 if (netif_running(adapter->port[i].dev) &&
 2108 !netif_queue_stopped(adapter->port[i].dev) &&
 2109 seop[i] && ((seop[i] & 0xfff) == 0) &&
 2110 skb) {
2111 if (!skb->cb[0]) {
2112 u8 ch_mac_addr[ETH_ALEN] =
2113 {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
2114 memcpy(skb->data + sizeof(struct cpl_tx_pkt),
2115 ch_mac_addr, ETH_ALEN);
2116 memcpy(skb->data + skb->len - 10,
2117 ch_mac_addr, ETH_ALEN);
2118 skb->cb[0] = 0xff;
2119 }
2120
2121 /* bump the reference count to avoid freeing of
2122 * the skb once the DMA has completed.
2123 */
2124 skb = skb_get(skb);
2125 t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
559fb51b 2126 }
559fb51b
SB
2127 }
2128 }
2129 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
8199d3a7
CL
2130}
2131
f1d3d38a
SH
2132static void espibug_workaround(unsigned long data)
2133{
2134 struct adapter *adapter = (struct adapter *)data;
2135 struct sge *sge = adapter->sge;
2136
2137 if (netif_running(adapter->port[0].dev)) {
2138 struct sk_buff *skb = sge->espibug_skb[0];
2139 u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
2140
2141 if ((seop & 0xfff0fff) == 0xfff && skb) {
2142 if (!skb->cb[0]) {
2143 u8 ch_mac_addr[ETH_ALEN] =
2144 {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
2145 memcpy(skb->data + sizeof(struct cpl_tx_pkt),
2146 ch_mac_addr, ETH_ALEN);
2147 memcpy(skb->data + skb->len - 10, ch_mac_addr,
2148 ETH_ALEN);
2149 skb->cb[0] = 0xff;
2150 }
2151
2152 /* bump the reference count to avoid freeing of the
2153 * skb once the DMA has completed.
2154 */
2155 skb = skb_get(skb);
2156 t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
2157 }
2158 }
2159 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2160}
2161
559fb51b
SB
2162/*
2163 * Creates a t1_sge structure and returns suggested resource parameters.
2164 */
2165struct sge * __devinit t1_sge_create(struct adapter *adapter,
2166 struct sge_params *p)
2167{
cbee9f91 2168 struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
56f643c2 2169 int i;
559fb51b
SB
2170
2171 if (!sge)
2172 return NULL;
559fb51b
SB
2173
2174 sge->adapter = adapter;
2175 sge->netdev = adapter->port[0].dev;
2176 sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
2177 sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
2178
56f643c2
SH
2179 for_each_port(adapter, i) {
2180 sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
2181 if (!sge->port_stats[i])
2182 goto nomem_port;
2183 }
2184
559fb51b
SB
2185 init_timer(&sge->tx_reclaim_timer);
2186 sge->tx_reclaim_timer.data = (unsigned long)sge;
2187 sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
2188
2189 if (is_T2(sge->adapter)) {
2190 init_timer(&sge->espibug_timer);
f1d3d38a
SH
2191
2192 if (adapter->params.nports > 1) {
2193 tx_sched_init(sge);
2194 sge->espibug_timer.function = espibug_workaround_t204;
2195 } else {
2196 sge->espibug_timer.function = espibug_workaround;
2197 }
559fb51b 2198 sge->espibug_timer.data = (unsigned long)sge->adapter;
f1d3d38a 2199
559fb51b 2200 sge->espibug_timeout = 1;
f1d3d38a
SH
2201 /* for T204, every 10ms */
2202 if (adapter->params.nports > 1)
2203 sge->espibug_timeout = HZ/100;
559fb51b
SB
2204 }
2205
2206
2207 p->cmdQ_size[0] = SGE_CMDQ0_E_N;
2208 p->cmdQ_size[1] = SGE_CMDQ1_E_N;
2209 p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
2210 p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
f1d3d38a
SH
2211 if (sge->tx_sched) {
2212 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
2213 p->rx_coalesce_usecs = 15;
2214 else
2215 p->rx_coalesce_usecs = 50;
2216 } else
2217 p->rx_coalesce_usecs = 50;
2218
559fb51b
SB
2219 p->coalesce_enable = 0;
2220 p->sample_interval_usecs = 0;
559fb51b
SB
2221
2222 return sge;
56f643c2
SH
2223nomem_port:
2224 while (i >= 0) {
2225 free_percpu(sge->port_stats[i]);
2226 --i;
2227 }
2228 kfree(sge);
2229 return NULL;
2230
559fb51b 2231}