1 /* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
5 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
6 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
7 * Copyright (C) 2006 Broadcom Corporation.
8 * Copyright (C) 2007 Michael Buesch <mb@bu3sch.de>
9 *
10 * Distribute under GPL.
11 */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/types.h>
19 #include <linux/netdevice.h>
20 #include <linux/ethtool.h>
21 #include <linux/mii.h>
22 #include <linux/if_ether.h>
23 #include <linux/if_vlan.h>
24 #include <linux/etherdevice.h>
25 #include <linux/pci.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/ssb/ssb.h>
30
31 #include <asm/uaccess.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34
35
36 #include "b44.h"
37
38 #define DRV_MODULE_NAME "b44"
39 #define DRV_MODULE_VERSION "2.0"
40
41 #define B44_DEF_MSG_ENABLE \
42 (NETIF_MSG_DRV | \
43 NETIF_MSG_PROBE | \
44 NETIF_MSG_LINK | \
45 NETIF_MSG_TIMER | \
46 NETIF_MSG_IFDOWN | \
47 NETIF_MSG_IFUP | \
48 NETIF_MSG_RX_ERR | \
49 NETIF_MSG_TX_ERR)
50
51 /* length of time before we decide the hardware is borked,
52 * and dev->tx_timeout() should be called to fix the problem
53 */
54 #define B44_TX_TIMEOUT (5 * HZ)
55
56 /* hardware minimum and maximum for a single frame's data payload */
57 #define B44_MIN_MTU 60
58 #define B44_MAX_MTU 1500
59
60 #define B44_RX_RING_SIZE 512
61 #define B44_DEF_RX_RING_PENDING 200
62 #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
63 B44_RX_RING_SIZE)
64 #define B44_TX_RING_SIZE 512
65 #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
66 #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
67 B44_TX_RING_SIZE)
68
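/* Free-descriptor accounting for the TX ring.  B44_TX_RING_SIZE is a
 * power of two, so NEXT_TX() wraps with a simple mask.  tx_pending
 * defaults to B44_TX_RING_SIZE - 1, which keeps at least one slot
 * unused so a full ring is never mistaken for an empty one;
 * TX_BUFFS_AVAIL() handles both producer/consumer orderings.
 */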
69 #define TX_RING_GAP(BP) \
70 (B44_TX_RING_SIZE - (BP)->tx_pending)
71 #define TX_BUFFS_AVAIL(BP) \
72 (((BP)->tx_cons <= (BP)->tx_prod) ? \
73 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
74 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
75 #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
76
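/* RX buffer layout: the chip writes an RX_HEADER_LEN-byte struct
 * rx_header back into the buffer, followed by 2 bytes of padding, so
 * the frame data starts RX_PKT_OFFSET (30) bytes in and RX_PKT_BUF_SZ
 * leaves room for a 1536-byte frame after that.  See the comment
 * above b44_alloc_rx_skb() for how the descriptors point at it.
 */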
77 #define RX_PKT_OFFSET (RX_HEADER_LEN + 2)
78 #define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET)
79
80 /* minimum number of free TX descriptors required to wake up TX process */
81 #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
82
83 /* b44 internal pattern match filter info */
84 #define B44_PATTERN_BASE 0x400
85 #define B44_PATTERN_SIZE 0x80
86 #define B44_PMASK_BASE 0x600
87 #define B44_PMASK_SIZE 0x10
88 #define B44_MAX_PATTERNS 16
89 #define B44_ETHIPV6UDP_HLEN 62
90 #define B44_ETHIPV4UDP_HLEN 42
91
92 static char version[] __devinitdata =
93 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION "\n";
94
95 MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
96 MODULE_DESCRIPTION("Broadcom 44xx/47xx 10/100 PCI ethernet driver");
97 MODULE_LICENSE("GPL");
98 MODULE_VERSION(DRV_MODULE_VERSION);
99
100 static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
101 module_param(b44_debug, int, 0);
102 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
103
104
105 #ifdef CONFIG_B44_PCI
106 static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
107 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
108 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
109 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
110 { 0 } /* terminate list with empty entry */
111 };
112 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
113
114 static struct pci_driver b44_pci_driver = {
115 .name = DRV_MODULE_NAME,
116 .id_table = b44_pci_tbl,
117 };
118 #endif /* CONFIG_B44_PCI */
119
120 static const struct ssb_device_id b44_ssb_tbl[] = {
121 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
122 SSB_DEVTABLE_END
123 };
124 MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
125
126 static void b44_halt(struct b44 *);
127 static void b44_init_rings(struct b44 *);
128
129 #define B44_FULL_RESET 1
130 #define B44_FULL_RESET_SKIP_PHY 2
131 #define B44_PARTIAL_RESET 3
132 #define B44_CHIP_RESET_FULL 4
133 #define B44_CHIP_RESET_PARTIAL 5
134
135 static void b44_init_hw(struct b44 *, int);
136
137 static int dma_desc_align_mask;
138 static int dma_desc_sync_size;
139 static int instance;
140
141 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
142 #define _B44(x...) # x,
143 B44_STAT_REG_DECLARE
144 #undef _B44
145 };
146
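/* Descriptor-sync helpers for the "ring hack" case (see
 * b44_alloc_consistent()): when a coherent ring allocation is not
 * usable, the ring is kmalloc'ed and streaming-mapped instead
 * (B44_FLAG_RX_RING_HACK / B44_FLAG_TX_RING_HACK), so each descriptor
 * update must be synced explicitly.  dma_desc_align_mask and
 * dma_desc_sync_size are set up elsewhere at module init.
 */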
147 static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
148 dma_addr_t dma_base,
149 unsigned long offset,
150 enum dma_data_direction dir)
151 {
152 ssb_dma_sync_single_range_for_device(sdev, dma_base,
153 offset & dma_desc_align_mask,
154 dma_desc_sync_size, dir);
155 }
156
157 static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
158 dma_addr_t dma_base,
159 unsigned long offset,
160 enum dma_data_direction dir)
161 {
162 ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
163 offset & dma_desc_align_mask,
164 dma_desc_sync_size, dir);
165 }
166
167 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
168 {
169 return ssb_read32(bp->sdev, reg);
170 }
171
172 static inline void bw32(const struct b44 *bp,
173 unsigned long reg, unsigned long val)
174 {
175 ssb_write32(bp->sdev, reg, val);
176 }
177
178 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
179 u32 bit, unsigned long timeout, const int clear)
180 {
181 unsigned long i;
182
183 for (i = 0; i < timeout; i++) {
184 u32 val = br32(bp, reg);
185
186 if (clear && !(val & bit))
187 break;
188 if (!clear && (val & bit))
189 break;
190 udelay(10);
191 }
192 if (i == timeout) {
193 if (net_ratelimit())
194 netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n",
195 bit, reg, clear ? "clear" : "set");
196
197 return -ENODEV;
198 }
199 return 0;
200 }
201
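/* The receive CAM has 64 entries holding the station address (entry 0)
 * and the multicast filter addresses.  Each 6-byte address is split
 * across two registers: B44_CAM_DATA_LO carries bytes 2-5 and
 * B44_CAM_DATA_HI carries bytes 0-1 plus the valid bit; accesses go
 * through B44_CAM_CTRL and poll CAM_CTRL_BUSY for completion.
 */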
202 static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
203 {
204 u32 val;
205
206 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
207 (index << CAM_CTRL_INDEX_SHIFT)));
208
209 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
210
211 val = br32(bp, B44_CAM_DATA_LO);
212
213 data[2] = (val >> 24) & 0xFF;
214 data[3] = (val >> 16) & 0xFF;
215 data[4] = (val >> 8) & 0xFF;
216 data[5] = (val >> 0) & 0xFF;
217
218 val = br32(bp, B44_CAM_DATA_HI);
219
220 data[0] = (val >> 8) & 0xFF;
221 data[1] = (val >> 0) & 0xFF;
222 }
223
224 static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
225 {
226 u32 val;
227
228 val = ((u32) data[2]) << 24;
229 val |= ((u32) data[3]) << 16;
230 val |= ((u32) data[4]) << 8;
231 val |= ((u32) data[5]) << 0;
232 bw32(bp, B44_CAM_DATA_LO, val);
233 val = (CAM_DATA_HI_VALID |
234 (((u32) data[0]) << 8) |
235 (((u32) data[1]) << 0));
236 bw32(bp, B44_CAM_DATA_HI, val);
237 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
238 (index << CAM_CTRL_INDEX_SHIFT)));
239 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
240 }
241
242 static inline void __b44_disable_ints(struct b44 *bp)
243 {
244 bw32(bp, B44_IMASK, 0);
245 }
246
247 static void b44_disable_ints(struct b44 *bp)
248 {
249 __b44_disable_ints(bp);
250
251 /* Flush posted writes. */
252 br32(bp, B44_IMASK);
253 }
254
255 static void b44_enable_ints(struct b44 *bp)
256 {
257 bw32(bp, B44_IMASK, bp->imask);
258 }
259
260 static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
261 {
262 int err;
263
264 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
265 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
266 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
267 (phy_addr << MDIO_DATA_PMD_SHIFT) |
268 (reg << MDIO_DATA_RA_SHIFT) |
269 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
270 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
271 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
272
273 return err;
274 }
275
276 static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
277 {
278 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
279 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
280 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
281 (phy_addr << MDIO_DATA_PMD_SHIFT) |
282 (reg << MDIO_DATA_RA_SHIFT) |
283 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
284 (val & MDIO_DATA_DATA)));
285 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
286 }
287
288 static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
289 {
290 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
291 return 0;
292
293 return __b44_readphy(bp, bp->phy_addr, reg, val);
294 }
295
296 static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
297 {
298 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
299 return 0;
300
301 return __b44_writephy(bp, bp->phy_addr, reg, val);
302 }
303
304 /* miilib interface */
305 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
306 {
307 u32 val;
308 struct b44 *bp = netdev_priv(dev);
309 int rc = __b44_readphy(bp, phy_id, location, &val);
310 if (rc)
311 return 0xffffffff;
312 return val;
313 }
314
315 static void b44_mii_write(struct net_device *dev, int phy_id, int location,
316 int val)
317 {
318 struct b44 *bp = netdev_priv(dev);
319 __b44_writephy(bp, phy_id, location, val);
320 }
321
322 static int b44_phy_reset(struct b44 *bp)
323 {
324 u32 val;
325 int err;
326
327 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
328 return 0;
329 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
330 if (err)
331 return err;
332 udelay(100);
333 err = b44_readphy(bp, MII_BMCR, &val);
334 if (!err) {
335 if (val & BMCR_RESET) {
336 netdev_err(bp->dev, "PHY Reset would not complete\n");
337 err = -ENODEV;
338 }
339 }
340
341 return err;
342 }
343
344 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
345 {
346 u32 val;
347
348 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
349 bp->flags |= pause_flags;
350
351 val = br32(bp, B44_RXCONFIG);
352 if (pause_flags & B44_FLAG_RX_PAUSE)
353 val |= RXCONFIG_FLOW;
354 else
355 val &= ~RXCONFIG_FLOW;
356 bw32(bp, B44_RXCONFIG, val);
357
358 val = br32(bp, B44_MAC_FLOW);
359 if (pause_flags & B44_FLAG_TX_PAUSE)
360 val |= (MAC_FLOW_PAUSE_ENAB |
361 (0xc0 & MAC_FLOW_RX_HI_WATER));
362 else
363 val &= ~MAC_FLOW_PAUSE_ENAB;
364 bw32(bp, B44_MAC_FLOW, val);
365 }
366
367 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
368 {
369 u32 pause_enab = 0;
370
371 /* The driver supports only rx pause by default because
372 the b44 mac tx pause mechanism generates excessive
373 pause frames.
374 Use ethtool to turn on b44 tx pause if necessary.
375 */
376 if ((local & ADVERTISE_PAUSE_CAP) &&
377 	    (local & ADVERTISE_PAUSE_ASYM)) {
378 if ((remote & LPA_PAUSE_ASYM) &&
379 !(remote & LPA_PAUSE_CAP))
380 pause_enab |= B44_FLAG_RX_PAUSE;
381 }
382
383 __b44_set_flow_ctrl(bp, pause_enab);
384 }
385
386 #ifdef SSB_DRIVER_MIPS
387 extern char *nvram_get(char *name);
388 static void b44_wap54g10_workaround(struct b44 *bp)
389 {
390 const char *str;
391 u32 val;
392 int err;
393
394 /*
395 * workaround for bad hardware design in Linksys WAP54G v1.0
396 * see https://dev.openwrt.org/ticket/146
397 * check and reset bit "isolate"
398 */
399 str = nvram_get("boardnum");
400 if (!str)
401 return;
402 if (simple_strtoul(str, NULL, 0) == 2) {
403 err = __b44_readphy(bp, 0, MII_BMCR, &val);
404 if (err)
405 goto error;
406 if (!(val & BMCR_ISOLATE))
407 return;
408 val &= ~BMCR_ISOLATE;
409 err = __b44_writephy(bp, 0, MII_BMCR, val);
410 if (err)
411 goto error;
412 }
413 return;
414 error:
415 pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
416 }
417 #else
418 static inline void b44_wap54g10_workaround(struct b44 *bp)
419 {
420 }
421 #endif
422
423 static int b44_setup_phy(struct b44 *bp)
424 {
425 u32 val;
426 int err;
427
428 b44_wap54g10_workaround(bp);
429
430 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
431 return 0;
432 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
433 goto out;
434 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
435 val & MII_ALEDCTRL_ALLMSK)) != 0)
436 goto out;
437 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
438 goto out;
439 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
440 val | MII_TLEDCTRL_ENABLE)) != 0)
441 goto out;
442
443 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
444 u32 adv = ADVERTISE_CSMA;
445
446 if (bp->flags & B44_FLAG_ADV_10HALF)
447 adv |= ADVERTISE_10HALF;
448 if (bp->flags & B44_FLAG_ADV_10FULL)
449 adv |= ADVERTISE_10FULL;
450 if (bp->flags & B44_FLAG_ADV_100HALF)
451 adv |= ADVERTISE_100HALF;
452 if (bp->flags & B44_FLAG_ADV_100FULL)
453 adv |= ADVERTISE_100FULL;
454
455 if (bp->flags & B44_FLAG_PAUSE_AUTO)
456 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
457
458 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
459 goto out;
460 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
461 BMCR_ANRESTART))) != 0)
462 goto out;
463 } else {
464 u32 bmcr;
465
466 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
467 goto out;
468 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
469 if (bp->flags & B44_FLAG_100_BASE_T)
470 bmcr |= BMCR_SPEED100;
471 if (bp->flags & B44_FLAG_FULL_DUPLEX)
472 bmcr |= BMCR_FULLDPLX;
473 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
474 goto out;
475
476 /* Since we will not be negotiating there is no safe way
477 * to determine if the link partner supports flow control
478 * or not. So just disable it completely in this case.
479 */
480 b44_set_flow_ctrl(bp, 0, 0);
481 }
482
483 out:
484 return err;
485 }
486
487 static void b44_stats_update(struct b44 *bp)
488 {
489 unsigned long reg;
490 u32 *val;
491
492 val = &bp->hw_stats.tx_good_octets;
493 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
494 *val++ += br32(bp, reg);
495 }
496
497 /* Pad */
498 reg += 8*4UL;
499
500 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
501 *val++ += br32(bp, reg);
502 }
503 }
504
505 static void b44_link_report(struct b44 *bp)
506 {
507 if (!netif_carrier_ok(bp->dev)) {
508 netdev_info(bp->dev, "Link is down\n");
509 } else {
510 netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
511 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
512 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
513
514 netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
515 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
516 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
517 }
518 }
519
520 static void b44_check_phy(struct b44 *bp)
521 {
522 u32 bmsr, aux;
523
524 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
525 bp->flags |= B44_FLAG_100_BASE_T;
526 bp->flags |= B44_FLAG_FULL_DUPLEX;
527 if (!netif_carrier_ok(bp->dev)) {
528 u32 val = br32(bp, B44_TX_CTRL);
529 val |= TX_CTRL_DUPLEX;
530 bw32(bp, B44_TX_CTRL, val);
531 netif_carrier_on(bp->dev);
532 b44_link_report(bp);
533 }
534 return;
535 }
536
537 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
538 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
539 (bmsr != 0xffff)) {
540 if (aux & MII_AUXCTRL_SPEED)
541 bp->flags |= B44_FLAG_100_BASE_T;
542 else
543 bp->flags &= ~B44_FLAG_100_BASE_T;
544 if (aux & MII_AUXCTRL_DUPLEX)
545 bp->flags |= B44_FLAG_FULL_DUPLEX;
546 else
547 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
548
549 if (!netif_carrier_ok(bp->dev) &&
550 (bmsr & BMSR_LSTATUS)) {
551 u32 val = br32(bp, B44_TX_CTRL);
552 u32 local_adv, remote_adv;
553
554 if (bp->flags & B44_FLAG_FULL_DUPLEX)
555 val |= TX_CTRL_DUPLEX;
556 else
557 val &= ~TX_CTRL_DUPLEX;
558 bw32(bp, B44_TX_CTRL, val);
559
560 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
561 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
562 !b44_readphy(bp, MII_LPA, &remote_adv))
563 b44_set_flow_ctrl(bp, local_adv, remote_adv);
564
565 /* Link now up */
566 netif_carrier_on(bp->dev);
567 b44_link_report(bp);
568 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
569 /* Link now down */
570 netif_carrier_off(bp->dev);
571 b44_link_report(bp);
572 }
573
574 if (bmsr & BMSR_RFAULT)
575 netdev_warn(bp->dev, "Remote fault detected in PHY\n");
576 if (bmsr & BMSR_JCD)
577 netdev_warn(bp->dev, "Jabber detected in PHY\n");
578 }
579 }
580
581 static void b44_timer(unsigned long __opaque)
582 {
583 struct b44 *bp = (struct b44 *) __opaque;
584
585 spin_lock_irq(&bp->lock);
586
587 b44_check_phy(bp);
588
589 b44_stats_update(bp);
590
591 spin_unlock_irq(&bp->lock);
592
593 mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
594 }
595
596 static void b44_tx(struct b44 *bp)
597 {
598 u32 cur, cons;
599
600 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
601 cur /= sizeof(struct dma_desc);
602
603 /* XXX needs updating when NETIF_F_SG is supported */
604 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
605 struct ring_info *rp = &bp->tx_buffers[cons];
606 struct sk_buff *skb = rp->skb;
607
608 BUG_ON(skb == NULL);
609
610 ssb_dma_unmap_single(bp->sdev,
611 rp->mapping,
612 skb->len,
613 DMA_TO_DEVICE);
614 rp->skb = NULL;
615 dev_kfree_skb_irq(skb);
616 }
617
618 bp->tx_cons = cons;
619 if (netif_queue_stopped(bp->dev) &&
620 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
621 netif_wake_queue(bp->dev);
622
623 bw32(bp, B44_GPTIMER, 0);
624 }
625
626 /* Works like this. This chip writes a 'struct rx_header' 30 bytes
627 * before the DMA address you give it. So we allocate 30 more bytes
628 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
629 * point the chip at 30 bytes past where the rx_header will go.
630 */
631 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
632 {
633 struct dma_desc *dp;
634 struct ring_info *src_map, *map;
635 struct rx_header *rh;
636 struct sk_buff *skb;
637 dma_addr_t mapping;
638 int dest_idx;
639 u32 ctrl;
640
641 src_map = NULL;
642 if (src_idx >= 0)
643 src_map = &bp->rx_buffers[src_idx];
644 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
645 map = &bp->rx_buffers[dest_idx];
646 skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
647 if (skb == NULL)
648 return -ENOMEM;
649
650 mapping = ssb_dma_map_single(bp->sdev, skb->data,
651 RX_PKT_BUF_SZ,
652 DMA_FROM_DEVICE);
653
654 /* Hardware bug work-around, the chip is unable to do PCI DMA
655 to/from anything above 1GB :-( */
656 if (ssb_dma_mapping_error(bp->sdev, mapping) ||
657 mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
658 /* Sigh... */
659 if (!ssb_dma_mapping_error(bp->sdev, mapping))
660 ssb_dma_unmap_single(bp->sdev, mapping,
661 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
662 dev_kfree_skb_any(skb);
663 skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
664 if (skb == NULL)
665 return -ENOMEM;
666 mapping = ssb_dma_map_single(bp->sdev, skb->data,
667 RX_PKT_BUF_SZ,
668 DMA_FROM_DEVICE);
669 if (ssb_dma_mapping_error(bp->sdev, mapping) ||
670 mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
671 if (!ssb_dma_mapping_error(bp->sdev, mapping))
672 				ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
673 dev_kfree_skb_any(skb);
674 return -ENOMEM;
675 }
676 bp->force_copybreak = 1;
677 }
678
679 rh = (struct rx_header *) skb->data;
680
681 rh->len = 0;
682 rh->flags = 0;
683
684 map->skb = skb;
685 map->mapping = mapping;
686
687 if (src_map != NULL)
688 src_map->skb = NULL;
689
690 ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
691 if (dest_idx == (B44_RX_RING_SIZE - 1))
692 ctrl |= DESC_CTRL_EOT;
693
694 dp = &bp->rx_ring[dest_idx];
695 dp->ctrl = cpu_to_le32(ctrl);
696 dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
697
698 if (bp->flags & B44_FLAG_RX_RING_HACK)
699 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
700 dest_idx * sizeof(*dp),
701 DMA_BIDIRECTIONAL);
702
703 return RX_PKT_BUF_SZ;
704 }
705
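/* Re-post an already-allocated RX buffer at a new ring position.
 * Used when a frame is dropped or after the copybreak path has copied
 * the data out: only the descriptor and ring_info bookkeeping move
 * from src_idx to dest_idx, no new buffer is allocated.
 */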
706 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
707 {
708 struct dma_desc *src_desc, *dest_desc;
709 struct ring_info *src_map, *dest_map;
710 struct rx_header *rh;
711 int dest_idx;
712 __le32 ctrl;
713
714 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
715 dest_desc = &bp->rx_ring[dest_idx];
716 dest_map = &bp->rx_buffers[dest_idx];
717 src_desc = &bp->rx_ring[src_idx];
718 src_map = &bp->rx_buffers[src_idx];
719
720 dest_map->skb = src_map->skb;
721 rh = (struct rx_header *) src_map->skb->data;
722 rh->len = 0;
723 rh->flags = 0;
724 dest_map->mapping = src_map->mapping;
725
726 if (bp->flags & B44_FLAG_RX_RING_HACK)
727 b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
728 src_idx * sizeof(*src_desc),
729 DMA_BIDIRECTIONAL);
730
731 ctrl = src_desc->ctrl;
732 if (dest_idx == (B44_RX_RING_SIZE - 1))
733 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
734 else
735 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
736
737 dest_desc->ctrl = ctrl;
738 dest_desc->addr = src_desc->addr;
739
740 src_map->skb = NULL;
741
742 if (bp->flags & B44_FLAG_RX_RING_HACK)
743 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
744 dest_idx * sizeof(*dest_desc),
745 DMA_BIDIRECTIONAL);
746
747 ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
748 RX_PKT_BUF_SZ,
749 DMA_FROM_DEVICE);
750 }
751
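/* RX processing.  The chip's current descriptor offset (DMARX_STAT) is
 * the producer index; we walk from rx_cons towards it within the NAPI
 * budget.  Errored or persistently zero-length frames are dropped and
 * their buffers recycled.  Frames up to RX_COPY_THRESHOLD (or all
 * frames when force_copybreak is set) are copied into a fresh skb so
 * the original buffer can be recycled; larger frames hand their buffer
 * to the stack and a replacement is allocated via b44_alloc_rx_skb().
 */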
752 static int b44_rx(struct b44 *bp, int budget)
753 {
754 int received;
755 u32 cons, prod;
756
757 received = 0;
758 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
759 prod /= sizeof(struct dma_desc);
760 cons = bp->rx_cons;
761
762 while (cons != prod && budget > 0) {
763 struct ring_info *rp = &bp->rx_buffers[cons];
764 struct sk_buff *skb = rp->skb;
765 dma_addr_t map = rp->mapping;
766 struct rx_header *rh;
767 u16 len;
768
769 ssb_dma_sync_single_for_cpu(bp->sdev, map,
770 RX_PKT_BUF_SZ,
771 DMA_FROM_DEVICE);
772 rh = (struct rx_header *) skb->data;
773 len = le16_to_cpu(rh->len);
774 if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
775 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
776 drop_it:
777 b44_recycle_rx(bp, cons, bp->rx_prod);
778 drop_it_no_recycle:
779 bp->dev->stats.rx_dropped++;
780 goto next_pkt;
781 }
782
783 if (len == 0) {
784 int i = 0;
785
786 do {
787 udelay(2);
788 barrier();
789 len = le16_to_cpu(rh->len);
790 } while (len == 0 && i++ < 5);
791 if (len == 0)
792 goto drop_it;
793 }
794
795 /* Omit CRC. */
796 len -= 4;
797
798 if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
799 int skb_size;
800 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
801 if (skb_size < 0)
802 goto drop_it;
803 ssb_dma_unmap_single(bp->sdev, map,
804 skb_size, DMA_FROM_DEVICE);
805 /* Leave out rx_header */
806 skb_put(skb, len + RX_PKT_OFFSET);
807 skb_pull(skb, RX_PKT_OFFSET);
808 } else {
809 struct sk_buff *copy_skb;
810
811 b44_recycle_rx(bp, cons, bp->rx_prod);
812 copy_skb = netdev_alloc_skb(bp->dev, len + 2);
813 if (copy_skb == NULL)
814 goto drop_it_no_recycle;
815
816 skb_reserve(copy_skb, 2);
817 skb_put(copy_skb, len);
818 /* DMA sync done above, copy just the actual packet */
819 skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
820 copy_skb->data, len);
821 skb = copy_skb;
822 }
823 skb->ip_summed = CHECKSUM_NONE;
824 skb->protocol = eth_type_trans(skb, bp->dev);
825 netif_receive_skb(skb);
826 received++;
827 budget--;
828 next_pkt:
829 bp->rx_prod = (bp->rx_prod + 1) &
830 (B44_RX_RING_SIZE - 1);
831 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
832 }
833
834 bp->rx_cons = cons;
835 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
836
837 return received;
838 }
839
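/* NAPI poll handler.  b44_interrupt() has already latched the cause
 * bits into bp->istat and masked the device; here we reap completed TX
 * descriptors, receive up to 'budget' packets, and on a fatal error
 * (ISTAT_ERRORS) halt and re-initialize the chip.  Interrupts are only
 * re-enabled once the budget was not exhausted.
 */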
840 static int b44_poll(struct napi_struct *napi, int budget)
841 {
842 struct b44 *bp = container_of(napi, struct b44, napi);
843 int work_done;
844 unsigned long flags;
845
846 spin_lock_irqsave(&bp->lock, flags);
847
848 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
849 /* spin_lock(&bp->tx_lock); */
850 b44_tx(bp);
851 /* spin_unlock(&bp->tx_lock); */
852 }
853 spin_unlock_irqrestore(&bp->lock, flags);
854
855 work_done = 0;
856 if (bp->istat & ISTAT_RX)
857 work_done += b44_rx(bp, budget);
858
859 if (bp->istat & ISTAT_ERRORS) {
860 spin_lock_irqsave(&bp->lock, flags);
861 b44_halt(bp);
862 b44_init_rings(bp);
863 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
864 netif_wake_queue(bp->dev);
865 spin_unlock_irqrestore(&bp->lock, flags);
866 work_done = 0;
867 }
868
869 if (work_done < budget) {
870 napi_complete(napi);
871 b44_enable_ints(bp);
872 }
873
874 return work_done;
875 }
876
877 static irqreturn_t b44_interrupt(int irq, void *dev_id)
878 {
879 struct net_device *dev = dev_id;
880 struct b44 *bp = netdev_priv(dev);
881 u32 istat, imask;
882 int handled = 0;
883
884 spin_lock(&bp->lock);
885
886 istat = br32(bp, B44_ISTAT);
887 imask = br32(bp, B44_IMASK);
888
889 /* The interrupt mask register controls which interrupt bits
890 * will actually raise an interrupt to the CPU when set by hw/firmware,
891 * but doesn't mask off the bits.
892 */
893 istat &= imask;
894 if (istat) {
895 handled = 1;
896
897 if (unlikely(!netif_running(dev))) {
898 netdev_info(dev, "late interrupt\n");
899 goto irq_ack;
900 }
901
902 if (napi_schedule_prep(&bp->napi)) {
903 /* NOTE: These writes are posted by the readback of
904 * the ISTAT register below.
905 */
906 bp->istat = istat;
907 __b44_disable_ints(bp);
908 __napi_schedule(&bp->napi);
909 }
910
911 irq_ack:
912 bw32(bp, B44_ISTAT, istat);
913 br32(bp, B44_ISTAT);
914 }
915 spin_unlock(&bp->lock);
916 return IRQ_RETVAL(handled);
917 }
918
919 static void b44_tx_timeout(struct net_device *dev)
920 {
921 struct b44 *bp = netdev_priv(dev);
922
923 netdev_err(dev, "transmit timed out, resetting\n");
924
925 spin_lock_irq(&bp->lock);
926
927 b44_halt(bp);
928 b44_init_rings(bp);
929 b44_init_hw(bp, B44_FULL_RESET);
930
931 spin_unlock_irq(&bp->lock);
932
933 b44_enable_ints(bp);
934
935 netif_wake_queue(dev);
936 }
937
938 static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
939 {
940 struct b44 *bp = netdev_priv(dev);
941 int rc = NETDEV_TX_OK;
942 dma_addr_t mapping;
943 u32 len, entry, ctrl;
944 unsigned long flags;
945
946 len = skb->len;
947 spin_lock_irqsave(&bp->lock, flags);
948
949 /* This is a hard error, log it. */
950 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
951 netif_stop_queue(dev);
952 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
953 goto err_out;
954 }
955
956 mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
957 if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
958 struct sk_buff *bounce_skb;
959
960 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
961 if (!ssb_dma_mapping_error(bp->sdev, mapping))
962 ssb_dma_unmap_single(bp->sdev, mapping, len,
963 DMA_TO_DEVICE);
964
965 bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
966 if (!bounce_skb)
967 goto err_out;
968
969 mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
970 len, DMA_TO_DEVICE);
971 if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
972 if (!ssb_dma_mapping_error(bp->sdev, mapping))
973 ssb_dma_unmap_single(bp->sdev, mapping,
974 len, DMA_TO_DEVICE);
975 dev_kfree_skb_any(bounce_skb);
976 goto err_out;
977 }
978
979 skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
980 dev_kfree_skb_any(skb);
981 skb = bounce_skb;
982 }
983
984 entry = bp->tx_prod;
985 bp->tx_buffers[entry].skb = skb;
986 bp->tx_buffers[entry].mapping = mapping;
987
988 ctrl = (len & DESC_CTRL_LEN);
989 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
990 if (entry == (B44_TX_RING_SIZE - 1))
991 ctrl |= DESC_CTRL_EOT;
992
993 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
994 	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);
995
996 if (bp->flags & B44_FLAG_TX_RING_HACK)
997 b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
998 entry * sizeof(bp->tx_ring[0]),
999 DMA_TO_DEVICE);
1000
1001 entry = NEXT_TX(entry);
1002
1003 bp->tx_prod = entry;
1004
1005 wmb();
1006
1007 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
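	/* Some chip revisions need the TX pointer written twice
	 * (B44_FLAG_BUGGY_TXPTR), and buses that reorder posted writes
	 * get a readback to flush the update out (B44_FLAG_REORDER_BUG).
	 */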
1008 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1009 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1010 if (bp->flags & B44_FLAG_REORDER_BUG)
1011 br32(bp, B44_DMATX_PTR);
1012
1013 if (TX_BUFFS_AVAIL(bp) < 1)
1014 netif_stop_queue(dev);
1015
1016 dev->trans_start = jiffies;
1017
1018 out_unlock:
1019 spin_unlock_irqrestore(&bp->lock, flags);
1020
1021 return rc;
1022
1023 err_out:
1024 rc = NETDEV_TX_BUSY;
1025 goto out_unlock;
1026 }
1027
1028 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1029 {
1030 struct b44 *bp = netdev_priv(dev);
1031
1032 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1033 return -EINVAL;
1034
1035 if (!netif_running(dev)) {
1036 /* We'll just catch it later when the
1037 * device is up'd.
1038 */
1039 dev->mtu = new_mtu;
1040 return 0;
1041 }
1042
1043 spin_lock_irq(&bp->lock);
1044 b44_halt(bp);
1045 dev->mtu = new_mtu;
1046 b44_init_rings(bp);
1047 b44_init_hw(bp, B44_FULL_RESET);
1048 spin_unlock_irq(&bp->lock);
1049
1050 b44_enable_ints(bp);
1051
1052 return 0;
1053 }
1054
1055 /* Free up pending packets in all rx/tx rings.
1056 *
1057 * The chip has been shut down and the driver detached from
1058 * the networking, so no interrupts or new tx packets will
1059 * end up in the driver. bp->lock is not held and we are not
1060 * in an interrupt context and thus may sleep.
1061 */
1062 static void b44_free_rings(struct b44 *bp)
1063 {
1064 struct ring_info *rp;
1065 int i;
1066
1067 for (i = 0; i < B44_RX_RING_SIZE; i++) {
1068 rp = &bp->rx_buffers[i];
1069
1070 if (rp->skb == NULL)
1071 continue;
1072 ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
1073 DMA_FROM_DEVICE);
1074 dev_kfree_skb_any(rp->skb);
1075 rp->skb = NULL;
1076 }
1077
1078 /* XXX needs changes once NETIF_F_SG is set... */
1079 for (i = 0; i < B44_TX_RING_SIZE; i++) {
1080 rp = &bp->tx_buffers[i];
1081
1082 if (rp->skb == NULL)
1083 continue;
1084 ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
1085 DMA_TO_DEVICE);
1086 dev_kfree_skb_any(rp->skb);
1087 rp->skb = NULL;
1088 }
1089 }
1090
1091 /* Initialize tx/rx rings for packet processing.
1092 *
1093 * The chip has been shut down and the driver detached from
1094 * the networking, so no interrupts or new tx packets will
1095 * end up in the driver.
1096 */
1097 static void b44_init_rings(struct b44 *bp)
1098 {
1099 int i;
1100
1101 b44_free_rings(bp);
1102
1103 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1104 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1105
1106 if (bp->flags & B44_FLAG_RX_RING_HACK)
1107 ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
1108 DMA_TABLE_BYTES,
1109 DMA_BIDIRECTIONAL);
1110
1111 if (bp->flags & B44_FLAG_TX_RING_HACK)
1112 ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
1113 DMA_TABLE_BYTES,
1114 DMA_TO_DEVICE);
1115
1116 for (i = 0; i < bp->rx_pending; i++) {
1117 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1118 break;
1119 }
1120 }
1121
1122 /*
1123 * Must not be invoked with interrupt sources disabled and
1124  * the hardware shut down.
1125 */
1126 static void b44_free_consistent(struct b44 *bp)
1127 {
1128 kfree(bp->rx_buffers);
1129 bp->rx_buffers = NULL;
1130 kfree(bp->tx_buffers);
1131 bp->tx_buffers = NULL;
1132 if (bp->rx_ring) {
1133 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1134 ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
1135 DMA_TABLE_BYTES,
1136 DMA_BIDIRECTIONAL);
1137 kfree(bp->rx_ring);
1138 } else
1139 ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
1140 bp->rx_ring, bp->rx_ring_dma,
1141 GFP_KERNEL);
1142 bp->rx_ring = NULL;
1143 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1144 }
1145 if (bp->tx_ring) {
1146 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1147 ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
1148 DMA_TABLE_BYTES,
1149 DMA_TO_DEVICE);
1150 kfree(bp->tx_ring);
1151 } else
1152 ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
1153 bp->tx_ring, bp->tx_ring_dma,
1154 GFP_KERNEL);
1155 bp->tx_ring = NULL;
1156 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1157 }
1158 }
1159
1160 /*
1161 * Must not be invoked with interrupt sources disabled and
1162  * the hardware shut down. Can sleep.
1163 */
1164 static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1165 {
1166 int size;
1167
1168 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1169 bp->rx_buffers = kzalloc(size, gfp);
1170 if (!bp->rx_buffers)
1171 goto out_err;
1172
1173 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1174 bp->tx_buffers = kzalloc(size, gfp);
1175 if (!bp->tx_buffers)
1176 goto out_err;
1177
1178 size = DMA_TABLE_BYTES;
1179 bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
1180 if (!bp->rx_ring) {
1181 		/* Allocation may have failed due to ssb_dma_alloc_consistent
1182 insisting on use of GFP_DMA, which is more restrictive
1183 than necessary... */
1184 struct dma_desc *rx_ring;
1185 dma_addr_t rx_ring_dma;
1186
1187 rx_ring = kzalloc(size, gfp);
1188 if (!rx_ring)
1189 goto out_err;
1190
1191 rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
1192 DMA_TABLE_BYTES,
1193 DMA_BIDIRECTIONAL);
1194
1195 if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
1196 rx_ring_dma + size > DMA_BIT_MASK(30)) {
1197 kfree(rx_ring);
1198 goto out_err;
1199 }
1200
1201 bp->rx_ring = rx_ring;
1202 bp->rx_ring_dma = rx_ring_dma;
1203 bp->flags |= B44_FLAG_RX_RING_HACK;
1204 }
1205
1206 bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
1207 if (!bp->tx_ring) {
1208 /* Allocation may have failed due to ssb_dma_alloc_consistent
1209 insisting on use of GFP_DMA, which is more restrictive
1210 than necessary... */
1211 struct dma_desc *tx_ring;
1212 dma_addr_t tx_ring_dma;
1213
1214 tx_ring = kzalloc(size, gfp);
1215 if (!tx_ring)
1216 goto out_err;
1217
1218 tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
1219 DMA_TABLE_BYTES,
1220 DMA_TO_DEVICE);
1221
1222 if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
1223 tx_ring_dma + size > DMA_BIT_MASK(30)) {
1224 kfree(tx_ring);
1225 goto out_err;
1226 }
1227
1228 bp->tx_ring = tx_ring;
1229 bp->tx_ring_dma = tx_ring_dma;
1230 bp->flags |= B44_FLAG_TX_RING_HACK;
1231 }
1232
1233 return 0;
1234
1235 out_err:
1236 b44_free_consistent(bp);
1237 return -ENOMEM;
1238 }
1239
1240 /* bp->lock is held. */
1241 static void b44_clear_stats(struct b44 *bp)
1242 {
1243 unsigned long reg;
1244
1245 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1246 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1247 br32(bp, reg);
1248 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1249 br32(bp, reg);
1250 }
1251
1252 /* bp->lock is held. */
1253 static void b44_chip_reset(struct b44 *bp, int reset_kind)
1254 {
1255 struct ssb_device *sdev = bp->sdev;
1256 bool was_enabled;
1257
1258 was_enabled = ssb_device_is_enabled(bp->sdev);
1259
1260 ssb_device_enable(bp->sdev, 0);
1261 ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
1262
1263 if (was_enabled) {
1264 bw32(bp, B44_RCV_LAZY, 0);
1265 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1266 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1267 bw32(bp, B44_DMATX_CTRL, 0);
1268 bp->tx_prod = bp->tx_cons = 0;
1269 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1270 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1271 100, 0);
1272 }
1273 bw32(bp, B44_DMARX_CTRL, 0);
1274 bp->rx_prod = bp->rx_cons = 0;
1275 }
1276
1277 b44_clear_stats(bp);
1278
1279 /*
1280 * Don't enable PHY if we are doing a partial reset
1281 * we are probably going to power down
1282 */
1283 if (reset_kind == B44_CHIP_RESET_PARTIAL)
1284 return;
1285
1286 switch (sdev->bus->bustype) {
1287 case SSB_BUSTYPE_SSB:
1288 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1289 (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
1290 B44_MDC_RATIO)
1291 & MDIO_CTRL_MAXF_MASK)));
1292 break;
1293 case SSB_BUSTYPE_PCI:
1294 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1295 (0x0d & MDIO_CTRL_MAXF_MASK)));
1296 break;
1297 case SSB_BUSTYPE_PCMCIA:
1298 case SSB_BUSTYPE_SDIO:
1299 WARN_ON(1); /* A device with this bus does not exist. */
1300 break;
1301 }
1302
1303 br32(bp, B44_MDIO_CTRL);
1304
1305 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1306 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1307 br32(bp, B44_ENET_CTRL);
1308 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1309 } else {
1310 u32 val = br32(bp, B44_DEVCTRL);
1311
1312 if (val & DEVCTRL_EPR) {
1313 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1314 br32(bp, B44_DEVCTRL);
1315 udelay(100);
1316 }
1317 bp->flags |= B44_FLAG_INTERNAL_PHY;
1318 }
1319 }
1320
1321 /* bp->lock is held. */
1322 static void b44_halt(struct b44 *bp)
1323 {
1324 b44_disable_ints(bp);
1325 /* reset PHY */
1326 b44_phy_reset(bp);
1327 /* power down PHY */
1328 netdev_info(bp->dev, "powering down PHY\n");
1329 bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
1330 /* now reset the chip, but without enabling the MAC&PHY
1331 * part of it. This has to be done _after_ we shut down the PHY */
1332 b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1333 }
1334
1335 /* bp->lock is held. */
1336 static void __b44_set_mac_addr(struct b44 *bp)
1337 {
1338 bw32(bp, B44_CAM_CTRL, 0);
1339 if (!(bp->dev->flags & IFF_PROMISC)) {
1340 u32 val;
1341
1342 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1343 val = br32(bp, B44_CAM_CTRL);
1344 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1345 }
1346 }
1347
1348 static int b44_set_mac_addr(struct net_device *dev, void *p)
1349 {
1350 struct b44 *bp = netdev_priv(dev);
1351 struct sockaddr *addr = p;
1352 u32 val;
1353
1354 if (netif_running(dev))
1355 return -EBUSY;
1356
1357 if (!is_valid_ether_addr(addr->sa_data))
1358 return -EINVAL;
1359
1360 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1361
1362 spin_lock_irq(&bp->lock);
1363
1364 val = br32(bp, B44_RXCONFIG);
1365 if (!(val & RXCONFIG_CAM_ABSENT))
1366 __b44_set_mac_addr(bp);
1367
1368 spin_unlock_irq(&bp->lock);
1369
1370 return 0;
1371 }
1372
1373 /* Called at device open time to get the chip ready for
1374 * packet processing. Invoked with bp->lock held.
1375 */
1376 static void __b44_set_rx_mode(struct net_device *);
1377 static void b44_init_hw(struct b44 *bp, int reset_kind)
1378 {
1379 u32 val;
1380
1381 b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1382 if (reset_kind == B44_FULL_RESET) {
1383 b44_phy_reset(bp);
1384 b44_setup_phy(bp);
1385 }
1386
1387 /* Enable CRC32, set proper LED modes and power on PHY */
1388 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1389 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1390
1391 /* This sets the MAC address too. */
1392 __b44_set_rx_mode(bp->dev);
1393
1394 /* MTU + eth header + possible VLAN tag + struct rx_header */
1395 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1396 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1397
1398 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1399 if (reset_kind == B44_PARTIAL_RESET) {
1400 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1401 (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1402 } else {
1403 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1404 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1405 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1406 (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1407 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1408
1409 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1410 bp->rx_prod = bp->rx_pending;
1411
1412 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1413 }
1414
1415 val = br32(bp, B44_ENET_CTRL);
1416 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1417 }
1418
1419 static int b44_open(struct net_device *dev)
1420 {
1421 struct b44 *bp = netdev_priv(dev);
1422 int err;
1423
1424 err = b44_alloc_consistent(bp, GFP_KERNEL);
1425 if (err)
1426 goto out;
1427
1428 napi_enable(&bp->napi);
1429
1430 b44_init_rings(bp);
1431 b44_init_hw(bp, B44_FULL_RESET);
1432
1433 b44_check_phy(bp);
1434
1435 err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1436 if (unlikely(err < 0)) {
1437 napi_disable(&bp->napi);
1438 b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1439 b44_free_rings(bp);
1440 b44_free_consistent(bp);
1441 goto out;
1442 }
1443
1444 init_timer(&bp->timer);
1445 bp->timer.expires = jiffies + HZ;
1446 bp->timer.data = (unsigned long) bp;
1447 bp->timer.function = b44_timer;
1448 add_timer(&bp->timer);
1449
1450 b44_enable_ints(bp);
1451 netif_start_queue(dev);
1452 out:
1453 return err;
1454 }
1455
1456 #ifdef CONFIG_NET_POLL_CONTROLLER
1457 /*
1458 * Polling receive - used by netconsole and other diagnostic tools
1459 * to allow network i/o with interrupts disabled.
1460 */
1461 static void b44_poll_controller(struct net_device *dev)
1462 {
1463 disable_irq(dev->irq);
1464 b44_interrupt(dev->irq, dev);
1465 enable_irq(dev->irq);
1466 }
1467 #endif
1468
1469 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1470 {
1471 u32 i;
1472 u32 *pattern = (u32 *) pp;
1473
1474 for (i = 0; i < bytes; i += sizeof(u32)) {
1475 bw32(bp, B44_FILT_ADDR, table_offset + i);
1476 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1477 }
1478 }
1479
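/* Build a magic-packet match pattern at 'offset' inside ppattern: six
 * bytes of 0xFF followed by up to 16 repetitions of the MAC address,
 * with the matching bits set in the byte mask pmask.  Returns the
 * pattern length minus one, the encoding that B44_WKUP_LEN expects.
 */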
1480 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1481 {
1482 int magicsync = 6;
1483 int k, j, len = offset;
1484 int ethaddr_bytes = ETH_ALEN;
1485
1486 memset(ppattern + offset, 0xff, magicsync);
1487 for (j = 0; j < magicsync; j++)
1488 set_bit(len++, (unsigned long *) pmask);
1489
1490 for (j = 0; j < B44_MAX_PATTERNS; j++) {
1491 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1492 ethaddr_bytes = ETH_ALEN;
1493 else
1494 ethaddr_bytes = B44_PATTERN_SIZE - len;
1495 		if (ethaddr_bytes <= 0)
1496 break;
1497 		for (k = 0; k < ethaddr_bytes; k++) {
1498 ppattern[offset + magicsync +
1499 (j * ETH_ALEN) + k] = macaddr[k];
1500 set_bit(len++, (unsigned long *) pmask);
1501 }
1502 }
1503 return len - 1;
1504 }
1505
1506 /* Setup magic packet patterns in the b44 WOL
1507 * pattern matching filter.
1508 */
1509 static void b44_setup_pseudo_magicp(struct b44 *bp)
1510 {
1511
1512 u32 val;
1513 int plen0, plen1, plen2;
1514 u8 *pwol_pattern;
1515 u8 pwol_mask[B44_PMASK_SIZE];
1516
1517 pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1518 if (!pwol_pattern) {
1519 pr_err("Memory not available for WOL\n");
1520 return;
1521 }
1522
1523 /* Ipv4 magic packet pattern - pattern 0.*/
1524 memset(pwol_mask, 0, B44_PMASK_SIZE);
1525 plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1526 B44_ETHIPV4UDP_HLEN);
1527
1528 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1529 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1530
1531 /* Raw ethernet II magic packet pattern - pattern 1 */
1532 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1533 memset(pwol_mask, 0, B44_PMASK_SIZE);
1534 plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1535 ETH_HLEN);
1536
1537 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1538 B44_PATTERN_BASE + B44_PATTERN_SIZE);
1539 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1540 B44_PMASK_BASE + B44_PMASK_SIZE);
1541
1542 /* Ipv6 magic packet pattern - pattern 2 */
1543 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1544 memset(pwol_mask, 0, B44_PMASK_SIZE);
1545 plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1546 B44_ETHIPV6UDP_HLEN);
1547
1548 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1549 B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1550 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1551 B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1552
1553 kfree(pwol_pattern);
1554
1555 	/* set these patterns' lengths: one less than each real length */
1556 val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1557 bw32(bp, B44_WKUP_LEN, val);
1558
1559 /* enable wakeup pattern matching */
1560 val = br32(bp, B44_DEVCTRL);
1561 bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1562
1563 }
1564
1565 #ifdef CONFIG_B44_PCI
1566 static void b44_setup_wol_pci(struct b44 *bp)
1567 {
1568 u16 val;
1569
1570 if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
1571 bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
1572 pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
1573 pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
1574 }
1575 }
1576 #else
1577 static inline void b44_setup_wol_pci(struct b44 *bp) { }
1578 #endif /* CONFIG_B44_PCI */
1579
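/* Arm Wake-on-LAN before the chip is powered down.  Cores with
 * B44_FLAG_B0_ANDLATER (revision >= 7) can match magic packets
 * directly once the station address is written to B44_ADDR_LO/HI and
 * DEVCTRL_MPM is set; older cores get the pseudo magic-packet patterns
 * loaded into the pattern-match filter instead.  On PCI hosts the
 * power-management enable bits are also set via b44_setup_wol_pci().
 */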
1580 static void b44_setup_wol(struct b44 *bp)
1581 {
1582 u32 val;
1583
1584 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1585
1586 if (bp->flags & B44_FLAG_B0_ANDLATER) {
1587
1588 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1589
1590 val = bp->dev->dev_addr[2] << 24 |
1591 bp->dev->dev_addr[3] << 16 |
1592 bp->dev->dev_addr[4] << 8 |
1593 bp->dev->dev_addr[5];
1594 bw32(bp, B44_ADDR_LO, val);
1595
1596 val = bp->dev->dev_addr[0] << 8 |
1597 bp->dev->dev_addr[1];
1598 bw32(bp, B44_ADDR_HI, val);
1599
1600 val = br32(bp, B44_DEVCTRL);
1601 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1602
1603 } else {
1604 b44_setup_pseudo_magicp(bp);
1605 }
1606 b44_setup_wol_pci(bp);
1607 }
1608
1609 static int b44_close(struct net_device *dev)
1610 {
1611 struct b44 *bp = netdev_priv(dev);
1612
1613 netif_stop_queue(dev);
1614
1615 napi_disable(&bp->napi);
1616
1617 del_timer_sync(&bp->timer);
1618
1619 spin_lock_irq(&bp->lock);
1620
1621 b44_halt(bp);
1622 b44_free_rings(bp);
1623 netif_carrier_off(dev);
1624
1625 spin_unlock_irq(&bp->lock);
1626
1627 free_irq(dev->irq, dev);
1628
1629 if (bp->flags & B44_FLAG_WOL_ENABLE) {
1630 b44_init_hw(bp, B44_PARTIAL_RESET);
1631 b44_setup_wol(bp);
1632 }
1633
1634 b44_free_consistent(bp);
1635
1636 return 0;
1637 }
1638
1639 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1640 {
1641 struct b44 *bp = netdev_priv(dev);
1642 struct net_device_stats *nstat = &dev->stats;
1643 struct b44_hw_stats *hwstat = &bp->hw_stats;
1644
1645 /* Convert HW stats into netdevice stats. */
1646 nstat->rx_packets = hwstat->rx_pkts;
1647 nstat->tx_packets = hwstat->tx_pkts;
1648 nstat->rx_bytes = hwstat->rx_octets;
1649 nstat->tx_bytes = hwstat->tx_octets;
1650 nstat->tx_errors = (hwstat->tx_jabber_pkts +
1651 hwstat->tx_oversize_pkts +
1652 hwstat->tx_underruns +
1653 hwstat->tx_excessive_cols +
1654 hwstat->tx_late_cols);
1655 nstat->multicast = hwstat->tx_multicast_pkts;
1656 nstat->collisions = hwstat->tx_total_cols;
1657
1658 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1659 hwstat->rx_undersize);
1660 nstat->rx_over_errors = hwstat->rx_missed_pkts;
1661 nstat->rx_frame_errors = hwstat->rx_align_errs;
1662 nstat->rx_crc_errors = hwstat->rx_crc_errs;
1663 nstat->rx_errors = (hwstat->rx_jabber_pkts +
1664 hwstat->rx_oversize_pkts +
1665 hwstat->rx_missed_pkts +
1666 hwstat->rx_crc_align_errs +
1667 hwstat->rx_undersize +
1668 hwstat->rx_crc_errs +
1669 hwstat->rx_align_errs +
1670 hwstat->rx_symbol_errs);
1671
1672 nstat->tx_aborted_errors = hwstat->tx_underruns;
1673 #if 0
1674 /* Carrier lost counter seems to be broken for some devices */
1675 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1676 #endif
1677
1678 return nstat;
1679 }
1680
1681 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1682 {
1683 struct dev_mc_list *mclist;
1684 int i, num_ents;
1685
1686 num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
1687 i = 0;
1688 netdev_for_each_mc_addr(mclist, dev) {
1689 if (i == num_ents)
1690 break;
1691 __b44_cam_write(bp, mclist->dmi_addr, i++ + 1);
1692 }
1693 return i+1;
1694 }
1695
1696 static void __b44_set_rx_mode(struct net_device *dev)
1697 {
1698 struct b44 *bp = netdev_priv(dev);
1699 u32 val;
1700
1701 val = br32(bp, B44_RXCONFIG);
1702 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1703 if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1704 val |= RXCONFIG_PROMISC;
1705 bw32(bp, B44_RXCONFIG, val);
1706 } else {
1707 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1708 int i = 1;
1709
1710 __b44_set_mac_addr(bp);
1711
1712 if ((dev->flags & IFF_ALLMULTI) ||
1713 (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
1714 val |= RXCONFIG_ALLMULTI;
1715 else
1716 i = __b44_load_mcast(bp, dev);
1717
1718 for (; i < 64; i++)
1719 __b44_cam_write(bp, zero, i);
1720
1721 bw32(bp, B44_RXCONFIG, val);
1722 val = br32(bp, B44_CAM_CTRL);
1723 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1724 }
1725 }
1726
1727 static void b44_set_rx_mode(struct net_device *dev)
1728 {
1729 struct b44 *bp = netdev_priv(dev);
1730
1731 spin_lock_irq(&bp->lock);
1732 __b44_set_rx_mode(dev);
1733 spin_unlock_irq(&bp->lock);
1734 }
1735
1736 static u32 b44_get_msglevel(struct net_device *dev)
1737 {
1738 struct b44 *bp = netdev_priv(dev);
1739 return bp->msg_enable;
1740 }
1741
1742 static void b44_set_msglevel(struct net_device *dev, u32 value)
1743 {
1744 struct b44 *bp = netdev_priv(dev);
1745 bp->msg_enable = value;
1746 }
1747
1748 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1749 {
1750 struct b44 *bp = netdev_priv(dev);
1751 struct ssb_bus *bus = bp->sdev->bus;
1752
1753 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1754 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
1755 switch (bus->bustype) {
1756 case SSB_BUSTYPE_PCI:
1757 strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1758 break;
1759 case SSB_BUSTYPE_SSB:
1760 strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
1761 break;
1762 case SSB_BUSTYPE_PCMCIA:
1763 case SSB_BUSTYPE_SDIO:
1764 WARN_ON(1); /* A device with this bus does not exist. */
1765 break;
1766 }
1767 }
1768
1769 static int b44_nway_reset(struct net_device *dev)
1770 {
1771 struct b44 *bp = netdev_priv(dev);
1772 u32 bmcr;
1773 int r;
1774
1775 spin_lock_irq(&bp->lock);
1776 b44_readphy(bp, MII_BMCR, &bmcr);
1777 b44_readphy(bp, MII_BMCR, &bmcr);
1778 r = -EINVAL;
1779 if (bmcr & BMCR_ANENABLE) {
1780 b44_writephy(bp, MII_BMCR,
1781 bmcr | BMCR_ANRESTART);
1782 r = 0;
1783 }
1784 spin_unlock_irq(&bp->lock);
1785
1786 return r;
1787 }
1788
1789 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1790 {
1791 struct b44 *bp = netdev_priv(dev);
1792
1793 cmd->supported = (SUPPORTED_Autoneg);
1794 cmd->supported |= (SUPPORTED_100baseT_Half |
1795 SUPPORTED_100baseT_Full |
1796 SUPPORTED_10baseT_Half |
1797 SUPPORTED_10baseT_Full |
1798 SUPPORTED_MII);
1799
1800 cmd->advertising = 0;
1801 if (bp->flags & B44_FLAG_ADV_10HALF)
1802 cmd->advertising |= ADVERTISED_10baseT_Half;
1803 if (bp->flags & B44_FLAG_ADV_10FULL)
1804 cmd->advertising |= ADVERTISED_10baseT_Full;
1805 if (bp->flags & B44_FLAG_ADV_100HALF)
1806 cmd->advertising |= ADVERTISED_100baseT_Half;
1807 if (bp->flags & B44_FLAG_ADV_100FULL)
1808 cmd->advertising |= ADVERTISED_100baseT_Full;
1809 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1810 cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1811 SPEED_100 : SPEED_10;
1812 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1813 DUPLEX_FULL : DUPLEX_HALF;
1814 cmd->port = 0;
1815 cmd->phy_address = bp->phy_addr;
1816 cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1817 XCVR_INTERNAL : XCVR_EXTERNAL;
1818 cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1819 AUTONEG_DISABLE : AUTONEG_ENABLE;
1820 if (cmd->autoneg == AUTONEG_ENABLE)
1821 cmd->advertising |= ADVERTISED_Autoneg;
1822 	if (!netif_running(dev)) {
1823 cmd->speed = 0;
1824 cmd->duplex = 0xff;
1825 }
1826 cmd->maxtxpkt = 0;
1827 cmd->maxrxpkt = 0;
1828 return 0;
1829 }
1830
1831 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1832 {
1833 struct b44 *bp = netdev_priv(dev);
1834
1835 /* We do not support gigabit. */
1836 if (cmd->autoneg == AUTONEG_ENABLE) {
1837 if (cmd->advertising &
1838 (ADVERTISED_1000baseT_Half |
1839 ADVERTISED_1000baseT_Full))
1840 return -EINVAL;
1841 } else if ((cmd->speed != SPEED_100 &&
1842 cmd->speed != SPEED_10) ||
1843 (cmd->duplex != DUPLEX_HALF &&
1844 cmd->duplex != DUPLEX_FULL)) {
1845 return -EINVAL;
1846 }
1847
1848 spin_lock_irq(&bp->lock);
1849
1850 if (cmd->autoneg == AUTONEG_ENABLE) {
1851 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1852 B44_FLAG_100_BASE_T |
1853 B44_FLAG_FULL_DUPLEX |
1854 B44_FLAG_ADV_10HALF |
1855 B44_FLAG_ADV_10FULL |
1856 B44_FLAG_ADV_100HALF |
1857 B44_FLAG_ADV_100FULL);
1858 if (cmd->advertising == 0) {
1859 bp->flags |= (B44_FLAG_ADV_10HALF |
1860 B44_FLAG_ADV_10FULL |
1861 B44_FLAG_ADV_100HALF |
1862 B44_FLAG_ADV_100FULL);
1863 } else {
1864 if (cmd->advertising & ADVERTISED_10baseT_Half)
1865 bp->flags |= B44_FLAG_ADV_10HALF;
1866 if (cmd->advertising & ADVERTISED_10baseT_Full)
1867 bp->flags |= B44_FLAG_ADV_10FULL;
1868 if (cmd->advertising & ADVERTISED_100baseT_Half)
1869 bp->flags |= B44_FLAG_ADV_100HALF;
1870 if (cmd->advertising & ADVERTISED_100baseT_Full)
1871 bp->flags |= B44_FLAG_ADV_100FULL;
1872 }
1873 } else {
1874 bp->flags |= B44_FLAG_FORCE_LINK;
1875 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1876 if (cmd->speed == SPEED_100)
1877 bp->flags |= B44_FLAG_100_BASE_T;
1878 if (cmd->duplex == DUPLEX_FULL)
1879 bp->flags |= B44_FLAG_FULL_DUPLEX;
1880 }
1881
1882 if (netif_running(dev))
1883 b44_setup_phy(bp);
1884
1885 spin_unlock_irq(&bp->lock);
1886
1887 return 0;
1888 }
1889
1890 static void b44_get_ringparam(struct net_device *dev,
1891 struct ethtool_ringparam *ering)
1892 {
1893 struct b44 *bp = netdev_priv(dev);
1894
1895 ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1896 ering->rx_pending = bp->rx_pending;
1897
1898 /* XXX ethtool lacks a tx_max_pending, oops... */
1899 }
1900
1901 static int b44_set_ringparam(struct net_device *dev,
1902 struct ethtool_ringparam *ering)
1903 {
1904 struct b44 *bp = netdev_priv(dev);
1905
1906 if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1907 (ering->rx_mini_pending != 0) ||
1908 (ering->rx_jumbo_pending != 0) ||
1909 (ering->tx_pending > B44_TX_RING_SIZE - 1))
1910 return -EINVAL;
1911
1912 spin_lock_irq(&bp->lock);
1913
1914 bp->rx_pending = ering->rx_pending;
1915 bp->tx_pending = ering->tx_pending;
1916
1917 b44_halt(bp);
1918 b44_init_rings(bp);
1919 b44_init_hw(bp, B44_FULL_RESET);
1920 netif_wake_queue(bp->dev);
1921 spin_unlock_irq(&bp->lock);
1922
1923 b44_enable_ints(bp);
1924
1925 return 0;
1926 }
1927
1928 static void b44_get_pauseparam(struct net_device *dev,
1929 struct ethtool_pauseparam *epause)
1930 {
1931 struct b44 *bp = netdev_priv(dev);
1932
1933 epause->autoneg =
1934 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1935 epause->rx_pause =
1936 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1937 epause->tx_pause =
1938 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1939 }
1940
1941 static int b44_set_pauseparam(struct net_device *dev,
1942 struct ethtool_pauseparam *epause)
1943 {
1944 struct b44 *bp = netdev_priv(dev);
1945
1946 spin_lock_irq(&bp->lock);
1947 if (epause->autoneg)
1948 bp->flags |= B44_FLAG_PAUSE_AUTO;
1949 else
1950 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1951 if (epause->rx_pause)
1952 bp->flags |= B44_FLAG_RX_PAUSE;
1953 else
1954 bp->flags &= ~B44_FLAG_RX_PAUSE;
1955 if (epause->tx_pause)
1956 bp->flags |= B44_FLAG_TX_PAUSE;
1957 else
1958 bp->flags &= ~B44_FLAG_TX_PAUSE;
1959 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1960 b44_halt(bp);
1961 b44_init_rings(bp);
1962 b44_init_hw(bp, B44_FULL_RESET);
1963 } else {
1964 __b44_set_flow_ctrl(bp, bp->flags);
1965 }
1966 spin_unlock_irq(&bp->lock);
1967
1968 b44_enable_ints(bp);
1969
1970 return 0;
1971 }
1972
1973 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1974 {
1975 	switch (stringset) {
1976 case ETH_SS_STATS:
1977 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1978 break;
1979 }
1980 }
1981
1982 static int b44_get_sset_count(struct net_device *dev, int sset)
1983 {
1984 switch (sset) {
1985 case ETH_SS_STATS:
1986 return ARRAY_SIZE(b44_gstrings);
1987 default:
1988 return -EOPNOTSUPP;
1989 }
1990 }
1991
1992 static void b44_get_ethtool_stats(struct net_device *dev,
1993 struct ethtool_stats *stats, u64 *data)
1994 {
1995 struct b44 *bp = netdev_priv(dev);
1996 u32 *val = &bp->hw_stats.tx_good_octets;
1997 u32 i;
1998
1999 spin_lock_irq(&bp->lock);
2000
2001 b44_stats_update(bp);
2002
2003 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
2004 *data++ = *val++;
2005
2006 spin_unlock_irq(&bp->lock);
2007 }
2008
2009 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2010 {
2011 struct b44 *bp = netdev_priv(dev);
2012
2013 wol->supported = WAKE_MAGIC;
2014 if (bp->flags & B44_FLAG_WOL_ENABLE)
2015 wol->wolopts = WAKE_MAGIC;
2016 else
2017 wol->wolopts = 0;
2018 memset(&wol->sopass, 0, sizeof(wol->sopass));
2019 }
2020
2021 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2022 {
2023 struct b44 *bp = netdev_priv(dev);
2024
2025 spin_lock_irq(&bp->lock);
2026 if (wol->wolopts & WAKE_MAGIC)
2027 bp->flags |= B44_FLAG_WOL_ENABLE;
2028 else
2029 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2030 spin_unlock_irq(&bp->lock);
2031
2032 return 0;
2033 }
2034
2035 static const struct ethtool_ops b44_ethtool_ops = {
2036 .get_drvinfo = b44_get_drvinfo,
2037 .get_settings = b44_get_settings,
2038 .set_settings = b44_set_settings,
2039 .nway_reset = b44_nway_reset,
2040 .get_link = ethtool_op_get_link,
2041 .get_wol = b44_get_wol,
2042 .set_wol = b44_set_wol,
2043 .get_ringparam = b44_get_ringparam,
2044 .set_ringparam = b44_set_ringparam,
2045 .get_pauseparam = b44_get_pauseparam,
2046 .set_pauseparam = b44_set_pauseparam,
2047 .get_msglevel = b44_get_msglevel,
2048 .set_msglevel = b44_set_msglevel,
2049 .get_strings = b44_get_strings,
2050 .get_sset_count = b44_get_sset_count,
2051 .get_ethtool_stats = b44_get_ethtool_stats,
2052 };
2053
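/* MII ioctls are forwarded to generic_mii_ioctl() under bp->lock and are
 * only honoured while the interface is running.
 */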
2054 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2055 {
2056 struct mii_ioctl_data *data = if_mii(ifr);
2057 struct b44 *bp = netdev_priv(dev);
2058 int err = -EINVAL;
2059
2060 if (!netif_running(dev))
2061 goto out;
2062
2063 spin_lock_irq(&bp->lock);
2064 err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2065 spin_unlock_irq(&bp->lock);
2066 out:
2067 return err;
2068 }
2069
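/* Pull the DMA translation offset, MAC address and PHY address from the
 * SSB SPROM; on a native SSB bus the second and later ethernet cores use
 * the et1 entries instead of et0.
 */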
2070 static int __devinit b44_get_invariants(struct b44 *bp)
2071 {
2072 struct ssb_device *sdev = bp->sdev;
2073 int err = 0;
2074 u8 *addr;
2075
2076 bp->dma_offset = ssb_dma_translation(sdev);
2077
2078 if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
2079 instance > 1) {
2080 addr = sdev->bus->sprom.et1mac;
2081 bp->phy_addr = sdev->bus->sprom.et1phyaddr;
2082 } else {
2083 addr = sdev->bus->sprom.et0mac;
2084 bp->phy_addr = sdev->bus->sprom.et0phyaddr;
2085 }
2086 /* Some ROMs have buggy PHY addresses with the high
2087 * bits set (sign extension?). Truncate them to a
2088 * valid PHY address. */
2089 bp->phy_addr &= 0x1F;
2090
2091 	memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
2092
2093 	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2094 pr_err("Invalid MAC address found in EEPROM\n");
2095 return -EINVAL;
2096 }
2097
2098 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2099
2100 bp->imask = IMASK_DEF;
2101
2102 /* XXX - really required?
2103 bp->flags |= B44_FLAG_BUGGY_TXPTR;
2104 */
2105
2106 if (bp->sdev->id.revision >= 7)
2107 bp->flags |= B44_FLAG_B0_ANDLATER;
2108
2109 return err;
2110 }
2111
2112 static const struct net_device_ops b44_netdev_ops = {
2113 .ndo_open = b44_open,
2114 .ndo_stop = b44_close,
2115 .ndo_start_xmit = b44_start_xmit,
2116 .ndo_get_stats = b44_get_stats,
2117 .ndo_set_multicast_list = b44_set_rx_mode,
2118 .ndo_set_mac_address = b44_set_mac_addr,
2119 .ndo_validate_addr = eth_validate_addr,
2120 .ndo_do_ioctl = b44_ioctl,
2121 .ndo_tx_timeout = b44_tx_timeout,
2122 .ndo_change_mtu = b44_change_mtu,
2123 #ifdef CONFIG_NET_POLL_CONTROLLER
2124 .ndo_poll_controller = b44_poll_controller,
2125 #endif
2126 };
2127
2128 static int __devinit b44_init_one(struct ssb_device *sdev,
2129 const struct ssb_device_id *ent)
2130 {
2131 	static int b44_version_printed;
2132 struct net_device *dev;
2133 struct b44 *bp;
2134 int err;
2135
2136 instance++;
2137
2138 if (b44_version_printed++ == 0)
2139 pr_info("%s", version);
2140
2141
2142 dev = alloc_etherdev(sizeof(*bp));
2143 if (!dev) {
2144 dev_err(sdev->dev, "Etherdev alloc failed, aborting\n");
2145 err = -ENOMEM;
2146 goto out;
2147 }
2148
2149 SET_NETDEV_DEV(dev, sdev->dev);
2150
2151 /* No interesting netdevice features in this card... */
2152 dev->features |= 0;
2153
2154 bp = netdev_priv(dev);
2155 bp->sdev = sdev;
2156 bp->dev = dev;
2157 bp->force_copybreak = 0;
2158
2159 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2160
2161 spin_lock_init(&bp->lock);
2162
2163 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2164 bp->tx_pending = B44_DEF_TX_RING_PENDING;
2165
2166 dev->netdev_ops = &b44_netdev_ops;
2167 netif_napi_add(dev, &bp->napi, b44_poll, 64);
2168 dev->watchdog_timeo = B44_TX_TIMEOUT;
2169 dev->irq = sdev->irq;
2170 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2171
2172 netif_carrier_off(dev);
2173
2174 err = ssb_bus_powerup(sdev->bus, 0);
2175 if (err) {
2176 dev_err(sdev->dev,
2177 "Failed to powerup the bus\n");
2178 goto err_out_free_dev;
2179 }
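	/* The 44xx DMA engine can only address the low 1 GB of memory,
	 * hence the 30-bit DMA mask.
	 */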
2180 err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
2181 if (err) {
2182 dev_err(sdev->dev,
2183 "Required 30BIT DMA mask unsupported by the system\n");
2184 goto err_out_powerdown;
2185 }
2186 err = b44_get_invariants(bp);
2187 if (err) {
2188 dev_err(sdev->dev,
2189 "Problem fetching invariants of chip, aborting\n");
2190 goto err_out_powerdown;
2191 }
2192
2193 bp->mii_if.dev = dev;
2194 bp->mii_if.mdio_read = b44_mii_read;
2195 bp->mii_if.mdio_write = b44_mii_write;
2196 bp->mii_if.phy_id = bp->phy_addr;
2197 bp->mii_if.phy_id_mask = 0x1f;
2198 bp->mii_if.reg_num_mask = 0x1f;
2199
2200 /* By default, advertise all speed/duplex settings. */
2201 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2202 B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2203
2204 /* By default, auto-negotiate PAUSE. */
2205 bp->flags |= B44_FLAG_PAUSE_AUTO;
2206
2207 err = register_netdev(dev);
2208 if (err) {
2209 dev_err(sdev->dev, "Cannot register net device, aborting\n");
2210 goto err_out_powerdown;
2211 }
2212
2213 ssb_set_drvdata(sdev, dev);
2214
2215 /* Chip reset provides power to the b44 MAC & PCI cores, which
2216 * is necessary for MAC register access.
2217 */
2218 b44_chip_reset(bp, B44_CHIP_RESET_FULL);
2219
2220 /* do a phy reset to test if there is an active phy */
2221 if (b44_phy_reset(bp) < 0)
2222 bp->phy_addr = B44_PHY_ADDR_NO_PHY;
2223
2224 netdev_info(dev, "Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n",
2225 dev->dev_addr);
2226
2227 return 0;
2228
2229 err_out_powerdown:
2230 ssb_bus_may_powerdown(sdev->bus);
2231
2232 err_out_free_dev:
2233 free_netdev(dev);
2234
2235 out:
2236 return err;
2237 }
2238
2239 static void __devexit b44_remove_one(struct ssb_device *sdev)
2240 {
2241 struct net_device *dev = ssb_get_drvdata(sdev);
2242
2243 unregister_netdev(dev);
2244 ssb_device_disable(sdev, 0);
2245 ssb_bus_may_powerdown(sdev->bus);
2246 free_netdev(dev);
2247 ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2248 ssb_set_drvdata(sdev, NULL);
2249 }
2250
2251 static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
2252 {
2253 struct net_device *dev = ssb_get_drvdata(sdev);
2254 struct b44 *bp = netdev_priv(dev);
2255
2256 if (!netif_running(dev))
2257 return 0;
2258
2259 del_timer_sync(&bp->timer);
2260
2261 spin_lock_irq(&bp->lock);
2262
2263 b44_halt(bp);
2264 netif_carrier_off(bp->dev);
2265 netif_device_detach(bp->dev);
2266 b44_free_rings(bp);
2267
2268 spin_unlock_irq(&bp->lock);
2269
2270 free_irq(dev->irq, dev);
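	/* If Wake-on-LAN is enabled, partially re-initialize the chip and
	 * set up magic-packet wakeup before dropping the device to D3hot.
	 */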
2271 if (bp->flags & B44_FLAG_WOL_ENABLE) {
2272 b44_init_hw(bp, B44_PARTIAL_RESET);
2273 b44_setup_wol(bp);
2274 }
2275
2276 ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2277 return 0;
2278 }
2279
2280 static int b44_resume(struct ssb_device *sdev)
2281 {
2282 struct net_device *dev = ssb_get_drvdata(sdev);
2283 struct b44 *bp = netdev_priv(dev);
2284 int rc = 0;
2285
2286 rc = ssb_bus_powerup(sdev->bus, 0);
2287 if (rc) {
2288 dev_err(sdev->dev,
2289 "Failed to powerup the bus\n");
2290 return rc;
2291 }
2292
2293 if (!netif_running(dev))
2294 return 0;
2295
2296 rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2297 if (rc) {
2298 netdev_err(dev, "request_irq failed\n");
2299 return rc;
2300 }
2301
2302 spin_lock_irq(&bp->lock);
2303
2304 b44_init_rings(bp);
2305 b44_init_hw(bp, B44_FULL_RESET);
2306 netif_device_attach(bp->dev);
2307 spin_unlock_irq(&bp->lock);
2308
2309 b44_enable_ints(bp);
2310 netif_wake_queue(dev);
2311
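	/* Re-arm the driver's periodic timer so link/PHY state is polled
	 * again right after resume.
	 */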
2312 mod_timer(&bp->timer, jiffies + 1);
2313
2314 return 0;
2315 }
2316
2317 static struct ssb_driver b44_ssb_driver = {
2318 .name = DRV_MODULE_NAME,
2319 .id_table = b44_ssb_tbl,
2320 .probe = b44_init_one,
2321 .remove = __devexit_p(b44_remove_one),
2322 .suspend = b44_suspend,
2323 .resume = b44_resume,
2324 };
2325
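/* When CONFIG_B44_PCI is set, register the SSB PCI-host glue so cores that
 * sit behind a PCI bridge are picked up as well; otherwise these helpers
 * compile away to nothing.
 */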
2326 static inline int b44_pci_init(void)
2327 {
2328 int err = 0;
2329 #ifdef CONFIG_B44_PCI
2330 err = ssb_pcihost_register(&b44_pci_driver);
2331 #endif
2332 return err;
2333 }
2334
2335 static inline void b44_pci_exit(void)
2336 {
2337 #ifdef CONFIG_B44_PCI
2338 ssb_pcihost_unregister(&b44_pci_driver);
2339 #endif
2340 }
2341
2342 static int __init b44_init(void)
2343 {
2344 unsigned int dma_desc_align_size = dma_get_cache_alignment();
2345 int err;
2346
2347 	/* Set up parameters for syncing RX/TX DMA descriptors */
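	/* dma_desc_align_mask clears the low bits of an offset so descriptor
	 * syncs start on a cache-line boundary; dma_desc_sync_size covers at
	 * least one whole descriptor even on small cache lines.
	 */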
2348 dma_desc_align_mask = ~(dma_desc_align_size - 1);
2349 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2350
2351 err = b44_pci_init();
2352 if (err)
2353 return err;
2354 err = ssb_driver_register(&b44_ssb_driver);
2355 if (err)
2356 b44_pci_exit();
2357 return err;
2358 }
2359
2360 static void __exit b44_cleanup(void)
2361 {
2362 ssb_driver_unregister(&b44_ssb_driver);
2363 b44_pci_exit();
2364 }
2365
2366 module_init(b44_init);
2367 module_exit(b44_cleanup);
2368