]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/net/ethernet/broadcom/b44.c
b44: abort when no PHY is available at all
[mirror_ubuntu-artful-kernel.git] / drivers / net / ethernet / broadcom / b44.c
CommitLineData
753f4920 1/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
1da177e4
LT
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
753f4920
MB
4 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
5 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
6 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
8056bfaf 7 * Copyright (C) 2006 Broadcom Corporation.
eb032b98 8 * Copyright (C) 2007 Michael Buesch <m@bues.ch>
1da177e4
LT
9 *
10 * Distribute under GPL.
11 */
12
2fc96fff
JP
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
1da177e4
LT
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/types.h>
19#include <linux/netdevice.h>
20#include <linux/ethtool.h>
21#include <linux/mii.h>
22#include <linux/if_ether.h>
72f4861e 23#include <linux/if_vlan.h>
1da177e4
LT
24#include <linux/etherdevice.h>
25#include <linux/pci.h>
26#include <linux/delay.h>
27#include <linux/init.h>
a6b7a407 28#include <linux/interrupt.h>
89358f90 29#include <linux/dma-mapping.h>
753f4920 30#include <linux/ssb/ssb.h>
5a0e3ad6 31#include <linux/slab.h>
1da177e4
LT
32
33#include <asm/uaccess.h>
34#include <asm/io.h>
35#include <asm/irq.h>
36
753f4920 37
1da177e4
LT
38#include "b44.h"
39
40#define DRV_MODULE_NAME "b44"
753f4920 41#define DRV_MODULE_VERSION "2.0"
7329f0d5 42#define DRV_DESCRIPTION "Broadcom 44xx/47xx 10/100 PCI ethernet driver"
1da177e4
LT
43
44#define B44_DEF_MSG_ENABLE \
45 (NETIF_MSG_DRV | \
46 NETIF_MSG_PROBE | \
47 NETIF_MSG_LINK | \
48 NETIF_MSG_TIMER | \
49 NETIF_MSG_IFDOWN | \
50 NETIF_MSG_IFUP | \
51 NETIF_MSG_RX_ERR | \
52 NETIF_MSG_TX_ERR)
53
54/* length of time before we decide the hardware is borked,
55 * and dev->tx_timeout() should be called to fix the problem
56 */
57#define B44_TX_TIMEOUT (5 * HZ)
58
59/* hardware minimum and maximum for a single frame's data payload */
60#define B44_MIN_MTU 60
61#define B44_MAX_MTU 1500
62
63#define B44_RX_RING_SIZE 512
64#define B44_DEF_RX_RING_PENDING 200
65#define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
66 B44_RX_RING_SIZE)
67#define B44_TX_RING_SIZE 512
68#define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
69#define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
70 B44_TX_RING_SIZE)
1da177e4
LT
71
72#define TX_RING_GAP(BP) \
73 (B44_TX_RING_SIZE - (BP)->tx_pending)
74#define TX_BUFFS_AVAIL(BP) \
75 (((BP)->tx_cons <= (BP)->tx_prod) ? \
76 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
77 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
78#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
79
4ca85795
FF
80#define RX_PKT_OFFSET (RX_HEADER_LEN + 2)
81#define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET)
1da177e4
LT
82
83/* minimum number of free TX descriptors required to wake up TX process */
84#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
85
725ad800
GZ
86/* b44 internal pattern match filter info */
87#define B44_PATTERN_BASE 0x400
88#define B44_PATTERN_SIZE 0x80
89#define B44_PMASK_BASE 0x600
90#define B44_PMASK_SIZE 0x10
91#define B44_MAX_PATTERNS 16
92#define B44_ETHIPV6UDP_HLEN 62
93#define B44_ETHIPV4UDP_HLEN 42
94
753f4920 95MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
7329f0d5 96MODULE_DESCRIPTION(DRV_DESCRIPTION);
1da177e4
LT
97MODULE_LICENSE("GPL");
98MODULE_VERSION(DRV_MODULE_VERSION);
99
100static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
101module_param(b44_debug, int, 0);
102MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
103
1da177e4 104
#ifdef CONFIG_B44_PCI
/* PCI IDs of BCM44xx Fast Ethernet cores that appear as PCI functions.
 * DEFINE_PCI_DEVICE_TABLE is deprecated (and has since been removed from
 * the kernel); use a plain const struct pci_device_id array instead.
 */
static const struct pci_device_id b44_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
	{ 0 } /* terminate list with empty entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

/* Thin PCI driver shell; probing is actually handled via the SSB layer. */
static struct pci_driver b44_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
};
#endif /* CONFIG_B44_PCI */
119
120static const struct ssb_device_id b44_ssb_tbl[] = {
121 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
122 SSB_DEVTABLE_END
123};
124MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
125
1da177e4
LT
126static void b44_halt(struct b44 *);
127static void b44_init_rings(struct b44 *);
5fc7d61a
MC
128
129#define B44_FULL_RESET 1
130#define B44_FULL_RESET_SKIP_PHY 2
131#define B44_PARTIAL_RESET 3
fedb0eef
MB
132#define B44_CHIP_RESET_FULL 4
133#define B44_CHIP_RESET_PARTIAL 5
5fc7d61a 134
00e8b3aa 135static void b44_init_hw(struct b44 *, int);
1da177e4 136
9f38c636 137static int dma_desc_sync_size;
753f4920 138static int instance;
9f38c636 139
3353930d
FR
140static const char b44_gstrings[][ETH_GSTRING_LEN] = {
141#define _B44(x...) # x,
142B44_STAT_REG_DECLARE
143#undef _B44
144};
145
753f4920
MB
146static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
147 dma_addr_t dma_base,
148 unsigned long offset,
149 enum dma_data_direction dir)
9f38c636 150{
39a6f4bc
FT
151 dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
152 dma_desc_sync_size, dir);
9f38c636
JL
153}
154
753f4920
MB
155static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
156 dma_addr_t dma_base,
157 unsigned long offset,
158 enum dma_data_direction dir)
9f38c636 159{
39a6f4bc
FT
160 dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
161 dma_desc_sync_size, dir);
9f38c636
JL
162}
163
1da177e4
LT
/* Read a 32-bit register of the Ethernet core through the SSB bus glue. */
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return ssb_read32(bp->sdev, reg);
}
168
/* Write a 32-bit register of the Ethernet core through the SSB bus glue. */
static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	ssb_write32(bp->sdev, reg, val);
}
174
175static int b44_wait_bit(struct b44 *bp, unsigned long reg,
176 u32 bit, unsigned long timeout, const int clear)
177{
178 unsigned long i;
179
180 for (i = 0; i < timeout; i++) {
181 u32 val = br32(bp, reg);
182
183 if (clear && !(val & bit))
184 break;
185 if (!clear && (val & bit))
186 break;
187 udelay(10);
188 }
189 if (i == timeout) {
f6ca057f 190 if (net_ratelimit())
2fc96fff
JP
191 netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n",
192 bit, reg, clear ? "clear" : "set");
193
1da177e4
LT
194 return -ENODEV;
195 }
196 return 0;
197}
198
753f4920 199static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
1da177e4
LT
200{
201 u32 val;
202
753f4920
MB
203 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
204 (index << CAM_CTRL_INDEX_SHIFT)));
1da177e4 205
753f4920 206 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
1da177e4 207
753f4920 208 val = br32(bp, B44_CAM_DATA_LO);
1da177e4 209
753f4920
MB
210 data[2] = (val >> 24) & 0xFF;
211 data[3] = (val >> 16) & 0xFF;
212 data[4] = (val >> 8) & 0xFF;
213 data[5] = (val >> 0) & 0xFF;
1da177e4 214
753f4920 215 val = br32(bp, B44_CAM_DATA_HI);
1da177e4 216
753f4920
MB
217 data[0] = (val >> 8) & 0xFF;
218 data[1] = (val >> 0) & 0xFF;
1da177e4
LT
219}
220
753f4920 221static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
1da177e4
LT
222{
223 u32 val;
224
225 val = ((u32) data[2]) << 24;
226 val |= ((u32) data[3]) << 16;
227 val |= ((u32) data[4]) << 8;
228 val |= ((u32) data[5]) << 0;
229 bw32(bp, B44_CAM_DATA_LO, val);
10badc21 230 val = (CAM_DATA_HI_VALID |
1da177e4
LT
231 (((u32) data[0]) << 8) |
232 (((u32) data[1]) << 0));
233 bw32(bp, B44_CAM_DATA_HI, val);
234 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
235 (index << CAM_CTRL_INDEX_SHIFT)));
10badc21 236 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
1da177e4
LT
237}
238
/* Mask all interrupt sources. Does not flush the posted write; callers
 * that need the write to reach the chip use b44_disable_ints() instead.
 */
static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}
243
/* Mask all interrupts and make sure the write has reached the chip. */
static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}
251
/* Re-enable the interrupt sources recorded in bp->imask. */
static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}
256
753f4920 257static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
1da177e4
LT
258{
259 int err;
260
261 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
262 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
263 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
753f4920 264 (phy_addr << MDIO_DATA_PMD_SHIFT) |
1da177e4
LT
265 (reg << MDIO_DATA_RA_SHIFT) |
266 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
267 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
268 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
269
270 return err;
271}
272
753f4920 273static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
1da177e4
LT
274{
275 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
276 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
277 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
753f4920 278 (phy_addr << MDIO_DATA_PMD_SHIFT) |
1da177e4
LT
279 (reg << MDIO_DATA_RA_SHIFT) |
280 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
281 (val & MDIO_DATA_DATA)));
282 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
283}
284
753f4920
MB
/* Read a register of the internal PHY. External PHYs are not driven
 * through this path, so the call is a successful no-op for them
 * (*val is left untouched).
 */
static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;

	return __b44_readphy(bp, bp->phy_addr, reg, val);
}
292
/* Write a register of the internal PHY. A successful no-op when an
 * external PHY is in use.
 */
static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;

	return __b44_writephy(bp, bp->phy_addr, reg, val);
}
300
/* miilib interface */
/* mii_if_info read hook: returns the register value, or 0xffffffff on a
 * failed MDIO transaction (the mii library's "no data" convention).
 */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = netdev_priv(dev);
	int rc = __b44_readphy(bp, phy_id, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}
311
/* mii_if_info write hook. The mii library expects a void write, so any
 * MDIO error from __b44_writephy() is deliberately ignored here.
 */
static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			  int val)
{
	struct b44 *bp = netdev_priv(dev);
	__b44_writephy(bp, phy_id, location, val);
}
318
319static int b44_phy_reset(struct b44 *bp)
320{
321 u32 val;
322 int err;
323
d6194195 324 if (bp->flags & B44_FLAG_EXTERNAL_PHY)
753f4920 325 return 0;
1da177e4
LT
326 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
327 if (err)
328 return err;
329 udelay(100);
330 err = b44_readphy(bp, MII_BMCR, &val);
331 if (!err) {
332 if (val & BMCR_RESET) {
2fc96fff 333 netdev_err(bp->dev, "PHY Reset would not complete\n");
1da177e4
LT
334 err = -ENODEV;
335 }
336 }
337
8850dce1 338 return err;
1da177e4
LT
339}
340
341static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
342{
343 u32 val;
344
345 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
346 bp->flags |= pause_flags;
347
348 val = br32(bp, B44_RXCONFIG);
349 if (pause_flags & B44_FLAG_RX_PAUSE)
350 val |= RXCONFIG_FLOW;
351 else
352 val &= ~RXCONFIG_FLOW;
353 bw32(bp, B44_RXCONFIG, val);
354
355 val = br32(bp, B44_MAC_FLOW);
356 if (pause_flags & B44_FLAG_TX_PAUSE)
357 val |= (MAC_FLOW_PAUSE_ENAB |
358 (0xc0 & MAC_FLOW_RX_HI_WATER));
359 else
360 val &= ~MAC_FLOW_PAUSE_ENAB;
361 bw32(bp, B44_MAC_FLOW, val);
362}
363
364static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
365{
10badc21 366 u32 pause_enab = 0;
2b474cf5
GZ
367
368 /* The driver supports only rx pause by default because
10badc21
JG
369 the b44 mac tx pause mechanism generates excessive
370 pause frames.
2b474cf5
GZ
371 Use ethtool to turn on b44 tx pause if necessary.
372 */
373 if ((local & ADVERTISE_PAUSE_CAP) &&
10badc21 374 (local & ADVERTISE_PAUSE_ASYM)){
2b474cf5
GZ
375 if ((remote & LPA_PAUSE_ASYM) &&
376 !(remote & LPA_PAUSE_CAP))
377 pause_enab |= B44_FLAG_RX_PAUSE;
1da177e4
LT
378 }
379
380 __b44_set_flow_ctrl(bp, pause_enab);
381}
382
#ifdef CONFIG_BCM47XX
#include <bcm47xx_nvram.h>
/*
 * Workaround for bad hardware design in Linksys WAP54G v1.0: the PHY can
 * come up with the MII "isolate" bit set, leaving the port dead.
 * See https://dev.openwrt.org/ticket/146 — check and clear BMCR_ISOLATE.
 */
static void b44_wap54g10_workaround(struct b44 *bp)
{
	char buf[20];
	u32 val;
	int err;

	/* Only boardnum 2 (the WAP54G v1.0 board) is affected. */
	if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
		return;
	if (simple_strtoul(buf, NULL, 0) == 2) {
		err = __b44_readphy(bp, 0, MII_BMCR, &val);
		if (err)
			goto error;
		if (!(val & BMCR_ISOLATE))
			return;
		val &= ~BMCR_ISOLATE;
		err = __b44_writephy(bp, 0, MII_BMCR, val);
		if (err)
			goto error;
	}
	return;
error:
	/* pr_warning() is deprecated (since removed); use pr_warn(). */
	pr_warn("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif
418
1da177e4
LT
/* Program the internal PHY from the configuration cached in bp->flags:
 * set up the LED control registers, then either advertise the selected
 * modes and restart autonegotiation, or force speed/duplex. Returns 0
 * or a negative errno from the MDIO accessors.
 */
static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	b44_wap54g10_workaround(bp);

	/* An external PHY is managed outside this driver; nothing to do. */
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;
	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		/* Autonegotiate: advertise whatever ethtool enabled. */
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		/* Forced mode: rewrite BMCR with the requested speed/duplex. */
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not. So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}
482
/* Accumulate the chip's clear-on-read MIB counters into bp->hw_stats.
 * Called from the periodic timer with bp->lock held.
 */
static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u64 *val;

	/* Registers are walked in address order into consecutive u64
	 * slots starting at tx_good_octets, so the struct layout must
	 * mirror the register layout (see B44_STAT_REG_DECLARE).
	 */
	val = &bp->hw_stats.tx_good_octets;
	u64_stats_update_begin(&bp->hw_stats.syncp);

	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	/* Pad */
	/* NOTE(review): this increment is immediately overwritten by the
	 * next loop's initializer; it documents the register gap only.
	 */
	reg += 8*4UL;

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	u64_stats_update_end(&bp->hw_stats.syncp);
}
504
505static void b44_link_report(struct b44 *bp)
506{
507 if (!netif_carrier_ok(bp->dev)) {
2fc96fff 508 netdev_info(bp->dev, "Link is down\n");
1da177e4 509 } else {
2fc96fff
JP
510 netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
511 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
512 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
513
514 netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
515 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
516 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
1da177e4
LT
517 }
518}
519
/* Poll the PHY and propagate link changes to the MAC: the duplex bit in
 * TX_CTRL, the negotiated flow control, and the netif carrier state.
 * Called from the periodic timer with bp->lock held.
 */
static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		/* External PHYs are treated as always 100/full: force the
		 * MAC into duplex mode and report link up exactly once.
		 */
		bp->flags |= B44_FLAG_100_BASE_T;
		bp->flags |= B44_FLAG_FULL_DUPLEX;
		if (!netif_carrier_ok(bp->dev)) {
			u32 val = br32(bp, B44_TX_CTRL);
			val |= TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		}
		return;
	}

	/* bmsr == 0xffff means the PHY did not answer; skip this poll. */
	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			/* Resolve pause from the advertisement registers,
			 * unless speed/duplex were forced by the user.
			 */
			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			netdev_warn(bp->dev, "Remote fault detected in PHY\n");
		if (bmsr & BMSR_JCD)
			netdev_warn(bp->dev, "Jabber detected in PHY\n");
	}
}
580
581static void b44_timer(unsigned long __opaque)
582{
583 struct b44 *bp = (struct b44 *) __opaque;
584
585 spin_lock_irq(&bp->lock);
586
587 b44_check_phy(bp);
588
589 b44_stats_update(bp);
590
591 spin_unlock_irq(&bp->lock);
592
a72a8179 593 mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
1da177e4
LT
594}
595
/* Reap completed TX descriptors up to the chip's current consumer
 * pointer, unmapping and freeing each skb, then wake the queue if
 * enough ring space has been freed. Runs in NAPI context.
 */
static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* Hardware reports its consumer position as a byte offset. */
	cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		dma_unmap_single(bp->sdev->dma_dev,
				 rp->mapping,
				 skb->len,
				 DMA_TO_DEVICE);
		rp->skb = NULL;

		bytes_compl += skb->len;
		pkts_compl++;

		dev_kfree_skb_irq(skb);
	}

	/* Report completions for BQL accounting. */
	netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}
631
/* Works like this. This chip writes a 'struct rx_header" 30 bytes
 * before the DMA address you give it. So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
				 RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
	    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
		/* Sigh... */
		/* Retry once from GFP_DMA, which is guaranteed low memory. */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping,
					 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
					 RX_PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
		/* GFP_DMA buffers lack headroom, so force the copy path. */
		bp->force_copybreak = 1;
	}

	rh = (struct rx_header *) skb->data;

	/* Clear the header so stale length/flags are never mistaken for
	 * a completed receive.
	 */
	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	map->mapping = mapping;

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dp),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}
711
/* Re-post a received buffer (at @src_idx) into the producer slot
 * @dest_idx_unmasked instead of allocating a fresh one — used for
 * errored frames and for the copybreak path.
 */
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	/* Reset the header so the slot does not look completed. */
	rh->len = 0;
	rh->flags = 0;
	dest_map->mapping = src_map->mapping;

	/* Pull the source descriptor to the CPU before reading it. */
	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
					  src_idx * sizeof(*src_desc),
					  DMA_BIDIRECTIONAL);

	/* Preserve the control word, fixing up only the end-of-table bit
	 * for the destination position.
	 */
	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dest_desc),
					     DMA_BIDIRECTIONAL);

	/* Hand the data buffer back to the device. */
	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
				   RX_PKT_BUF_SZ,
				   DMA_FROM_DEVICE);
}
757
/* NAPI receive: process up to @budget completed RX descriptors and pass
 * the packets up the stack. Returns the number of packets received.
 */
static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	/* The chip reports its producer position as a byte offset. */
	prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = rp->mapping;
		struct rx_header *rh;
		u16 len;

		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
					RX_PKT_BUF_SZ,
					DMA_FROM_DEVICE);
		/* The chip deposits a struct rx_header ahead of the frame. */
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->dev->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			/* The header may land slightly after the descriptor
			 * completes; poll briefly for the length to appear.
			 */
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
			/* Large frame: hand the buffer up and replenish the
			 * ring slot with a fresh allocation.
			 */
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			dma_unmap_single(bp->sdev->dma_dev, map,
					 skb_size, DMA_FROM_DEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + RX_PKT_OFFSET);
			skb_pull(skb, RX_PKT_OFFSET);
		} else {
			/* Small frame: copy into a fresh skb and recycle the
			 * original buffer back into the ring.
			 */
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}
844
/* NAPI poll handler: reap TX completions, recover from RX FIFO overflow,
 * receive up to @budget packets, and handle fatal error interrupts with
 * a full ring/hardware reinit. Re-enables interrupts when done early.
 */
static int b44_poll(struct napi_struct *napi, int budget)
{
	struct b44 *bp = container_of(napi, struct b44, napi);
	int work_done;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	if (bp->istat & ISTAT_RFO) {	/* fast recovery, in ~20msec */
		bp->istat &= ~ISTAT_RFO;
		b44_disable_ints(bp);
		ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	/* RX runs without the lock; it only touches the RX ring state. */
	work_done = 0;
	if (bp->istat & ISTAT_RX)
		work_done += b44_rx(bp, budget);

	if (bp->istat & ISTAT_ERRORS) {
		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		/* The reset discarded pending work; report none done. */
		work_done = 0;
	}

	if (work_done < budget) {
		napi_complete(napi);
		b44_enable_ints(bp);
	}

	return work_done;
}
890
/* Hard IRQ handler: latch the pending interrupt status into bp->istat,
 * mask further interrupts, and hand processing off to NAPI.
 */
static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register controls which interrupt bits
	 * will actually raise an interrupt to the CPU when set by hw/firmware,
	 * but doesn't mask off the bits.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			netdev_info(dev, "late interrupt\n");
			goto irq_ack;
		}

		if (napi_schedule_prep(&bp->napi)) {
			/* NOTE: These writes are posted by the readback of
			 * the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__napi_schedule(&bp->napi);
		}

irq_ack:
		/* Ack exactly the bits we saw; the read flushes the writes. */
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}
932
933static void b44_tx_timeout(struct net_device *dev)
934{
935 struct b44 *bp = netdev_priv(dev);
936
2fc96fff 937 netdev_err(dev, "transmit timed out, resetting\n");
1da177e4
LT
938
939 spin_lock_irq(&bp->lock);
940
941 b44_halt(bp);
942 b44_init_rings(bp);
5fc7d61a 943 b44_init_hw(bp, B44_FULL_RESET);
1da177e4
LT
944
945 spin_unlock_irq(&bp->lock);
946
947 b44_enable_ints(bp);
948
949 netif_wake_queue(dev);
950}
951
/* netdev start_xmit hook: map the skb for DMA (bouncing into a GFP_DMA
 * buffer if it lands above the chip's 1GB DMA limit), fill in a TX
 * descriptor, and kick the DMA engine. Returns NETDEV_TX_OK, or
 * NETDEV_TX_BUSY if the ring is full or no bounce buffer is available.
 */
static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;
	unsigned long flags;

	len = skb->len;
	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_out;
	}

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
		struct sk_buff *bounce_skb;

		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
					 DMA_TO_DEVICE);

		bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
					 len, DMA_TO_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping,
						 len, DMA_TO_DEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		/* From here on the bounce skb is the one on the ring. */
		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	bp->tx_buffers[entry].mapping = mapping;

	/* One descriptor per packet: start-of-frame, end-of-frame, and
	 * interrupt-on-completion all on the same slot.
	 */
	ctrl = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	/* Descriptor contents must be visible before the doorbell write. */
	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	netdev_sent_queue(dev, skb->len);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

out_unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}
1041
1042static int b44_change_mtu(struct net_device *dev, int new_mtu)
1043{
1044 struct b44 *bp = netdev_priv(dev);
1045
1046 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1047 return -EINVAL;
1048
1049 if (!netif_running(dev)) {
1050 /* We'll just catch it later when the
1051 * device is up'd.
1052 */
1053 dev->mtu = new_mtu;
1054 return 0;
1055 }
1056
1057 spin_lock_irq(&bp->lock);
1058 b44_halt(bp);
1059 dev->mtu = new_mtu;
1060 b44_init_rings(bp);
5fc7d61a 1061 b44_init_hw(bp, B44_FULL_RESET);
1da177e4
LT
1062 spin_unlock_irq(&bp->lock);
1063
1064 b44_enable_ints(bp);
10badc21 1065
1da177e4
LT
1066 return 0;
1067}
1068
1069/* Free up pending packets in all rx/tx rings.
1070 *
1071 * The chip has been shut down and the driver detached from
1072 * the networking, so no interrupts or new tx packets will
1073 * end up in the driver. bp->lock is not held and we are not
1074 * in an interrupt context and thus may sleep.
1075 */
1076static void b44_free_rings(struct b44 *bp)
1077{
1078 struct ring_info *rp;
1079 int i;
1080
1081 for (i = 0; i < B44_RX_RING_SIZE; i++) {
1082 rp = &bp->rx_buffers[i];
1083
1084 if (rp->skb == NULL)
1085 continue;
39a6f4bc
FT
1086 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
1087 DMA_FROM_DEVICE);
1da177e4
LT
1088 dev_kfree_skb_any(rp->skb);
1089 rp->skb = NULL;
1090 }
1091
1092 /* XXX needs changes once NETIF_F_SG is set... */
1093 for (i = 0; i < B44_TX_RING_SIZE; i++) {
1094 rp = &bp->tx_buffers[i];
1095
1096 if (rp->skb == NULL)
1097 continue;
39a6f4bc
FT
1098 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
1099 DMA_TO_DEVICE);
1da177e4
LT
1100 dev_kfree_skb_any(rp->skb);
1101 rp->skb = NULL;
1102 }
1103}
1104
1105/* Initialize tx/rx rings for packet processing.
1106 *
1107 * The chip has been shut down and the driver detached from
1108 * the networking, so no interrupts or new tx packets will
874a6214 1109 * end up in the driver.
1da177e4
LT
1110 */
1111static void b44_init_rings(struct b44 *bp)
1112{
1113 int i;
1114
1115 b44_free_rings(bp);
1116
1117 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1118 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1119
9f38c636 1120 if (bp->flags & B44_FLAG_RX_RING_HACK)
39a6f4bc
FT
1121 dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
1122 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
9f38c636
JL
1123
1124 if (bp->flags & B44_FLAG_TX_RING_HACK)
39a6f4bc
FT
1125 dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
1126 DMA_TABLE_BYTES, DMA_TO_DEVICE);
9f38c636 1127
1da177e4
LT
1128 for (i = 0; i < bp->rx_pending; i++) {
1129 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1130 break;
1131 }
1132}
1133
1134/*
1135 * Must not be invoked with interrupt sources disabled and
1136 * the hardware shutdown down.
1137 */
1138static void b44_free_consistent(struct b44 *bp)
1139{
b4558ea9
JJ
1140 kfree(bp->rx_buffers);
1141 bp->rx_buffers = NULL;
1142 kfree(bp->tx_buffers);
1143 bp->tx_buffers = NULL;
1da177e4 1144 if (bp->rx_ring) {
9f38c636 1145 if (bp->flags & B44_FLAG_RX_RING_HACK) {
39a6f4bc
FT
1146 dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
1147 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
9f38c636
JL
1148 kfree(bp->rx_ring);
1149 } else
39a6f4bc
FT
1150 dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1151 bp->rx_ring, bp->rx_ring_dma);
1da177e4 1152 bp->rx_ring = NULL;
9f38c636 1153 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1da177e4
LT
1154 }
1155 if (bp->tx_ring) {
9f38c636 1156 if (bp->flags & B44_FLAG_TX_RING_HACK) {
39a6f4bc
FT
1157 dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
1158 DMA_TABLE_BYTES, DMA_TO_DEVICE);
9f38c636
JL
1159 kfree(bp->tx_ring);
1160 } else
39a6f4bc
FT
1161 dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1162 bp->tx_ring, bp->tx_ring_dma);
1da177e4 1163 bp->tx_ring = NULL;
9f38c636 1164 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1da177e4
LT
1165 }
1166}
1167
1168/*
1169 * Must not be invoked with interrupt sources disabled and
1170 * the hardware shutdown down. Can sleep.
1171 */
753f4920 1172static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1da177e4
LT
1173{
1174 int size;
1175
1176 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
753f4920 1177 bp->rx_buffers = kzalloc(size, gfp);
1da177e4
LT
1178 if (!bp->rx_buffers)
1179 goto out_err;
1da177e4
LT
1180
1181 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
753f4920 1182 bp->tx_buffers = kzalloc(size, gfp);
1da177e4
LT
1183 if (!bp->tx_buffers)
1184 goto out_err;
1da177e4
LT
1185
1186 size = DMA_TABLE_BYTES;
39a6f4bc
FT
1187 bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1188 &bp->rx_ring_dma, gfp);
9f38c636
JL
1189 if (!bp->rx_ring) {
1190 /* Allocation may have failed due to pci_alloc_consistent
1191 insisting on use of GFP_DMA, which is more restrictive
1192 than necessary... */
1193 struct dma_desc *rx_ring;
1194 dma_addr_t rx_ring_dma;
1195
753f4920 1196 rx_ring = kzalloc(size, gfp);
874a6214 1197 if (!rx_ring)
9f38c636
JL
1198 goto out_err;
1199
39a6f4bc
FT
1200 rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
1201 DMA_TABLE_BYTES,
1202 DMA_BIDIRECTIONAL);
9f38c636 1203
39a6f4bc 1204 if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
28b76796 1205 rx_ring_dma + size > DMA_BIT_MASK(30)) {
9f38c636
JL
1206 kfree(rx_ring);
1207 goto out_err;
1208 }
1209
1210 bp->rx_ring = rx_ring;
1211 bp->rx_ring_dma = rx_ring_dma;
1212 bp->flags |= B44_FLAG_RX_RING_HACK;
1213 }
1da177e4 1214
39a6f4bc
FT
1215 bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1216 &bp->tx_ring_dma, gfp);
9f38c636 1217 if (!bp->tx_ring) {
f225763a 1218 /* Allocation may have failed due to ssb_dma_alloc_consistent
9f38c636
JL
1219 insisting on use of GFP_DMA, which is more restrictive
1220 than necessary... */
1221 struct dma_desc *tx_ring;
1222 dma_addr_t tx_ring_dma;
1223
753f4920 1224 tx_ring = kzalloc(size, gfp);
874a6214 1225 if (!tx_ring)
9f38c636
JL
1226 goto out_err;
1227
39a6f4bc
FT
1228 tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
1229 DMA_TABLE_BYTES,
1230 DMA_TO_DEVICE);
9f38c636 1231
39a6f4bc 1232 if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
28b76796 1233 tx_ring_dma + size > DMA_BIT_MASK(30)) {
9f38c636
JL
1234 kfree(tx_ring);
1235 goto out_err;
1236 }
1237
1238 bp->tx_ring = tx_ring;
1239 bp->tx_ring_dma = tx_ring_dma;
1240 bp->flags |= B44_FLAG_TX_RING_HACK;
1241 }
1da177e4
LT
1242
1243 return 0;
1244
1245out_err:
1246 b44_free_consistent(bp);
1247 return -ENOMEM;
1248}
1249
1250/* bp->lock is held. */
1251static void b44_clear_stats(struct b44 *bp)
1252{
1253 unsigned long reg;
1254
1255 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1256 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1257 br32(bp, reg);
1258 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1259 br32(bp, reg);
1260}
1261
1262/* bp->lock is held. */
fedb0eef 1263static void b44_chip_reset(struct b44 *bp, int reset_kind)
1da177e4 1264{
753f4920 1265 struct ssb_device *sdev = bp->sdev;
f8af11af 1266 bool was_enabled;
753f4920 1267
f8af11af
MB
1268 was_enabled = ssb_device_is_enabled(bp->sdev);
1269
1270 ssb_device_enable(bp->sdev, 0);
1271 ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
1272
1273 if (was_enabled) {
1da177e4
LT
1274 bw32(bp, B44_RCV_LAZY, 0);
1275 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
40ee8c76 1276 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1da177e4
LT
1277 bw32(bp, B44_DMATX_CTRL, 0);
1278 bp->tx_prod = bp->tx_cons = 0;
1279 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1280 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1281 100, 0);
1282 }
1283 bw32(bp, B44_DMARX_CTRL, 0);
1284 bp->rx_prod = bp->rx_cons = 0;
f8af11af 1285 }
1da177e4
LT
1286
1287 b44_clear_stats(bp);
1288
fedb0eef
MB
1289 /*
1290 * Don't enable PHY if we are doing a partial reset
1291 * we are probably going to power down
1292 */
1293 if (reset_kind == B44_CHIP_RESET_PARTIAL)
1294 return;
1295
753f4920
MB
1296 switch (sdev->bus->bustype) {
1297 case SSB_BUSTYPE_SSB:
1298 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
39506a55
JL
1299 (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
1300 B44_MDC_RATIO)
753f4920
MB
1301 & MDIO_CTRL_MAXF_MASK)));
1302 break;
1303 case SSB_BUSTYPE_PCI:
753f4920
MB
1304 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1305 (0x0d & MDIO_CTRL_MAXF_MASK)));
1306 break;
98a1e2a9
MB
1307 case SSB_BUSTYPE_PCMCIA:
1308 case SSB_BUSTYPE_SDIO:
1309 WARN_ON(1); /* A device with this bus does not exist. */
1310 break;
753f4920
MB
1311 }
1312
1da177e4
LT
1313 br32(bp, B44_MDIO_CTRL);
1314
1315 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1316 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1317 br32(bp, B44_ENET_CTRL);
d6194195 1318 bp->flags |= B44_FLAG_EXTERNAL_PHY;
1da177e4
LT
1319 } else {
1320 u32 val = br32(bp, B44_DEVCTRL);
1321
1322 if (val & DEVCTRL_EPR) {
1323 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1324 br32(bp, B44_DEVCTRL);
1325 udelay(100);
1326 }
d6194195 1327 bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
1da177e4
LT
1328 }
1329}
1330
1331/* bp->lock is held. */
1332static void b44_halt(struct b44 *bp)
1333{
1334 b44_disable_ints(bp);
fedb0eef
MB
1335 /* reset PHY */
1336 b44_phy_reset(bp);
1337 /* power down PHY */
2fc96fff 1338 netdev_info(bp->dev, "powering down PHY\n");
fedb0eef
MB
1339 bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
1340 /* now reset the chip, but without enabling the MAC&PHY
1341 * part of it. This has to be done _after_ we shut down the PHY */
1342 b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1da177e4
LT
1343}
1344
1345/* bp->lock is held. */
1346static void __b44_set_mac_addr(struct b44 *bp)
1347{
1348 bw32(bp, B44_CAM_CTRL, 0);
1349 if (!(bp->dev->flags & IFF_PROMISC)) {
1350 u32 val;
1351
1352 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1353 val = br32(bp, B44_CAM_CTRL);
1354 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1355 }
1356}
1357
1358static int b44_set_mac_addr(struct net_device *dev, void *p)
1359{
1360 struct b44 *bp = netdev_priv(dev);
1361 struct sockaddr *addr = p;
753f4920 1362 u32 val;
1da177e4
LT
1363
1364 if (netif_running(dev))
1365 return -EBUSY;
1366
391fc09a
GZ
1367 if (!is_valid_ether_addr(addr->sa_data))
1368 return -EINVAL;
1369
1da177e4
LT
1370 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1371
1372 spin_lock_irq(&bp->lock);
753f4920
MB
1373
1374 val = br32(bp, B44_RXCONFIG);
1375 if (!(val & RXCONFIG_CAM_ABSENT))
1376 __b44_set_mac_addr(bp);
1377
1da177e4
LT
1378 spin_unlock_irq(&bp->lock);
1379
1380 return 0;
1381}
1382
1383/* Called at device open time to get the chip ready for
1384 * packet processing. Invoked with bp->lock held.
1385 */
1386static void __b44_set_rx_mode(struct net_device *);
5fc7d61a 1387static void b44_init_hw(struct b44 *bp, int reset_kind)
1da177e4
LT
1388{
1389 u32 val;
1390
fedb0eef 1391 b44_chip_reset(bp, B44_CHIP_RESET_FULL);
5fc7d61a 1392 if (reset_kind == B44_FULL_RESET) {
00e8b3aa
GZ
1393 b44_phy_reset(bp);
1394 b44_setup_phy(bp);
1395 }
1da177e4
LT
1396
1397 /* Enable CRC32, set proper LED modes and power on PHY */
1398 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1399 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1400
1401 /* This sets the MAC address too. */
1402 __b44_set_rx_mode(bp->dev);
1403
1404 /* MTU + eth header + possible VLAN tag + struct rx_header */
1405 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1406 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1407
1408 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
5fc7d61a
MC
1409 if (reset_kind == B44_PARTIAL_RESET) {
1410 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
72f4861e 1411 (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
5fc7d61a 1412 } else {
00e8b3aa
GZ
1413 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1414 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1415 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
72f4861e 1416 (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
00e8b3aa 1417 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1da177e4 1418
00e8b3aa
GZ
1419 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1420 bp->rx_prod = bp->rx_pending;
1da177e4 1421
00e8b3aa 1422 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
00e8b3aa 1423 }
1da177e4
LT
1424
1425 val = br32(bp, B44_ENET_CTRL);
1426 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
5055544e
HM
1427
1428 netdev_reset_queue(bp->dev);
1da177e4
LT
1429}
1430
1431static int b44_open(struct net_device *dev)
1432{
1433 struct b44 *bp = netdev_priv(dev);
1434 int err;
1435
753f4920 1436 err = b44_alloc_consistent(bp, GFP_KERNEL);
1da177e4 1437 if (err)
6c2f4267 1438 goto out;
1da177e4 1439
bea3348e
SH
1440 napi_enable(&bp->napi);
1441
1da177e4 1442 b44_init_rings(bp);
5fc7d61a 1443 b44_init_hw(bp, B44_FULL_RESET);
1da177e4 1444
e254e9bf
JL
1445 b44_check_phy(bp);
1446
1fb9df5d 1447 err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
6c2f4267 1448 if (unlikely(err < 0)) {
bea3348e 1449 napi_disable(&bp->napi);
fedb0eef 1450 b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
6c2f4267
FR
1451 b44_free_rings(bp);
1452 b44_free_consistent(bp);
1453 goto out;
1454 }
1da177e4
LT
1455
1456 init_timer(&bp->timer);
1457 bp->timer.expires = jiffies + HZ;
1458 bp->timer.data = (unsigned long) bp;
1459 bp->timer.function = b44_timer;
1460 add_timer(&bp->timer);
1461
1462 b44_enable_ints(bp);
d9e2d185 1463 netif_start_queue(dev);
6c2f4267 1464out:
1da177e4
LT
1465 return err;
1466}
1467
1da177e4
LT
1468#ifdef CONFIG_NET_POLL_CONTROLLER
1469/*
1470 * Polling receive - used by netconsole and other diagnostic tools
1471 * to allow network i/o with interrupts disabled.
1472 */
1473static void b44_poll_controller(struct net_device *dev)
1474{
1475 disable_irq(dev->irq);
7d12e780 1476 b44_interrupt(dev->irq, dev);
1da177e4
LT
1477 enable_irq(dev->irq);
1478}
1479#endif
1480
725ad800
GZ
1481static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1482{
1483 u32 i;
1484 u32 *pattern = (u32 *) pp;
1485
1486 for (i = 0; i < bytes; i += sizeof(u32)) {
1487 bw32(bp, B44_FILT_ADDR, table_offset + i);
1488 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1489 }
1490}
1491
1492static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1493{
1494 int magicsync = 6;
1495 int k, j, len = offset;
1496 int ethaddr_bytes = ETH_ALEN;
1497
1498 memset(ppattern + offset, 0xff, magicsync);
1499 for (j = 0; j < magicsync; j++)
1500 set_bit(len++, (unsigned long *) pmask);
1501
1502 for (j = 0; j < B44_MAX_PATTERNS; j++) {
1503 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1504 ethaddr_bytes = ETH_ALEN;
1505 else
1506 ethaddr_bytes = B44_PATTERN_SIZE - len;
1507 if (ethaddr_bytes <=0)
1508 break;
1509 for (k = 0; k< ethaddr_bytes; k++) {
1510 ppattern[offset + magicsync +
1511 (j * ETH_ALEN) + k] = macaddr[k];
e0188829 1512 set_bit(len++, (unsigned long *) pmask);
725ad800
GZ
1513 }
1514 }
1515 return len - 1;
1516}
1517
1518/* Setup magic packet patterns in the b44 WOL
1519 * pattern matching filter.
1520 */
1521static void b44_setup_pseudo_magicp(struct b44 *bp)
1522{
1523
1524 u32 val;
1525 int plen0, plen1, plen2;
1526 u8 *pwol_pattern;
1527 u8 pwol_mask[B44_PMASK_SIZE];
1528
dd00cc48 1529 pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
b2adaca9 1530 if (!pwol_pattern)
725ad800 1531 return;
725ad800
GZ
1532
1533 /* Ipv4 magic packet pattern - pattern 0.*/
725ad800
GZ
1534 memset(pwol_mask, 0, B44_PMASK_SIZE);
1535 plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1536 B44_ETHIPV4UDP_HLEN);
1537
1538 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1539 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1540
1541 /* Raw ethernet II magic packet pattern - pattern 1 */
1542 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1543 memset(pwol_mask, 0, B44_PMASK_SIZE);
1544 plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1545 ETH_HLEN);
1546
1547 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1548 B44_PATTERN_BASE + B44_PATTERN_SIZE);
1549 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1550 B44_PMASK_BASE + B44_PMASK_SIZE);
1551
1552 /* Ipv6 magic packet pattern - pattern 2 */
1553 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1554 memset(pwol_mask, 0, B44_PMASK_SIZE);
1555 plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1556 B44_ETHIPV6UDP_HLEN);
1557
1558 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1559 B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1560 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1561 B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1562
1563 kfree(pwol_pattern);
1564
1565 /* set these pattern's lengths: one less than each real length */
1566 val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1567 bw32(bp, B44_WKUP_LEN, val);
1568
1569 /* enable wakeup pattern matching */
1570 val = br32(bp, B44_DEVCTRL);
1571 bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1572
1573}
52cafd96 1574
753f4920
MB
1575#ifdef CONFIG_B44_PCI
1576static void b44_setup_wol_pci(struct b44 *bp)
1577{
1578 u16 val;
1579
1580 if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
1581 bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
1582 pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
1583 pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
1584 }
1585}
1586#else
1587static inline void b44_setup_wol_pci(struct b44 *bp) { }
1588#endif /* CONFIG_B44_PCI */
1589
52cafd96
GZ
1590static void b44_setup_wol(struct b44 *bp)
1591{
1592 u32 val;
52cafd96
GZ
1593
1594 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1595
1596 if (bp->flags & B44_FLAG_B0_ANDLATER) {
1597
1598 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1599
1600 val = bp->dev->dev_addr[2] << 24 |
1601 bp->dev->dev_addr[3] << 16 |
1602 bp->dev->dev_addr[4] << 8 |
1603 bp->dev->dev_addr[5];
1604 bw32(bp, B44_ADDR_LO, val);
1605
1606 val = bp->dev->dev_addr[0] << 8 |
1607 bp->dev->dev_addr[1];
1608 bw32(bp, B44_ADDR_HI, val);
1609
1610 val = br32(bp, B44_DEVCTRL);
1611 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1612
725ad800
GZ
1613 } else {
1614 b44_setup_pseudo_magicp(bp);
1615 }
753f4920 1616 b44_setup_wol_pci(bp);
52cafd96
GZ
1617}
1618
1da177e4
LT
1619static int b44_close(struct net_device *dev)
1620{
1621 struct b44 *bp = netdev_priv(dev);
1622
1623 netif_stop_queue(dev);
1624
bea3348e 1625 napi_disable(&bp->napi);
ba5eec9c 1626
1da177e4
LT
1627 del_timer_sync(&bp->timer);
1628
1629 spin_lock_irq(&bp->lock);
1630
1da177e4
LT
1631 b44_halt(bp);
1632 b44_free_rings(bp);
c35ca399 1633 netif_carrier_off(dev);
1da177e4
LT
1634
1635 spin_unlock_irq(&bp->lock);
1636
1637 free_irq(dev->irq, dev);
1638
52cafd96 1639 if (bp->flags & B44_FLAG_WOL_ENABLE) {
5fc7d61a 1640 b44_init_hw(bp, B44_PARTIAL_RESET);
52cafd96
GZ
1641 b44_setup_wol(bp);
1642 }
1643
1da177e4
LT
1644 b44_free_consistent(bp);
1645
1646 return 0;
1647}
1648
eeda8585
KG
1649static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
1650 struct rtnl_link_stats64 *nstat)
1da177e4
LT
1651{
1652 struct b44 *bp = netdev_priv(dev);
1da177e4 1653 struct b44_hw_stats *hwstat = &bp->hw_stats;
eeda8585
KG
1654 unsigned int start;
1655
1656 do {
1657 start = u64_stats_fetch_begin_bh(&hwstat->syncp);
1658
1659 /* Convert HW stats into rtnl_link_stats64 stats. */
1660 nstat->rx_packets = hwstat->rx_pkts;
1661 nstat->tx_packets = hwstat->tx_pkts;
1662 nstat->rx_bytes = hwstat->rx_octets;
1663 nstat->tx_bytes = hwstat->tx_octets;
1664 nstat->tx_errors = (hwstat->tx_jabber_pkts +
1665 hwstat->tx_oversize_pkts +
1666 hwstat->tx_underruns +
1667 hwstat->tx_excessive_cols +
1668 hwstat->tx_late_cols);
1669 nstat->multicast = hwstat->tx_multicast_pkts;
1670 nstat->collisions = hwstat->tx_total_cols;
1671
1672 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1673 hwstat->rx_undersize);
1674 nstat->rx_over_errors = hwstat->rx_missed_pkts;
1675 nstat->rx_frame_errors = hwstat->rx_align_errs;
1676 nstat->rx_crc_errors = hwstat->rx_crc_errs;
1677 nstat->rx_errors = (hwstat->rx_jabber_pkts +
1678 hwstat->rx_oversize_pkts +
1679 hwstat->rx_missed_pkts +
1680 hwstat->rx_crc_align_errs +
1681 hwstat->rx_undersize +
1682 hwstat->rx_crc_errs +
1683 hwstat->rx_align_errs +
1684 hwstat->rx_symbol_errs);
1685
1686 nstat->tx_aborted_errors = hwstat->tx_underruns;
1da177e4 1687#if 0
eeda8585
KG
1688 /* Carrier lost counter seems to be broken for some devices */
1689 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1da177e4 1690#endif
eeda8585 1691 } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
1da177e4
LT
1692
1693 return nstat;
1694}
1695
1696static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1697{
22bedad3 1698 struct netdev_hw_addr *ha;
1da177e4
LT
1699 int i, num_ents;
1700
4cd24eaf 1701 num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
0ddf477b 1702 i = 0;
22bedad3 1703 netdev_for_each_mc_addr(ha, dev) {
0ddf477b
JP
1704 if (i == num_ents)
1705 break;
22bedad3 1706 __b44_cam_write(bp, ha->addr, i++ + 1);
1da177e4
LT
1707 }
1708 return i+1;
1709}
1710
1711static void __b44_set_rx_mode(struct net_device *dev)
1712{
1713 struct b44 *bp = netdev_priv(dev);
1714 u32 val;
1da177e4
LT
1715
1716 val = br32(bp, B44_RXCONFIG);
1717 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
753f4920 1718 if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1da177e4
LT
1719 val |= RXCONFIG_PROMISC;
1720 bw32(bp, B44_RXCONFIG, val);
1721 } else {
874a6214 1722 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
cda22aa9 1723 int i = 1;
874a6214 1724
1da177e4
LT
1725 __b44_set_mac_addr(bp);
1726
2f614fe0 1727 if ((dev->flags & IFF_ALLMULTI) ||
4cd24eaf 1728 (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
1da177e4
LT
1729 val |= RXCONFIG_ALLMULTI;
1730 else
874a6214 1731 i = __b44_load_mcast(bp, dev);
10badc21 1732
2f614fe0 1733 for (; i < 64; i++)
10badc21 1734 __b44_cam_write(bp, zero, i);
2f614fe0 1735
1da177e4
LT
1736 bw32(bp, B44_RXCONFIG, val);
1737 val = br32(bp, B44_CAM_CTRL);
1738 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1739 }
1740}
1741
1742static void b44_set_rx_mode(struct net_device *dev)
1743{
1744 struct b44 *bp = netdev_priv(dev);
1745
1746 spin_lock_irq(&bp->lock);
1747 __b44_set_rx_mode(dev);
1748 spin_unlock_irq(&bp->lock);
1749}
1750
1751static u32 b44_get_msglevel(struct net_device *dev)
1752{
1753 struct b44 *bp = netdev_priv(dev);
1754 return bp->msg_enable;
1755}
1756
1757static void b44_set_msglevel(struct net_device *dev, u32 value)
1758{
1759 struct b44 *bp = netdev_priv(dev);
1760 bp->msg_enable = value;
1761}
1762
1763static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1764{
1765 struct b44 *bp = netdev_priv(dev);
753f4920 1766 struct ssb_bus *bus = bp->sdev->bus;
1da177e4 1767
27e09551 1768 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1769 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
753f4920
MB
1770 switch (bus->bustype) {
1771 case SSB_BUSTYPE_PCI:
27e09551 1772 strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
753f4920 1773 break;
753f4920 1774 case SSB_BUSTYPE_SSB:
27e09551 1775 strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
753f4920 1776 break;
98a1e2a9
MB
1777 case SSB_BUSTYPE_PCMCIA:
1778 case SSB_BUSTYPE_SDIO:
1779 WARN_ON(1); /* A device with this bus does not exist. */
1780 break;
753f4920 1781 }
1da177e4
LT
1782}
1783
1784static int b44_nway_reset(struct net_device *dev)
1785{
1786 struct b44 *bp = netdev_priv(dev);
1787 u32 bmcr;
1788 int r;
1789
1790 spin_lock_irq(&bp->lock);
1791 b44_readphy(bp, MII_BMCR, &bmcr);
1792 b44_readphy(bp, MII_BMCR, &bmcr);
1793 r = -EINVAL;
1794 if (bmcr & BMCR_ANENABLE) {
1795 b44_writephy(bp, MII_BMCR,
1796 bmcr | BMCR_ANRESTART);
1797 r = 0;
1798 }
1799 spin_unlock_irq(&bp->lock);
1800
1801 return r;
1802}
1803
1804static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1805{
1806 struct b44 *bp = netdev_priv(dev);
1807
1da177e4
LT
1808 cmd->supported = (SUPPORTED_Autoneg);
1809 cmd->supported |= (SUPPORTED_100baseT_Half |
1810 SUPPORTED_100baseT_Full |
1811 SUPPORTED_10baseT_Half |
1812 SUPPORTED_10baseT_Full |
1813 SUPPORTED_MII);
1814
1815 cmd->advertising = 0;
1816 if (bp->flags & B44_FLAG_ADV_10HALF)
adf6e000 1817 cmd->advertising |= ADVERTISED_10baseT_Half;
1da177e4 1818 if (bp->flags & B44_FLAG_ADV_10FULL)
adf6e000 1819 cmd->advertising |= ADVERTISED_10baseT_Full;
1da177e4 1820 if (bp->flags & B44_FLAG_ADV_100HALF)
adf6e000 1821 cmd->advertising |= ADVERTISED_100baseT_Half;
1da177e4 1822 if (bp->flags & B44_FLAG_ADV_100FULL)
adf6e000
MW
1823 cmd->advertising |= ADVERTISED_100baseT_Full;
1824 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
70739497
DD
1825 ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
1826 SPEED_100 : SPEED_10));
1da177e4
LT
1827 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1828 DUPLEX_FULL : DUPLEX_HALF;
1829 cmd->port = 0;
1830 cmd->phy_address = bp->phy_addr;
d6194195
HM
1831 cmd->transceiver = (bp->flags & B44_FLAG_EXTERNAL_PHY) ?
1832 XCVR_EXTERNAL : XCVR_INTERNAL;
1da177e4
LT
1833 cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1834 AUTONEG_DISABLE : AUTONEG_ENABLE;
47b9c3b1
GZ
1835 if (cmd->autoneg == AUTONEG_ENABLE)
1836 cmd->advertising |= ADVERTISED_Autoneg;
1837 if (!netif_running(dev)){
70739497 1838 ethtool_cmd_speed_set(cmd, 0);
47b9c3b1
GZ
1839 cmd->duplex = 0xff;
1840 }
1da177e4
LT
1841 cmd->maxtxpkt = 0;
1842 cmd->maxrxpkt = 0;
1843 return 0;
1844}
1845
1846static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1847{
1848 struct b44 *bp = netdev_priv(dev);
25db0338 1849 u32 speed = ethtool_cmd_speed(cmd);
1da177e4 1850
1da177e4
LT
1851 /* We do not support gigabit. */
1852 if (cmd->autoneg == AUTONEG_ENABLE) {
1853 if (cmd->advertising &
1854 (ADVERTISED_1000baseT_Half |
1855 ADVERTISED_1000baseT_Full))
1856 return -EINVAL;
25db0338
DD
1857 } else if ((speed != SPEED_100 &&
1858 speed != SPEED_10) ||
1da177e4
LT
1859 (cmd->duplex != DUPLEX_HALF &&
1860 cmd->duplex != DUPLEX_FULL)) {
1861 return -EINVAL;
1862 }
1863
1864 spin_lock_irq(&bp->lock);
1865
1866 if (cmd->autoneg == AUTONEG_ENABLE) {
47b9c3b1
GZ
1867 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1868 B44_FLAG_100_BASE_T |
1869 B44_FLAG_FULL_DUPLEX |
1870 B44_FLAG_ADV_10HALF |
1da177e4
LT
1871 B44_FLAG_ADV_10FULL |
1872 B44_FLAG_ADV_100HALF |
1873 B44_FLAG_ADV_100FULL);
47b9c3b1
GZ
1874 if (cmd->advertising == 0) {
1875 bp->flags |= (B44_FLAG_ADV_10HALF |
1876 B44_FLAG_ADV_10FULL |
1877 B44_FLAG_ADV_100HALF |
1878 B44_FLAG_ADV_100FULL);
1879 } else {
1880 if (cmd->advertising & ADVERTISED_10baseT_Half)
1881 bp->flags |= B44_FLAG_ADV_10HALF;
1882 if (cmd->advertising & ADVERTISED_10baseT_Full)
1883 bp->flags |= B44_FLAG_ADV_10FULL;
1884 if (cmd->advertising & ADVERTISED_100baseT_Half)
1885 bp->flags |= B44_FLAG_ADV_100HALF;
1886 if (cmd->advertising & ADVERTISED_100baseT_Full)
1887 bp->flags |= B44_FLAG_ADV_100FULL;
1888 }
1da177e4
LT
1889 } else {
1890 bp->flags |= B44_FLAG_FORCE_LINK;
47b9c3b1 1891 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
25db0338 1892 if (speed == SPEED_100)
1da177e4
LT
1893 bp->flags |= B44_FLAG_100_BASE_T;
1894 if (cmd->duplex == DUPLEX_FULL)
1895 bp->flags |= B44_FLAG_FULL_DUPLEX;
1896 }
1897
47b9c3b1
GZ
1898 if (netif_running(dev))
1899 b44_setup_phy(bp);
1da177e4
LT
1900
1901 spin_unlock_irq(&bp->lock);
1902
1903 return 0;
1904}
1905
1906static void b44_get_ringparam(struct net_device *dev,
1907 struct ethtool_ringparam *ering)
1908{
1909 struct b44 *bp = netdev_priv(dev);
1910
1911 ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1912 ering->rx_pending = bp->rx_pending;
1913
1914 /* XXX ethtool lacks a tx_max_pending, oops... */
1915}
1916
1917static int b44_set_ringparam(struct net_device *dev,
1918 struct ethtool_ringparam *ering)
1919{
1920 struct b44 *bp = netdev_priv(dev);
1921
1922 if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1923 (ering->rx_mini_pending != 0) ||
1924 (ering->rx_jumbo_pending != 0) ||
1925 (ering->tx_pending > B44_TX_RING_SIZE - 1))
1926 return -EINVAL;
1927
1928 spin_lock_irq(&bp->lock);
1929
1930 bp->rx_pending = ering->rx_pending;
1931 bp->tx_pending = ering->tx_pending;
1932
1933 b44_halt(bp);
1934 b44_init_rings(bp);
5fc7d61a 1935 b44_init_hw(bp, B44_FULL_RESET);
1da177e4
LT
1936 netif_wake_queue(bp->dev);
1937 spin_unlock_irq(&bp->lock);
1938
1939 b44_enable_ints(bp);
10badc21 1940
1da177e4
LT
1941 return 0;
1942}
1943
1944static void b44_get_pauseparam(struct net_device *dev,
1945 struct ethtool_pauseparam *epause)
1946{
1947 struct b44 *bp = netdev_priv(dev);
1948
1949 epause->autoneg =
1950 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1951 epause->rx_pause =
1952 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1953 epause->tx_pause =
1954 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1955}
1956
1957static int b44_set_pauseparam(struct net_device *dev,
1958 struct ethtool_pauseparam *epause)
1959{
1960 struct b44 *bp = netdev_priv(dev);
1961
1962 spin_lock_irq(&bp->lock);
1963 if (epause->autoneg)
1964 bp->flags |= B44_FLAG_PAUSE_AUTO;
1965 else
1966 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1967 if (epause->rx_pause)
1968 bp->flags |= B44_FLAG_RX_PAUSE;
1969 else
1970 bp->flags &= ~B44_FLAG_RX_PAUSE;
1971 if (epause->tx_pause)
1972 bp->flags |= B44_FLAG_TX_PAUSE;
1973 else
1974 bp->flags &= ~B44_FLAG_TX_PAUSE;
1975 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1976 b44_halt(bp);
1977 b44_init_rings(bp);
5fc7d61a 1978 b44_init_hw(bp, B44_FULL_RESET);
1da177e4
LT
1979 } else {
1980 __b44_set_flow_ctrl(bp, bp->flags);
1981 }
1982 spin_unlock_irq(&bp->lock);
1983
1984 b44_enable_ints(bp);
10badc21 1985
1da177e4
LT
1986 return 0;
1987}
1988
3353930d
FR
1989static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1990{
1991 switch(stringset) {
1992 case ETH_SS_STATS:
1993 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1994 break;
1995 }
1996}
1997
b9f2c044 1998static int b44_get_sset_count(struct net_device *dev, int sset)
3353930d 1999{
b9f2c044
JG
2000 switch (sset) {
2001 case ETH_SS_STATS:
2002 return ARRAY_SIZE(b44_gstrings);
2003 default:
2004 return -EOPNOTSUPP;
2005 }
3353930d
FR
2006}
2007
2008static void b44_get_ethtool_stats(struct net_device *dev,
2009 struct ethtool_stats *stats, u64 *data)
2010{
2011 struct b44 *bp = netdev_priv(dev);
eeda8585
KG
2012 struct b44_hw_stats *hwstat = &bp->hw_stats;
2013 u64 *data_src, *data_dst;
2014 unsigned int start;
3353930d
FR
2015 u32 i;
2016
2017 spin_lock_irq(&bp->lock);
3353930d 2018 b44_stats_update(bp);
eeda8585 2019 spin_unlock_irq(&bp->lock);
3353930d 2020
eeda8585
KG
2021 do {
2022 data_src = &hwstat->tx_good_octets;
2023 data_dst = data;
2024 start = u64_stats_fetch_begin_bh(&hwstat->syncp);
3353930d 2025
eeda8585
KG
2026 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
2027 *data_dst++ = *data_src++;
2028
2029 } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
3353930d
FR
2030}
2031
52cafd96
GZ
2032static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2033{
2034 struct b44 *bp = netdev_priv(dev);
2035
2036 wol->supported = WAKE_MAGIC;
2037 if (bp->flags & B44_FLAG_WOL_ENABLE)
2038 wol->wolopts = WAKE_MAGIC;
2039 else
2040 wol->wolopts = 0;
2041 memset(&wol->sopass, 0, sizeof(wol->sopass));
2042}
2043
2044static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2045{
2046 struct b44 *bp = netdev_priv(dev);
2047
2048 spin_lock_irq(&bp->lock);
2049 if (wol->wolopts & WAKE_MAGIC)
2050 bp->flags |= B44_FLAG_WOL_ENABLE;
2051 else
2052 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2053 spin_unlock_irq(&bp->lock);
2054
2055 return 0;
2056}
2057
/* ethtool operations table; handlers are defined earlier in this file. */
static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_sset_count		= b44_get_sset_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
};
2076
2077static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2078{
2079 struct mii_ioctl_data *data = if_mii(ifr);
2080 struct b44 *bp = netdev_priv(dev);
3410572d
FR
2081 int err = -EINVAL;
2082
2083 if (!netif_running(dev))
2084 goto out;
1da177e4
LT
2085
2086 spin_lock_irq(&bp->lock);
2087 err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2088 spin_unlock_irq(&bp->lock);
3410572d 2089out:
1da177e4
LT
2090 return err;
2091}
2092
/* Fetch board invariants (MAC address, PHY address, DMA translation)
 * from the SSB bus SPROM and set up default driver flags.
 *
 * Returns 0 on success, -EINVAL if the SPROM-provided MAC address is
 * not a valid unicast ethernet address.
 */
static int b44_get_invariants(struct b44 *bp)
{
	struct ssb_device *sdev = bp->sdev;
	int err = 0;
	u8 *addr;

	bp->dma_offset = ssb_dma_translation(sdev);

	/* On native SSB buses, the second (and later) driver instance uses
	 * the et1* SPROM entries (second ethernet core); everything else
	 * uses the et0* entries.
	 */
	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
	    instance > 1) {
		addr = sdev->bus->sprom.et1mac;
		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
	} else {
		addr = sdev->bus->sprom.et0mac;
		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
	}
	/* Some ROMs have buggy PHY addresses with the high
	 * bits set (sign extension?). Truncate them to a
	 * valid PHY address. */
	bp->phy_addr &= 0x1F;

	memcpy(bp->dev->dev_addr, addr, ETH_ALEN);

	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
		pr_err("Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	bp->imask = IMASK_DEF;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	 */

	/* SSB core revision 7 and later corresponds to the B0 (and newer)
	 * silicon revision of the MAC.
	 */
	if (bp->sdev->id.revision >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

	return err;
}
2132
/* net_device operations table; handlers are defined earlier in this file. */
static const struct net_device_ops b44_netdev_ops = {
	.ndo_open		= b44_open,
	.ndo_stop		= b44_close,
	.ndo_start_xmit		= b44_start_xmit,
	.ndo_get_stats64	= b44_get_stats64,
	.ndo_set_rx_mode	= b44_set_rx_mode,
	.ndo_set_mac_address	= b44_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= b44_ioctl,
	.ndo_tx_timeout		= b44_tx_timeout,
	.ndo_change_mtu		= b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= b44_poll_controller,
#endif
};
2148
23971887 2149static int b44_init_one(struct ssb_device *sdev,
1dd06ae8 2150 const struct ssb_device_id *ent)
1da177e4 2151{
1da177e4
LT
2152 struct net_device *dev;
2153 struct b44 *bp;
0795af57 2154 int err;
1da177e4 2155
753f4920
MB
2156 instance++;
2157
7329f0d5 2158 pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);
1da177e4
LT
2159
2160 dev = alloc_etherdev(sizeof(*bp));
2161 if (!dev) {
1da177e4 2162 err = -ENOMEM;
753f4920 2163 goto out;
1da177e4
LT
2164 }
2165
753f4920 2166 SET_NETDEV_DEV(dev, sdev->dev);
1da177e4
LT
2167
2168 /* No interesting netdevice features in this card... */
2169 dev->features |= 0;
2170
2171 bp = netdev_priv(dev);
753f4920 2172 bp->sdev = sdev;
1da177e4 2173 bp->dev = dev;
a58c891a 2174 bp->force_copybreak = 0;
874a6214
FR
2175
2176 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
1da177e4
LT
2177
2178 spin_lock_init(&bp->lock);
2179
1da177e4
LT
2180 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2181 bp->tx_pending = B44_DEF_TX_RING_PENDING;
2182
403413e5 2183 dev->netdev_ops = &b44_netdev_ops;
bea3348e 2184 netif_napi_add(dev, &bp->napi, b44_poll, 64);
1da177e4 2185 dev->watchdog_timeo = B44_TX_TIMEOUT;
753f4920 2186 dev->irq = sdev->irq;
1da177e4
LT
2187 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2188
753f4920
MB
2189 err = ssb_bus_powerup(sdev->bus, 0);
2190 if (err) {
2191 dev_err(sdev->dev,
2192 "Failed to powerup the bus\n");
2193 goto err_out_free_dev;
2194 }
39a6f4bc 2195
4011f9f7 2196 if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
753f4920 2197 dev_err(sdev->dev,
2fc96fff 2198 "Required 30BIT DMA mask unsupported by the system\n");
753f4920
MB
2199 goto err_out_powerdown;
2200 }
39a6f4bc 2201
1da177e4
LT
2202 err = b44_get_invariants(bp);
2203 if (err) {
753f4920 2204 dev_err(sdev->dev,
2fc96fff 2205 "Problem fetching invariants of chip, aborting\n");
753f4920 2206 goto err_out_powerdown;
1da177e4
LT
2207 }
2208
7befa6ab
HM
2209 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
2210 dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
2211 err = -ENODEV;
2212 goto err_out_powerdown;
2213 }
2214
1da177e4
LT
2215 bp->mii_if.dev = dev;
2216 bp->mii_if.mdio_read = b44_mii_read;
2217 bp->mii_if.mdio_write = b44_mii_write;
2218 bp->mii_if.phy_id = bp->phy_addr;
2219 bp->mii_if.phy_id_mask = 0x1f;
2220 bp->mii_if.reg_num_mask = 0x1f;
2221
2222 /* By default, advertise all speed/duplex settings. */
2223 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2224 B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2225
2226 /* By default, auto-negotiate PAUSE. */
2227 bp->flags |= B44_FLAG_PAUSE_AUTO;
2228
2229 err = register_netdev(dev);
2230 if (err) {
2fc96fff 2231 dev_err(sdev->dev, "Cannot register net device, aborting\n");
753f4920 2232 goto err_out_powerdown;
1da177e4
LT
2233 }
2234
bcf64aa3
PF
2235 netif_carrier_off(dev);
2236
753f4920 2237 ssb_set_drvdata(sdev, dev);
1da177e4 2238
10badc21 2239 /* Chip reset provides power to the b44 MAC & PCI cores, which
5c513129 2240 * is necessary for MAC register access.
10badc21 2241 */
fedb0eef 2242 b44_chip_reset(bp, B44_CHIP_RESET_FULL);
5c513129 2243
8850dce1
HM
2244 /* do a phy reset to test if there is an active phy */
2245 if (b44_phy_reset(bp) < 0)
5ab6329c 2246 bp->phy_addr = B44_PHY_ADDR_NO_LOCAL_PHY;
8850dce1 2247
7329f0d5 2248 netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
1da177e4
LT
2249
2250 return 0;
2251
753f4920
MB
2252err_out_powerdown:
2253 ssb_bus_may_powerdown(sdev->bus);
1da177e4
LT
2254
2255err_out_free_dev:
2256 free_netdev(dev);
2257
753f4920 2258out:
1da177e4
LT
2259 return err;
2260}
2261
/* SSB removal entry point: unregister and tear down one device.
 * Order matters: the netdev must be unregistered before the SSB core
 * is disabled, and freed before the bus may be powered down fully.
 */
static void b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);

	unregister_netdev(dev);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	free_netdev(dev);
	/* Put the (PCI-hosted) device into D3hot to save power. */
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}
2273
/* SSB suspend hook: halt the MAC, detach the netdev and, if wake-on-LAN
 * was requested via ethtool, re-arm the hardware for magic-packet wake
 * before dropping to D3hot.
 */
static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	/* Stop the phy timer before taking bp->lock (it takes it too). */
	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	/* Release the IRQ after halting; b44_resume() re-requests it. */
	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		/* Partial reset keeps enough of the chip alive for WOL. */
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}
2302
/* SSB resume hook: power the bus back up and, if the interface was
 * running at suspend time, rebuild the rings, re-init the hardware,
 * re-request the IRQ and restart the phy timer.
 */
static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	spin_lock_irq(&bp->lock);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	/*
	 * As a shared interrupt, the handler can be called immediately. To be
	 * able to check the interrupt status the hardware must already be
	 * powered back on (b44_init_hw).
	 */
	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "request_irq failed\n");
		/* Undo the hardware init done above before bailing out. */
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_free_rings(bp);
		spin_unlock_irq(&bp->lock);
		return rc;
	}

	netif_device_attach(bp->dev);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	/* Restart the phy timer that b44_suspend() stopped. */
	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}
2348
/* SSB driver glue: binds this driver to the core IDs in b44_ssb_tbl. */
static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= b44_remove_one,
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};
2357
cd155987 2358static inline int __init b44_pci_init(void)
753f4920
MB
2359{
2360 int err = 0;
2361#ifdef CONFIG_B44_PCI
2362 err = ssb_pcihost_register(&b44_pci_driver);
2363#endif
2364 return err;
2365}
2366
/* Unregister the PCI host glue; a no-op without CONFIG_B44_PCI. */
static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}
2373
/* Module init: compute the DMA descriptor sync size from the cache
 * alignment, then register the PCI host glue and the SSB driver.
 * If the SSB registration fails, the PCI glue is rolled back.
 */
static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

	/* Setup parameters for syncing RX/TX DMA descriptors */
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}
2390
/* Module exit: unregister in the reverse order of b44_init(). */
static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}
2396
/* Module entry/exit points. */
module_init(b44_init);
module_exit(b44_cleanup);