1 /* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.
2
3 Copyright 2000,2001 The Linux Kernel Team
4 Written/copyright 1994-2001 by Donald Becker.
5
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
8
9 Please submit bugs to http://bugzilla.kernel.org/ .
10 */
11
12 #define pr_fmt(fmt) "tulip: " fmt
13
14 #define DRV_NAME "tulip"
15
16 #include <linux/module.h>
17 #include <linux/pci.h>
18 #include <linux/slab.h>
19 #include "tulip.h"
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/etherdevice.h>
23 #include <linux/delay.h>
24 #include <linux/mii.h>
25 #include <linux/crc32.h>
26 #include <asm/unaligned.h>
27 #include <linux/uaccess.h>
28
29 #ifdef CONFIG_SPARC
30 #include <asm/prom.h>
31 #endif
32
33 /* A few user-configurable values. */
34
35 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
36 static unsigned int max_interrupt_work = 25;
37
38 #define MAX_UNITS 8
39 /* Used to pass the full-duplex flag, etc. */
40 static int full_duplex[MAX_UNITS];
41 static int options[MAX_UNITS];
42 static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */
43
44 /* The possible media types that can be set in options[] are: */
45 const char * const medianame[32] = {
46 "10baseT", "10base2", "AUI", "100baseTx",
47 "10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
48 "100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
49 "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
50 "MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
51 "","","","", "","","","", "","","","Transceiver reset",
52 };
53
54 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
55 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
56 defined(CONFIG_SPARC) || defined(__ia64__) || \
57 defined(__sh__) || defined(__mips__)
58 static int rx_copybreak = 1518;
59 #else
60 static int rx_copybreak = 100;
61 #endif
62
63 /*
64 Set the bus performance register.
65 Typical: Set 16 longword cache alignment, no burst limit.
66 Cache alignment (bits 15:14)     Burst length (bits 13:8)
67 0000  no alignment               0x00000000  unlimited     0800  8 longwords
68 4000  8 longwords                0100  1 longword          1000  16 longwords
69 8000  16 longwords               0200  2 longwords         2000  32 longwords
70 C000  32 longwords               0400  4 longwords
71 Warning: many older 486 systems are broken and require setting 0x00A04800
72 8 longword cache alignment, 8 longword burst.
73 ToDo: Non-Intel setting could be better.
74 */
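/* Example (bit names taken from tulip.h, so treat them as an assumption,
not a datasheet quote): the x86 value below, 0x01A00000 | 0x8000, requests
Memory-Write-and-Invalidate, Memory-Read-Line and Memory-Read-Multiple
(0x01A00000) plus 16-longword cache alignment (0x8000) with an unlimited
burst length. */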
75
76 #if defined(__alpha__) || defined(__ia64__)
77 static int csr0 = 0x01A00000 | 0xE000;
78 #elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
79 static int csr0 = 0x01A00000 | 0x8000;
80 #elif defined(CONFIG_SPARC) || defined(__hppa__)
81 /* The UltraSparc PCI controllers will disconnect at every 64-byte
82 * crossing anyway, so it makes no sense to tell Tulip to burst
83 * any more than that.
84 */
85 static int csr0 = 0x01A00000 | 0x9000;
86 #elif defined(__arm__) || defined(__sh__)
87 static int csr0 = 0x01A00000 | 0x4800;
88 #elif defined(__mips__)
89 static int csr0 = 0x00200000 | 0x4000;
90 #else
91 static int csr0;
92 #endif
93
94 /* Operational parameters that usually are not changed. */
95 /* Time in jiffies before concluding the transmitter is hung. */
96 #define TX_TIMEOUT (4*HZ)
97
98
99 MODULE_AUTHOR("The Linux Kernel Team");
100 MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
101 MODULE_LICENSE("GPL");
102 module_param(tulip_debug, int, 0);
103 module_param(max_interrupt_work, int, 0);
104 module_param(rx_copybreak, int, 0);
105 module_param(csr0, int, 0);
106 module_param_array(options, int, NULL, 0);
107 module_param_array(full_duplex, int, NULL, 0);
108
109 #ifdef TULIP_DEBUG
110 int tulip_debug = TULIP_DEBUG;
111 #else
112 int tulip_debug = 1;
113 #endif
114
115 static void tulip_timer(struct timer_list *t)
116 {
117 struct tulip_private *tp = from_timer(tp, t, timer);
118 struct net_device *dev = tp->dev;
119
120 if (netif_running(dev))
121 schedule_work(&tp->media_work);
122 }
123
124 /*
125 * This table is used during operation for capabilities and the media timer.
126 *
127 * It is indexed via the values in 'enum chips'
128 */
129
130 const struct tulip_chip_table tulip_tbl[] = {
131 { }, /* placeholder for array, slot unused currently */
132 { }, /* placeholder for array, slot unused currently */
133
134 /* DC21140 */
135 { "Digital DS21140 Tulip", 128, 0x0001ebef,
136 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
137 tulip_media_task },
138
139 /* DC21142, DC21143 */
140 { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
141 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
142 | HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },
143
144 /* LC82C168 */
145 { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
146 HAS_MII | HAS_PNICNWAY, pnic_timer, },
147
148 /* MX98713 */
149 { "Macronix 98713 PMAC", 128, 0x0001ebef,
150 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
151
152 /* MX98715 */
153 { "Macronix 98715 PMAC", 256, 0x0001ebef,
154 HAS_MEDIA_TABLE, mxic_timer, },
155
156 /* MX98725 */
157 { "Macronix 98725 PMAC", 256, 0x0001ebef,
158 HAS_MEDIA_TABLE, mxic_timer, },
159
160 /* AX88140 */
161 { "ASIX AX88140", 128, 0x0001fbff,
162 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
163 | IS_ASIX, tulip_timer, tulip_media_task },
164
165 /* PNIC2 */
166 { "Lite-On PNIC-II", 256, 0x0801fbff,
167 HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },
168
169 /* COMET */
170 { "ADMtek Comet", 256, 0x0001abef,
171 HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },
172
173 /* COMPEX9881 */
174 { "Compex 9881 PMAC", 128, 0x0001ebef,
175 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
176
177 /* I21145 */
178 { "Intel DS21145 Tulip", 128, 0x0801fbff,
179 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
180 | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
181
182 /* DM910X */
183 #ifdef CONFIG_TULIP_DM910X
184 { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
185 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
186 tulip_timer, tulip_media_task },
187 #else
188 { NULL },
189 #endif
190
191 /* RS7112 */
192 { "Conexant LANfinity", 256, 0x0001ebef,
193 HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
194
195 };
196
197
198 static const struct pci_device_id tulip_pci_tbl[] = {
199 { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
200 { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
201 { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
202 { 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
203 { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
204 /* { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
205 { 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
206 { 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
207 { 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
208 { 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
209 { 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
210 { 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
211 { 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
212 { 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
213 { 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
214 { 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
215 { 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
216 { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
217 { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
218 { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
219 #ifdef CONFIG_TULIP_DM910X
220 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
221 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
222 #endif
223 { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
224 { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
225 { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
226 { 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
227 { 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
228 { 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
229 { 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
230 { 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
231 { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
232 { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
233 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
234 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
235 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
236 { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
237 { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
238 { } /* terminate list */
239 };
240 MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
241
242
243 /* Media capability flags (full-duplex, MII, 100Mbit, ...) indexed by media type. */
244 const char tulip_media_cap[32] =
245 {0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };
246
247 static void tulip_tx_timeout(struct net_device *dev, unsigned int txqueue);
248 static void tulip_init_ring(struct net_device *dev);
249 static void tulip_free_ring(struct net_device *dev);
250 static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
251 struct net_device *dev);
252 static int tulip_open(struct net_device *dev);
253 static int tulip_close(struct net_device *dev);
254 static void tulip_up(struct net_device *dev);
255 static void tulip_down(struct net_device *dev);
256 static struct net_device_stats *tulip_get_stats(struct net_device *dev);
257 static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
258 static void set_rx_mode(struct net_device *dev);
259 static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
260 #ifdef CONFIG_NET_POLL_CONTROLLER
261 static void poll_tulip(struct net_device *dev);
262 #endif
263
264 static void tulip_set_power_state (struct tulip_private *tp,
265 int sleep, int snooze)
266 {
267 if (tp->flags & HAS_ACPI) {
268 u32 tmp, newtmp;
269 pci_read_config_dword (tp->pdev, CFDD, &tmp);
270 newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
271 if (sleep)
272 newtmp |= CFDD_Sleep;
273 else if (snooze)
274 newtmp |= CFDD_Snooze;
275 if (tmp != newtmp)
276 pci_write_config_dword (tp->pdev, CFDD, newtmp);
277 }
278
279 }
280
281
282 static void tulip_up(struct net_device *dev)
283 {
284 struct tulip_private *tp = netdev_priv(dev);
285 void __iomem *ioaddr = tp->base_addr;
286 int next_tick = 3*HZ;
287 u32 reg;
288 int i;
289
290 #ifdef CONFIG_TULIP_NAPI
291 napi_enable(&tp->napi);
292 #endif
293
294 /* Wake the chip from sleep/snooze mode. */
295 tulip_set_power_state (tp, 0, 0);
296
297 /* Disable all WOL events */
298 pci_enable_wake(tp->pdev, PCI_D3hot, 0);
299 pci_enable_wake(tp->pdev, PCI_D3cold, 0);
300 tulip_set_wolopts(tp->pdev, 0);
301
302 /* On some chip revs we must set the MII/SYM port before the reset!? */
303 if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
304 iowrite32(0x00040000, ioaddr + CSR6);
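/* 0x00040000 is presumably the CSR6 port-select (PS) bit, steering the
chip to the MII/SYM port ahead of the reset below. */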
305
306 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
307 iowrite32(0x00000001, ioaddr + CSR0);
308 pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg); /* flush write */
309 udelay(100);
310
311 /* Deassert reset.
312 Wait the specified 50 PCI cycles after a reset by initializing
313 Tx and Rx queues and the address filter list. */
314 iowrite32(tp->csr0, ioaddr + CSR0);
315 pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg); /* flush write */
316 udelay(100);
317
318 if (tulip_debug > 1)
319 netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);
320
321 iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
322 iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
323 tp->cur_rx = tp->cur_tx = 0;
324 tp->dirty_rx = tp->dirty_tx = 0;
325
326 if (tp->flags & MC_HASH_ONLY) {
327 u32 addr_low = get_unaligned_le32(dev->dev_addr);
328 u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
329 if (tp->chip_id == AX88140) {
330 iowrite32(0, ioaddr + CSR13);
331 iowrite32(addr_low, ioaddr + CSR14);
332 iowrite32(1, ioaddr + CSR13);
333 iowrite32(addr_high, ioaddr + CSR14);
334 } else if (tp->flags & COMET_MAC_ADDR) {
335 iowrite32(addr_low, ioaddr + 0xA4);
336 iowrite32(addr_high, ioaddr + 0xA8);
337 iowrite32(0, ioaddr + CSR27);
338 iowrite32(0, ioaddr + CSR28);
339 }
340 } else {
341 /* This is set_rx_mode(), but without starting the transmitter. */
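/* The 192-byte setup frame holds 16 perfect-filter entries of 12 bytes
each; every 16-bit word of an address is written twice (hence the doubled
stores below), and our own station address occupies the final entry at
&tp->setup_frame[15*6]. */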
342 u16 *eaddrs = (u16 *)dev->dev_addr;
343 u16 *setup_frm = &tp->setup_frame[15*6];
344 dma_addr_t mapping;
345
346 /* 21140 bug: you must add the broadcast address. */
347 memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
348 /* Fill the final entry of the table with our physical address. */
349 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
350 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
351 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
352
353 mapping = pci_map_single(tp->pdev, tp->setup_frame,
354 sizeof(tp->setup_frame),
355 PCI_DMA_TODEVICE);
356 tp->tx_buffers[tp->cur_tx].skb = NULL;
357 tp->tx_buffers[tp->cur_tx].mapping = mapping;
358
359 /* Put the setup frame on the Tx list. */
360 tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
361 tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
362 tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
363
364 tp->cur_tx++;
365 }
366
367 tp->saved_if_port = dev->if_port;
368 if (dev->if_port == 0)
369 dev->if_port = tp->default_port;
370
371 /* Allow selecting a default media. */
372 i = 0;
373 if (tp->mtable == NULL)
374 goto media_picked;
375 if (dev->if_port) {
376 int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
377 (dev->if_port == 12 ? 0 : dev->if_port);
378 for (i = 0; i < tp->mtable->leafcount; i++)
379 if (tp->mtable->mleaf[i].media == looking_for) {
380 dev_info(&dev->dev,
381 "Using user-specified media %s\n",
382 medianame[dev->if_port]);
383 goto media_picked;
384 }
385 }
386 if ((tp->mtable->defaultmedia & 0x0800) == 0) {
387 int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
388 for (i = 0; i < tp->mtable->leafcount; i++)
389 if (tp->mtable->mleaf[i].media == looking_for) {
390 dev_info(&dev->dev,
391 "Using EEPROM-set media %s\n",
392 medianame[looking_for]);
393 goto media_picked;
394 }
395 }
396 /* Start sensing first non-full-duplex media. */
397 for (i = tp->mtable->leafcount - 1;
398 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
399 ;
400 media_picked:
401
402 tp->csr6 = 0;
403 tp->cur_index = i;
404 tp->nwayset = 0;
405
406 if (dev->if_port) {
407 if (tp->chip_id == DC21143 &&
408 (tulip_media_cap[dev->if_port] & MediaIsMII)) {
409 /* We must reset the media CSRs when we force-select MII mode. */
410 iowrite32(0x0000, ioaddr + CSR13);
411 iowrite32(0x0000, ioaddr + CSR14);
412 iowrite32(0x0008, ioaddr + CSR15);
413 }
414 tulip_select_media(dev, 1);
415 } else if (tp->chip_id == DC21142) {
416 if (tp->mii_cnt) {
417 tulip_select_media(dev, 1);
418 if (tulip_debug > 1)
419 dev_info(&dev->dev,
420 "Using MII transceiver %d, status %04x\n",
421 tp->phys[0],
422 tulip_mdio_read(dev, tp->phys[0], 1));
423 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
424 tp->csr6 = csr6_mask_hdcap;
425 dev->if_port = 11;
426 iowrite32(0x0000, ioaddr + CSR13);
427 iowrite32(0x0000, ioaddr + CSR14);
428 } else
429 t21142_start_nway(dev);
430 } else if (tp->chip_id == PNIC2) {
431 /* for initial startup advertise 10/100 Full and Half */
432 tp->sym_advertise = 0x01E0;
433 /* enable autonegotiate end interrupt */
434 iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
435 iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
436 pnic2_start_nway(dev);
437 } else if (tp->chip_id == LC82C168 && ! tp->medialock) {
438 if (tp->mii_cnt) {
439 dev->if_port = 11;
440 tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
441 iowrite32(0x0001, ioaddr + CSR15);
442 } else if (ioread32(ioaddr + CSR5) & TPLnkPass)
443 pnic_do_nway(dev);
444 else {
445 /* Start with 10mbps to do autonegotiation. */
446 iowrite32(0x32, ioaddr + CSR12);
447 tp->csr6 = 0x00420000;
448 iowrite32(0x0001B078, ioaddr + 0xB8);
449 iowrite32(0x0201B078, ioaddr + 0xB8);
450 next_tick = 1*HZ;
451 }
452 } else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
453 ! tp->medialock) {
454 dev->if_port = 0;
455 tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
456 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
457 } else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
458 /* Provided by BOLO, Macronix - 12/10/1998. */
459 dev->if_port = 0;
460 tp->csr6 = 0x01a80200;
461 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
462 iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
463 } else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
464 /* Enable automatic Tx underrun recovery. */
465 iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
466 dev->if_port = tp->mii_cnt ? 11 : 0;
467 tp->csr6 = 0x00040000;
468 } else if (tp->chip_id == AX88140) {
469 tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
470 } else
471 tulip_select_media(dev, 1);
472
473 /* Start the chip's Tx to process setup frame. */
474 tulip_stop_rxtx(tp);
475 barrier();
476 udelay(5);
477 iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
478
479 /* Enable interrupts by setting the interrupt mask. */
480 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
481 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
482 tulip_start_rxtx(tp);
483 iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
484
485 if (tulip_debug > 2) {
486 netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
487 ioread32(ioaddr + CSR0),
488 ioread32(ioaddr + CSR5),
489 ioread32(ioaddr + CSR6));
490 }
491
492 /* Set the timer to switch to check for link beat and perhaps switch
493 to an alternate media type. */
494 tp->timer.expires = RUN_AT(next_tick);
495 add_timer(&tp->timer);
496 #ifdef CONFIG_TULIP_NAPI
497 timer_setup(&tp->oom_timer, oom_timer, 0);
498 #endif
499 }
500
501 static int
502 tulip_open(struct net_device *dev)
503 {
504 struct tulip_private *tp = netdev_priv(dev);
505 int retval;
506
507 tulip_init_ring (dev);
508
509 retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
510 dev->name, dev);
511 if (retval)
512 goto free_ring;
513
514 tulip_up (dev);
515
516 netif_start_queue (dev);
517
518 return 0;
519
520 free_ring:
521 tulip_free_ring (dev);
522 return retval;
523 }
524
525
526 static void tulip_tx_timeout(struct net_device *dev, unsigned int txqueue)
527 {
528 struct tulip_private *tp = netdev_priv(dev);
529 void __iomem *ioaddr = tp->base_addr;
530 unsigned long flags;
531
532 spin_lock_irqsave (&tp->lock, flags);
533
534 if (tulip_media_cap[dev->if_port] & MediaIsMII) {
535 /* Do nothing -- the media monitor should handle this. */
536 if (tulip_debug > 1)
537 dev_warn(&dev->dev,
538 "Transmit timeout using MII device\n");
539 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
540 tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
541 tp->chip_id == DM910X) {
542 dev_warn(&dev->dev,
543 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
544 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
545 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
546 ioread32(ioaddr + CSR15));
547 tp->timeout_recovery = 1;
548 schedule_work(&tp->media_work);
549 goto out_unlock;
550 } else if (tp->chip_id == PNIC2) {
551 dev_warn(&dev->dev,
552 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
553 (int)ioread32(ioaddr + CSR5),
554 (int)ioread32(ioaddr + CSR6),
555 (int)ioread32(ioaddr + CSR7),
556 (int)ioread32(ioaddr + CSR12));
557 } else {
558 dev_warn(&dev->dev,
559 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
560 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
561 dev->if_port = 0;
562 }
563
564 #if defined(way_too_many_messages)
565 if (tulip_debug > 3) {
566 int i;
567 for (i = 0; i < RX_RING_SIZE; i++) {
568 u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
569 int j;
570 printk(KERN_DEBUG
571 "%2d: %08x %08x %08x %08x %02x %02x %02x\n",
572 i,
573 (unsigned int)tp->rx_ring[i].status,
574 (unsigned int)tp->rx_ring[i].length,
575 (unsigned int)tp->rx_ring[i].buffer1,
576 (unsigned int)tp->rx_ring[i].buffer2,
577 buf[0], buf[1], buf[2]);
578 for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
579 if (j < 100)
580 pr_cont(" %02x", buf[j]);
581 pr_cont(" j=%d\n", j);
582 }
583 printk(KERN_DEBUG " Rx ring %p: ", tp->rx_ring);
584 for (i = 0; i < RX_RING_SIZE; i++)
585 pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
586 printk(KERN_DEBUG " Tx ring %p: ", tp->tx_ring);
587 for (i = 0; i < TX_RING_SIZE; i++)
588 pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
589 pr_cont("\n");
590 }
591 #endif
592
593 tulip_tx_timeout_complete(tp, ioaddr);
594
595 out_unlock:
596 spin_unlock_irqrestore (&tp->lock, flags);
597 netif_trans_update(dev); /* prevent tx timeout */
598 netif_wake_queue (dev);
599 }
600
601
602 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
603 static void tulip_init_ring(struct net_device *dev)
604 {
605 struct tulip_private *tp = netdev_priv(dev);
606 int i;
607
608 tp->susp_rx = 0;
609 tp->ttimer = 0;
610 tp->nir = 0;
611
612 for (i = 0; i < RX_RING_SIZE; i++) {
613 tp->rx_ring[i].status = 0x00000000;
614 tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
615 tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
616 tp->rx_buffers[i].skb = NULL;
617 tp->rx_buffers[i].mapping = 0;
618 }
619 /* Mark the last entry as wrapping the ring. */
620 tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
621 tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
622
623 for (i = 0; i < RX_RING_SIZE; i++) {
624 dma_addr_t mapping;
625
626 /* Note the receive buffer must be longword aligned.
627 netdev_alloc_skb() provides 16 byte alignment. But do *not*
628 use skb_reserve() to align the IP header! */
629 struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
630 tp->rx_buffers[i].skb = skb;
631 if (skb == NULL)
632 break;
633 mapping = pci_map_single(tp->pdev, skb->data,
634 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
635 tp->rx_buffers[i].mapping = mapping;
636 tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
637 tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
638 }
639 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
640
641 /* The Tx buffer descriptor is filled in as needed, but we
642 do need to clear the ownership bit. */
643 for (i = 0; i < TX_RING_SIZE; i++) {
644 tp->tx_buffers[i].skb = NULL;
645 tp->tx_buffers[i].mapping = 0;
646 tp->tx_ring[i].status = 0x00000000;
647 tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
648 }
649 tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
650 }
651
652 static netdev_tx_t
653 tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
654 {
655 struct tulip_private *tp = netdev_priv(dev);
656 int entry;
657 u32 flag;
658 dma_addr_t mapping;
659 unsigned long flags;
660
661 spin_lock_irqsave(&tp->lock, flags);
662
663 /* Calculate the next Tx descriptor entry. */
664 entry = tp->cur_tx % TX_RING_SIZE;
665
666 tp->tx_buffers[entry].skb = skb;
667 mapping = pci_map_single(tp->pdev, skb->data,
668 skb->len, PCI_DMA_TODEVICE);
669 tp->tx_buffers[entry].mapping = mapping;
670 tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
671
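/* Length-field flags below (TDES1 on the 21x4x -- bit names are our
reading, not a datasheet quote): bit 31 requests a Tx-done interrupt,
bits 30/29 mark the last/first segment. So 0x60000000 queues a
single-fragment frame silently and 0xe0000000 also asks for an interrupt;
completion interrupts are requested only periodically to cut overhead. */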
672 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
673 flag = 0x60000000; /* No interrupt */
674 } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
675 flag = 0xe0000000; /* Tx-done intr. */
676 } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
677 flag = 0x60000000; /* No Tx-done intr. */
678 } else { /* Leave room for set_rx_mode() to fill entries. */
679 flag = 0xe0000000; /* Tx-done intr. */
680 netif_stop_queue(dev);
681 }
682 if (entry == TX_RING_SIZE-1)
683 flag = 0xe0000000 | DESC_RING_WRAP;
684
685 tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
686 /* if we were using Transmit Automatic Polling, we would need a
687 * wmb() here. */
688 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
689 wmb();
690
691 tp->cur_tx++;
692
693 /* Trigger an immediate transmit demand. */
694 iowrite32(0, tp->base_addr + CSR1);
695
696 spin_unlock_irqrestore(&tp->lock, flags);
697
698 return NETDEV_TX_OK;
699 }
700
701 static void tulip_clean_tx_ring(struct tulip_private *tp)
702 {
703 unsigned int dirty_tx;
704
705 for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
706 dirty_tx++) {
707 int entry = dirty_tx % TX_RING_SIZE;
708 int status = le32_to_cpu(tp->tx_ring[entry].status);
709
710 if (status < 0) {
711 tp->dev->stats.tx_errors++; /* It wasn't Txed */
712 tp->tx_ring[entry].status = 0;
713 }
714
715 /* Check for Tx filter setup frames. */
716 if (tp->tx_buffers[entry].skb == NULL) {
717 /* test because dummy frames not mapped */
718 if (tp->tx_buffers[entry].mapping)
719 pci_unmap_single(tp->pdev,
720 tp->tx_buffers[entry].mapping,
721 sizeof(tp->setup_frame),
722 PCI_DMA_TODEVICE);
723 continue;
724 }
725
726 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
727 tp->tx_buffers[entry].skb->len,
728 PCI_DMA_TODEVICE);
729
730 /* Free the original skb. */
731 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
732 tp->tx_buffers[entry].skb = NULL;
733 tp->tx_buffers[entry].mapping = 0;
734 }
735 }
736
737 static void tulip_down (struct net_device *dev)
738 {
739 struct tulip_private *tp = netdev_priv(dev);
740 void __iomem *ioaddr = tp->base_addr;
741 unsigned long flags;
742
743 cancel_work_sync(&tp->media_work);
744
745 #ifdef CONFIG_TULIP_NAPI
746 napi_disable(&tp->napi);
747 #endif
748
749 del_timer_sync (&tp->timer);
750 #ifdef CONFIG_TULIP_NAPI
751 del_timer_sync (&tp->oom_timer);
752 #endif
753 spin_lock_irqsave (&tp->lock, flags);
754
755 /* Disable interrupts by clearing the interrupt mask. */
756 iowrite32 (0x00000000, ioaddr + CSR7);
757
758 /* Stop the Tx and Rx processes. */
759 tulip_stop_rxtx(tp);
760
761 /* prepare receive buffers */
762 tulip_refill_rx(dev);
763
764 /* release any unconsumed transmit buffers */
765 tulip_clean_tx_ring(tp);
766
767 if (ioread32(ioaddr + CSR6) != 0xffffffff)
768 dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
769
770 spin_unlock_irqrestore (&tp->lock, flags);
771
772 timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0);
773
774 dev->if_port = tp->saved_if_port;
775
776 /* Leave the driver in snooze, not sleep, mode. */
777 tulip_set_power_state (tp, 0, 1);
778 }
779
780 static void tulip_free_ring (struct net_device *dev)
781 {
782 struct tulip_private *tp = netdev_priv(dev);
783 int i;
784
785 /* Free all the skbuffs in the Rx queue. */
786 for (i = 0; i < RX_RING_SIZE; i++) {
787 struct sk_buff *skb = tp->rx_buffers[i].skb;
788 dma_addr_t mapping = tp->rx_buffers[i].mapping;
789
790 tp->rx_buffers[i].skb = NULL;
791 tp->rx_buffers[i].mapping = 0;
792
793 tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */
794 tp->rx_ring[i].length = 0;
795 /* An invalid address. */
796 tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
797 if (skb) {
798 pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
799 PCI_DMA_FROMDEVICE);
800 dev_kfree_skb (skb);
801 }
802 }
803
804 for (i = 0; i < TX_RING_SIZE; i++) {
805 struct sk_buff *skb = tp->tx_buffers[i].skb;
806
807 if (skb != NULL) {
808 pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
809 skb->len, PCI_DMA_TODEVICE);
810 dev_kfree_skb (skb);
811 }
812 tp->tx_buffers[i].skb = NULL;
813 tp->tx_buffers[i].mapping = 0;
814 }
815 }
816
817 static int tulip_close (struct net_device *dev)
818 {
819 struct tulip_private *tp = netdev_priv(dev);
820 void __iomem *ioaddr = tp->base_addr;
821
822 netif_stop_queue (dev);
823
824 tulip_down (dev);
825
826 if (tulip_debug > 1)
827 netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
828 ioread32 (ioaddr + CSR5));
829
830 free_irq (tp->pdev->irq, dev);
831
832 tulip_free_ring (dev);
833
834 return 0;
835 }
836
837 static struct net_device_stats *tulip_get_stats(struct net_device *dev)
838 {
839 struct tulip_private *tp = netdev_priv(dev);
840 void __iomem *ioaddr = tp->base_addr;
841
842 if (netif_running(dev)) {
843 unsigned long flags;
844
845 spin_lock_irqsave (&tp->lock, flags);
846
847 dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
848
849 spin_unlock_irqrestore(&tp->lock, flags);
850 }
851
852 return &dev->stats;
853 }
854
855
856 static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
857 {
858 struct tulip_private *np = netdev_priv(dev);
859 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
860 strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
861 }
862
863
864 static int tulip_ethtool_set_wol(struct net_device *dev,
865 struct ethtool_wolinfo *wolinfo)
866 {
867 struct tulip_private *tp = netdev_priv(dev);
868
869 if (wolinfo->wolopts & (~tp->wolinfo.supported))
870 return -EOPNOTSUPP;
871
872 tp->wolinfo.wolopts = wolinfo->wolopts;
873 device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
874 return 0;
875 }
876
877 static void tulip_ethtool_get_wol(struct net_device *dev,
878 struct ethtool_wolinfo *wolinfo)
879 {
880 struct tulip_private *tp = netdev_priv(dev);
881
882 wolinfo->supported = tp->wolinfo.supported;
883 wolinfo->wolopts = tp->wolinfo.wolopts;
884 return;
885 }
886
887
888 static const struct ethtool_ops ops = {
889 .get_drvinfo = tulip_get_drvinfo,
890 .set_wol = tulip_ethtool_set_wol,
891 .get_wol = tulip_ethtool_get_wol,
892 };
893
894 /* Provide ioctl() calls to examine the MII xcvr state. */
895 static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
896 {
897 struct tulip_private *tp = netdev_priv(dev);
898 void __iomem *ioaddr = tp->base_addr;
899 struct mii_ioctl_data *data = if_mii(rq);
900 const unsigned int phy_idx = 0;
901 int phy = tp->phys[phy_idx] & 0x1f;
902 unsigned int regnum = data->reg_num;
903
904 switch (cmd) {
905 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
906 if (tp->mii_cnt)
907 data->phy_id = phy;
908 else if (tp->flags & HAS_NWAY)
909 data->phy_id = 32;
910 else if (tp->chip_id == COMET)
911 data->phy_id = 1;
912 else
913 return -ENODEV;
914 fallthrough;
915
916 case SIOCGMIIREG: /* Read MII PHY register. */
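/* phy_id 32 is a pseudo-PHY: chips with internal NWay have no real MII
registers, so plausible BMCR/BMSR/ANAR/ANLPAR values are synthesized
from the SIA CSR12/CSR14 state below. */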
917 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
918 int csr12 = ioread32 (ioaddr + CSR12);
919 int csr14 = ioread32 (ioaddr + CSR14);
920 switch (regnum) {
921 case 0:
922 if (((csr14<<5) & 0x1000) ||
923 (dev->if_port == 5 && tp->nwayset))
924 data->val_out = 0x1000;
925 else
926 data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
927 | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
928 break;
929 case 1:
930 data->val_out =
931 0x1848 +
932 ((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
933 ((csr12&0x06) == 6 ? 0 : 4);
934 data->val_out |= 0x6048;
935 break;
936 case 4:
937 /* Advertised value, bogus 10baseTx-FD value from CSR6. */
938 data->val_out =
939 ((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
940 ((csr14 >> 1) & 0x20) + 1;
941 data->val_out |= ((csr14 >> 9) & 0x03C0);
942 break;
943 case 5: data->val_out = tp->lpar; break;
944 default: data->val_out = 0; break;
945 }
946 } else {
947 data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
948 }
949 return 0;
950
951 case SIOCSMIIREG: /* Write MII PHY register. */
952 if (regnum & ~0x1f)
953 return -EINVAL;
954 if (data->phy_id == phy) {
955 u16 value = data->val_in;
956 switch (regnum) {
957 case 0: /* Check for autonegotiation on or reset. */
958 tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
959 if (tp->full_duplex_lock)
960 tp->full_duplex = (value & 0x0100) ? 1 : 0;
961 break;
962 case 4:
963 tp->advertising[phy_idx] =
964 tp->mii_advertise = data->val_in;
965 break;
966 }
967 }
968 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
969 u16 value = data->val_in;
970 if (regnum == 0) {
971 if ((value & 0x1200) == 0x1200) {
972 if (tp->chip_id == PNIC2) {
973 pnic2_start_nway (dev);
974 } else {
975 t21142_start_nway (dev);
976 }
977 }
978 } else if (regnum == 4)
979 tp->sym_advertise = value;
980 } else {
981 tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
982 }
983 return 0;
984 default:
985 return -EOPNOTSUPP;
986 }
987
988 return -EOPNOTSUPP;
989 }
990
991
992 /* Set or clear the multicast filter for this adaptor.
993 Note that we only use exclusion around actually queueing the
994 new frame, not around filling tp->setup_frame. This is non-deterministic
995 when re-entered but still correct. */
996
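/* Two setup-frame layouts are built below: the "hash" form carries a
512-bit imperfect hash filter (32 16-bit words, each stored twice) plus
our station address in a dedicated perfect-filter slot, while the
"perfect" form carries up to 16 complete 12-byte address entries. */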
997 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
998 {
999 struct tulip_private *tp = netdev_priv(dev);
1000 u16 hash_table[32];
1001 struct netdev_hw_addr *ha;
1002 int i;
1003 u16 *eaddrs;
1004
1005 memset(hash_table, 0, sizeof(hash_table));
1006 __set_bit_le(255, hash_table); /* Broadcast entry */
1007 /* This should work on big-endian machines as well. */
1008 netdev_for_each_mc_addr(ha, dev) {
1009 int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
1010
1011 __set_bit_le(index, hash_table);
1012 }
1013 for (i = 0; i < 32; i++) {
1014 *setup_frm++ = hash_table[i];
1015 *setup_frm++ = hash_table[i];
1016 }
1017 setup_frm = &tp->setup_frame[13*6];
1018
1019 /* Fill the final entry with our physical address. */
1020 eaddrs = (u16 *)dev->dev_addr;
1021 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1022 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1023 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1024 }
1025
1026 static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
1027 {
1028 struct tulip_private *tp = netdev_priv(dev);
1029 struct netdev_hw_addr *ha;
1030 u16 *eaddrs;
1031
1032 /* We have <= 14 addresses so we can use the wonderful
1033 16 address perfect filtering of the Tulip. */
1034 netdev_for_each_mc_addr(ha, dev) {
1035 eaddrs = (u16 *) ha->addr;
1036 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1037 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1038 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1039 }
1040 /* Fill the unused entries with the broadcast address. */
1041 memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
1042 setup_frm = &tp->setup_frame[15*6];
1043
1044 /* Fill the final entry with our physical address. */
1045 eaddrs = (u16 *)dev->dev_addr;
1046 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1047 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1048 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1049 }
1050
1051
1052 static void set_rx_mode(struct net_device *dev)
1053 {
1054 struct tulip_private *tp = netdev_priv(dev);
1055 void __iomem *ioaddr = tp->base_addr;
1056 int csr6;
1057
1058 csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
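/* 0x00D5 covers the CSR6 receive-filtering mode bits (promiscuous,
pass-all-multicast and the hash/inverse/perfect filtering selects --
our reading of the 21x4x register layout); clear them here and rebuild
the wanted mode below. */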
1059
1060 tp->csr6 &= ~0x00D5;
1061 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1062 tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
1063 csr6 |= AcceptAllMulticast | AcceptAllPhys;
1064 } else if ((netdev_mc_count(dev) > 1000) ||
1065 (dev->flags & IFF_ALLMULTI)) {
1066 /* Too many to filter well -- accept all multicasts. */
1067 tp->csr6 |= AcceptAllMulticast;
1068 csr6 |= AcceptAllMulticast;
1069 } else if (tp->flags & MC_HASH_ONLY) {
1070 /* Some work-alikes have only a 64-entry hash filter table. */
1071 /* Should verify correctness on big-endian/__powerpc__ */
1072 struct netdev_hw_addr *ha;
1073 if (netdev_mc_count(dev) > 64) {
1074 /* Arbitrary non-effective limit. */
1075 tp->csr6 |= AcceptAllMulticast;
1076 csr6 |= AcceptAllMulticast;
1077 } else {
1078 u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
1079 int filterbit;
1080 netdev_for_each_mc_addr(ha, dev) {
1081 if (tp->flags & COMET_MAC_ADDR)
1082 filterbit = ether_crc_le(ETH_ALEN,
1083 ha->addr);
1084 else
1085 filterbit = ether_crc(ETH_ALEN,
1086 ha->addr) >> 26;
1087 filterbit &= 0x3f;
1088 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1089 if (tulip_debug > 2)
1090 dev_info(&dev->dev,
1091 "Added filter for %pM %08x bit %d\n",
1092 ha->addr,
1093 ether_crc(ETH_ALEN, ha->addr),
1094 filterbit);
1095 }
1096 if (mc_filter[0] == tp->mc_filter[0] &&
1097 mc_filter[1] == tp->mc_filter[1])
1098 ; /* No change. */
1099 else if (tp->flags & IS_ASIX) {
1100 iowrite32(2, ioaddr + CSR13);
1101 iowrite32(mc_filter[0], ioaddr + CSR14);
1102 iowrite32(3, ioaddr + CSR13);
1103 iowrite32(mc_filter[1], ioaddr + CSR14);
1104 } else if (tp->flags & COMET_MAC_ADDR) {
1105 iowrite32(mc_filter[0], ioaddr + CSR27);
1106 iowrite32(mc_filter[1], ioaddr + CSR28);
1107 }
1108 tp->mc_filter[0] = mc_filter[0];
1109 tp->mc_filter[1] = mc_filter[1];
1110 }
1111 } else {
1112 unsigned long flags;
1113 u32 tx_flags = 0x08000000 | 192;
1114
1115 /* Note that only the low-address shortword of setup_frame is valid!
1116 The values are doubled for big-endian architectures. */
1117 if (netdev_mc_count(dev) > 14) {
1118 /* Must use a multicast hash table. */
1119 build_setup_frame_hash(tp->setup_frame, dev);
1120 tx_flags = 0x08400000 | 192;
1121 } else {
1122 build_setup_frame_perfect(tp->setup_frame, dev);
1123 }
1124
1125 spin_lock_irqsave(&tp->lock, flags);
1126
1127 if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1128 /* Same setup recently queued, we need not add it. */
1129 } else {
1130 unsigned int entry;
1131 int dummy = -1;
1132
1133 /* Now add this frame to the Tx list. */
1134
1135 entry = tp->cur_tx++ % TX_RING_SIZE;
1136
1137 if (entry != 0) {
1138 /* Avoid a chip errata by prefixing a dummy entry. */
1139 tp->tx_buffers[entry].skb = NULL;
1140 tp->tx_buffers[entry].mapping = 0;
1141 tp->tx_ring[entry].length =
1142 (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1143 tp->tx_ring[entry].buffer1 = 0;
1144 /* Must set DescOwned later to avoid race with chip */
1145 dummy = entry;
1146 entry = tp->cur_tx++ % TX_RING_SIZE;
1147
1148 }
1149
1150 tp->tx_buffers[entry].skb = NULL;
1151 tp->tx_buffers[entry].mapping =
1152 pci_map_single(tp->pdev, tp->setup_frame,
1153 sizeof(tp->setup_frame),
1154 PCI_DMA_TODEVICE);
1155 /* Put the setup frame on the Tx list. */
1156 if (entry == TX_RING_SIZE-1)
1157 tx_flags |= DESC_RING_WRAP; /* Wrap ring. */
1158 tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
1159 tp->tx_ring[entry].buffer1 =
1160 cpu_to_le32(tp->tx_buffers[entry].mapping);
1161 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
1162 if (dummy >= 0)
1163 tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
1164 if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
1165 netif_stop_queue(dev);
1166
1167 /* Trigger an immediate transmit demand. */
1168 iowrite32(0, ioaddr + CSR1);
1169 }
1170
1171 spin_unlock_irqrestore(&tp->lock, flags);
1172 }
1173
1174 iowrite32(csr6, ioaddr + CSR6);
1175 }
1176
1177 #ifdef CONFIG_TULIP_MWI
1178 static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
1179 {
1180 struct tulip_private *tp = netdev_priv(dev);
1181 u8 cache;
1182 u16 pci_command;
1183 u32 csr0;
1184
1185 if (tulip_debug > 3)
1186 netdev_dbg(dev, "tulip_mwi_config()\n");
1187
1188 tp->csr0 = csr0 = 0;
1189
1190 /* if we have any cache line size at all, we can do MRM and MWI */
1191 csr0 |= MRM | MWI;
1192
1193 /* Enable MWI in the standard PCI command bit.
1194 * Check for the case where MWI is desired but not available
1195 */
1196 pci_try_set_mwi(pdev);
1197
1198 /* read result from hardware (in case bit refused to enable) */
1199 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1200 if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
1201 csr0 &= ~MWI;
1202
1203 /* if cache line size hardwired to zero, no MWI */
1204 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
1205 if ((csr0 & MWI) && (cache == 0)) {
1206 csr0 &= ~MWI;
1207 pci_clear_mwi(pdev);
1208 }
1209
1210 /* assign per-cacheline-size cache alignment and
1211 * burst length values
1212 */
1213 switch (cache) {
1214 case 8:
1215 csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
1216 break;
1217 case 16:
1218 csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
1219 break;
1220 case 32:
1221 csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
1222 break;
1223 default:
1224 cache = 0;
1225 break;
1226 }
1227
1228 /* if we have a good cache line size, we by now have a good
1229 * csr0, so save it and exit
1230 */
1231 if (cache)
1232 goto out;
1233
1234 /* we don't have a good csr0 or cache line size, disable MWI */
1235 if (csr0 & MWI) {
1236 pci_clear_mwi(pdev);
1237 csr0 &= ~MWI;
1238 }
1239
1240 /* sane defaults for burst length and cache alignment
1241 * originally from de4x5 driver
1242 */
1243 csr0 |= (8 << BurstLenShift) | (1 << CALShift);
1244
1245 out:
1246 tp->csr0 = csr0;
1247 if (tulip_debug > 2)
1248 netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
1249 cache, csr0);
1250 }
1251 #endif
1252
1253 /*
1254 * Chips that have the MRM/reserved bit quirk and the burst quirk. That
1255 * is the DM910X and the on chip ULi devices
1256 */
1257
1258 static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1259 {
1260 if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1261 return 1;
1262 return 0;
1263 }
1264
1265 static const struct net_device_ops tulip_netdev_ops = {
1266 .ndo_open = tulip_open,
1267 .ndo_start_xmit = tulip_start_xmit,
1268 .ndo_tx_timeout = tulip_tx_timeout,
1269 .ndo_stop = tulip_close,
1270 .ndo_get_stats = tulip_get_stats,
1271 .ndo_do_ioctl = private_ioctl,
1272 .ndo_set_rx_mode = set_rx_mode,
1273 .ndo_set_mac_address = eth_mac_addr,
1274 .ndo_validate_addr = eth_validate_addr,
1275 #ifdef CONFIG_NET_POLL_CONTROLLER
1276 .ndo_poll_controller = poll_tulip,
1277 #endif
1278 };
1279
1280 static const struct pci_device_id early_486_chipsets[] = {
1281 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1282 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1283 { },
1284 };
1285
1286 static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1287 {
1288 struct tulip_private *tp;
1289 /* See note below on the multiport cards. */
1290 static unsigned char last_phys_addr[ETH_ALEN] = {
1291 0x00, 'L', 'i', 'n', 'u', 'x'
1292 };
1293 static int last_irq;
1294 int i, irq;
1295 unsigned short sum;
1296 unsigned char *ee_data;
1297 struct net_device *dev;
1298 void __iomem *ioaddr;
1299 static int board_idx = -1;
1300 int chip_idx = ent->driver_data;
1301 const char *chip_name = tulip_tbl[chip_idx].chip_name;
1302 unsigned int eeprom_missing = 0;
1303 unsigned int force_csr0 = 0;
1304
1305 board_idx++;
1306
1307 /*
1308 * LAN Media cards wire a Tulip chip to a WAN interface and need a very
1309 * different driver (the lmc driver).
1310 */
1311
1312 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1313 pr_err("skipping LMC card\n");
1314 return -ENODEV;
1315 } else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
1316 (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
1317 pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
1318 pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
1319 pr_err("skipping SBE T3E3 port\n");
1320 return -ENODEV;
1321 }
1322
1323 /*
1324 * DM910x chips should be handled by the dmfe driver, except
1325 * on-board chips on SPARC systems. Also, early DM9100s need
1326 * software CRC which only the dmfe driver supports.
1327 */
1328
1329 #ifdef CONFIG_TULIP_DM910X
1330 if (chip_idx == DM910X) {
1331 struct device_node *dp;
1332
1333 if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
1334 pdev->revision < 0x30) {
1335 pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
1336 return -ENODEV;
1337 }
1338
1339 dp = pci_device_to_OF_node(pdev);
1340 if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
1341 pr_info("skipping DM910x expansion card (use dmfe)\n");
1342 return -ENODEV;
1343 }
1344 }
1345 #endif
1346
1347 /*
1348 * Look for early PCI chipsets where people report hangs
1349 * when the workarounds are not enabled.
1350 */
1351
1352 /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
1353 aligned. Aries might need this too. The Saturn errata are not
1354 pretty reading but thankfully it's an old 486 chipset.
1355
1356 2. The dreaded SiS496 486 chipset. Same workaround as Intel
1357 Saturn.
1358 */
1359
1360 if (pci_dev_present(early_486_chipsets)) {
1361 csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
1362 force_csr0 = 1;
1363 }
1364
1365 /* bugfix: the ASIX must have a burst limit or horrible things happen. */
1366 if (chip_idx == AX88140) {
1367 if ((csr0 & 0x3f00) == 0)
1368 csr0 |= 0x2000;
1369 }
1370
1371 /* PNIC doesn't have MWI/MRL/MRM... */
1372 if (chip_idx == LC82C168)
1373 csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
1374
1375 /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
1376 if (tulip_uli_dm_quirk(pdev)) {
1377 csr0 &= ~0x01f100ff;
1378 #if defined(CONFIG_SPARC)
1379 csr0 = (csr0 & ~0xff00) | 0xe000;
1380 #endif
1381 }
1382 /*
1383 * And back to business
1384 */
1385
1386 i = pci_enable_device(pdev);
1387 if (i) {
1388 pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
1389 return i;
1390 }
1391
1392 irq = pdev->irq;
1393
1394 /* alloc_etherdev ensures aligned and zeroed private structures */
1395 dev = alloc_etherdev (sizeof (*tp));
1396 if (!dev)
1397 return -ENOMEM;
1398
1399 SET_NETDEV_DEV(dev, &pdev->dev);
1400 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1401 pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
1402 pci_name(pdev),
1403 (unsigned long long)pci_resource_len (pdev, 0),
1404 (unsigned long long)pci_resource_start (pdev, 0));
1405 goto err_out_free_netdev;
1406 }
1407
1408 /* grab all resources from both PIO and MMIO regions, as we
1409 * don't want anyone else messing around with our hardware */
1410 if (pci_request_regions (pdev, DRV_NAME))
1411 goto err_out_free_netdev;
1412
1413 ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
1414
1415 if (!ioaddr)
1416 goto err_out_free_res;
1417
1418 /*
1419 * initialize private data structure 'tp'
1420 * it is zeroed and aligned in alloc_etherdev
1421 */
1422 tp = netdev_priv(dev);
1423 tp->dev = dev;
1424
1425 tp->rx_ring = pci_alloc_consistent(pdev,
1426 sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
1427 sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
1428 &tp->rx_ring_dma);
1429 if (!tp->rx_ring)
1430 goto err_out_mtable;
1431 tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
1432 tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
1433
1434 tp->chip_id = chip_idx;
1435 tp->flags = tulip_tbl[chip_idx].flags;
1436
1437 tp->wolinfo.supported = 0;
1438 tp->wolinfo.wolopts = 0;
1439 /* COMET: Enable power management only for AN983B */
1440 if (chip_idx == COMET ) {
1441 u32 sig;
1442 pci_read_config_dword (pdev, 0x80, &sig);
1443 if (sig == 0x09811317) {
1444 tp->flags |= COMET_PM;
1445 tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
1446 pr_info("%s: Enabled WOL support for AN983B\n",
1447 __func__);
1448 }
1449 }
1450 tp->pdev = pdev;
1451 tp->base_addr = ioaddr;
1452 tp->revision = pdev->revision;
1453 tp->csr0 = csr0;
1454 spin_lock_init(&tp->lock);
1455 spin_lock_init(&tp->mii_lock);
1456 timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0);
1457
1458 INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1459
1460 #ifdef CONFIG_TULIP_MWI
1461 if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1462 tulip_mwi_config (pdev, dev);
1463 #endif
1464
1465 /* Stop the chip's Tx and Rx processes. */
1466 tulip_stop_rxtx(tp);
1467
1468 pci_set_master(pdev);
1469
1470 #ifdef CONFIG_GSC
1471 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
1472 switch (pdev->subsystem_device) {
1473 default:
1474 break;
1475 case 0x1061:
1476 case 0x1062:
1477 case 0x1063:
1478 case 0x1098:
1479 case 0x1099:
1480 case 0x10EE:
1481 tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
1482 chip_name = "GSC DS21140 Tulip";
1483 }
1484 }
1485 #endif
1486
1487 /* Clear the missed-packet counter. */
1488 ioread32(ioaddr + CSR8);
1489
1490 /* The station address ROM is read byte serially. The register must
1491 be polled, waiting for the value to be read bit serially from the
1492 EEPROM.
1493 */
1494 ee_data = tp->eeprom;
1495 memset(ee_data, 0, sizeof(tp->eeprom));
1496 sum = 0;
1497 if (chip_idx == LC82C168) {
1498 for (i = 0; i < 3; i++) {
1499 int value, boguscnt = 100000;
1500 iowrite32(0x600 | i, ioaddr + 0x98);
1501 do {
1502 value = ioread32(ioaddr + CSR9);
1503 } while (value < 0 && --boguscnt > 0);
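/* A negative CSR9 value (bit 31 set) appears to mean the PNIC's
serial-ROM read is still in progress; boguscnt merely bounds the poll. */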
1504 put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
1505 sum += value & 0xffff;
1506 }
1507 } else if (chip_idx == COMET) {
1508 /* No need to read the EEPROM. */
1509 put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
1510 put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
1511 for (i = 0; i < 6; i ++)
1512 sum += dev->dev_addr[i];
1513 } else {
1514 /* A serial EEPROM interface, we read now and sort it out later. */
1515 int sa_offset = 0;
1516 int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
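/* The dummy read above probes whether the serial EEPROM uses 6-bit or
8-bit addressing (64-word vs. 256-word parts, as far as we can tell). */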
1517 int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);
1518
1519 if (ee_max_addr > sizeof(tp->eeprom))
1520 ee_max_addr = sizeof(tp->eeprom);
1521
1522 for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
1523 u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
1524 ee_data[i] = data & 0xff;
1525 ee_data[i + 1] = data >> 8;
1526 }
1527
1528 /* DEC now has a specification (see Notes) but early board makers
1529 just put the address in the first EEPROM locations. */
1530 /* This does memcmp(ee_data, ee_data+16, 8) */
1531 for (i = 0; i < 8; i ++)
1532 if (ee_data[i] != ee_data[16+i])
1533 sa_offset = 20;
1534 if (chip_idx == CONEXANT) {
1535 /* Check that the tuple type and length is correct. */
1536 if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
1537 sa_offset = 0x19A;
1538 } else if (ee_data[0] == 0xff && ee_data[1] == 0xff &&
1539 ee_data[2] == 0) {
1540 sa_offset = 2; /* Grrr, damn Matrox boards. */
1541 }
1542 #ifdef CONFIG_MIPS_COBALT
1543 if ((pdev->bus->number == 0) &&
1544 ((PCI_SLOT(pdev->devfn) == 7) ||
1545 (PCI_SLOT(pdev->devfn) == 12))) {
1546 /* Cobalt MAC address in first EEPROM locations. */
1547 sa_offset = 0;
1548 /* Ensure our media table fixup gets applied */
1549 memcpy(ee_data + 16, ee_data, 8);
1550 }
1551 #endif
1552 #ifdef CONFIG_GSC
1553 /* Check to see if we have a broken srom */
1554 if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
1555 /* pci_vendor_id and subsystem_id are swapped */
1556 ee_data[0] = ee_data[2];
1557 ee_data[1] = ee_data[3];
1558 ee_data[2] = 0x61;
1559 ee_data[3] = 0x10;
1560
1561 /* HSC-PCI boards need to be byte-swapped and shifted
1562 * up 1 word. This shift needs to happen at the end
1563 * of the MAC first because of the 2 byte overlap.
1564 */
1565 for (i = 4; i >= 0; i -= 2) {
1566 ee_data[17 + i + 3] = ee_data[17 + i];
1567 ee_data[16 + i + 5] = ee_data[16 + i];
1568 }
1569 }
1570 #endif
1571
1572 for (i = 0; i < 6; i ++) {
1573 dev->dev_addr[i] = ee_data[i + sa_offset];
1574 sum += ee_data[i + sa_offset];
1575 }
1576 }
1577 /* Lite-On boards have the address byte-swapped. */
1578 if ((dev->dev_addr[0] == 0xA0 ||
1579 dev->dev_addr[0] == 0xC0 ||
1580 dev->dev_addr[0] == 0x02) &&
1581 dev->dev_addr[1] == 0x00)
1582 for (i = 0; i < 6; i+=2) {
1583 char tmp = dev->dev_addr[i];
1584 dev->dev_addr[i] = dev->dev_addr[i+1];
1585 dev->dev_addr[i+1] = tmp;
1586 }
1587 /* On the Zynx 315 Etherarray and other multiport boards only the
1588 first Tulip has an EEPROM.
1589 On Sparc systems the mac address is held in the OBP property
1590 "local-mac-address".
1591 The addresses of the subsequent ports are derived from the first.
1592 Many PCI BIOSes also incorrectly report the IRQ line, so we correct
1593 that here as well. */
1594 if (sum == 0 || sum == 6*0xff) {
1595 #if defined(CONFIG_SPARC)
1596 struct device_node *dp = pci_device_to_OF_node(pdev);
1597 const unsigned char *addr;
1598 int len;
1599 #endif
1600 eeprom_missing = 1;
1601 for (i = 0; i < 5; i++)
1602 dev->dev_addr[i] = last_phys_addr[i];
1603 dev->dev_addr[i] = last_phys_addr[i] + 1;
1604 #if defined(CONFIG_SPARC)
1605 addr = of_get_property(dp, "local-mac-address", &len);
1606 if (addr && len == ETH_ALEN)
1607 memcpy(dev->dev_addr, addr, ETH_ALEN);
1608 #endif
1609 #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
1610 if (last_irq)
1611 irq = last_irq;
1612 #endif
1613 }
1614
1615 for (i = 0; i < 6; i++)
1616 last_phys_addr[i] = dev->dev_addr[i];
1617 last_irq = irq;
1618
1619 /* The lower four bits are the media type. */
1620 if (board_idx >= 0 && board_idx < MAX_UNITS) {
1621 if (options[board_idx] & MEDIA_MASK)
1622 tp->default_port = options[board_idx] & MEDIA_MASK;
1623 if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
1624 tp->full_duplex = 1;
1625 if (mtu[board_idx] > 0)
1626 dev->mtu = mtu[board_idx];
1627 }
1628 if (dev->mem_start & MEDIA_MASK)
1629 tp->default_port = dev->mem_start & MEDIA_MASK;
1630 if (tp->default_port) {
1631 pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
1632 board_idx, medianame[tp->default_port & MEDIA_MASK]);
1633 tp->medialock = 1;
1634 if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1635 tp->full_duplex = 1;
1636 }
1637 if (tp->full_duplex)
1638 tp->full_duplex_lock = 1;
1639
1640 if (tulip_media_cap[tp->default_port] & MediaIsMII) {
1641 static const u16 media2advert[] = {
1642 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
1643 };
1644 tp->mii_advertise = media2advert[tp->default_port - 9];
1645 tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
1646 }
1647
1648 if (tp->flags & HAS_MEDIA_TABLE) {
1649 sprintf(dev->name, DRV_NAME "%d", board_idx); /* hack */
1650 tulip_parse_eeprom(dev);
1651 strcpy(dev->name, "eth%d"); /* un-hack */
1652 }
1653
1654 if ((tp->flags & ALWAYS_CHECK_MII) ||
1655 (tp->mtable && tp->mtable->has_mii) ||
1656 ( ! tp->mtable && (tp->flags & HAS_MII))) {
1657 if (tp->mtable && tp->mtable->has_mii) {
1658 for (i = 0; i < tp->mtable->leafcount; i++)
1659 if (tp->mtable->mleaf[i].media == 11) {
1660 tp->cur_index = i;
1661 tp->saved_if_port = dev->if_port;
1662 tulip_select_media(dev, 2);
1663 dev->if_port = tp->saved_if_port;
1664 break;
1665 }
1666 }
1667
1668 /* Find the connected MII xcvrs.
1669 Doing this in open() would allow detecting external xcvrs
1670 later, but takes much time. */
1671 tulip_find_mii (dev, board_idx);
1672 }
1673
1674 /* The Tulip-specific entries in the device structure. */
1675 dev->netdev_ops = &tulip_netdev_ops;
1676 dev->watchdog_timeo = TX_TIMEOUT;
1677 #ifdef CONFIG_TULIP_NAPI
1678 netif_napi_add(dev, &tp->napi, tulip_poll, 16);
1679 #endif
1680 dev->ethtool_ops = &ops;
1681
1682 if (register_netdev(dev))
1683 goto err_out_free_ring;
1684
1685 pci_set_drvdata(pdev, dev);
1686
1687 dev_info(&dev->dev,
1688 #ifdef CONFIG_TULIP_MMIO
1689 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
1690 #else
1691 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
1692 #endif
1693 chip_name, pdev->revision,
1694 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
1695 eeprom_missing ? " EEPROM not present," : "",
1696 dev->dev_addr, irq);
1697
1698 if (tp->chip_id == PNIC2)
1699 tp->link_change = pnic2_lnk_change;
1700 else if (tp->flags & HAS_NWAY)
1701 tp->link_change = t21142_lnk_change;
1702 else if (tp->flags & HAS_PNICNWAY)
1703 tp->link_change = pnic_lnk_change;
1704
1705 /* Reset the xcvr interface and turn on heartbeat. */
1706 switch (chip_idx) {
1707 case DC21140:
1708 case DM910X:
1709 default:
1710 if (tp->mtable)
1711 iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
1712 break;
1713 case DC21142:
1714 if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) {
1715 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
1716 iowrite32(0x0000, ioaddr + CSR13);
1717 iowrite32(0x0000, ioaddr + CSR14);
1718 iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
1719 } else
1720 t21142_start_nway(dev);
1721 break;
1722 case PNIC2:
1723 /* just do a reset for sanity's sake */
1724 iowrite32(0x0000, ioaddr + CSR13);
1725 iowrite32(0x0000, ioaddr + CSR14);
1726 break;
1727 case LC82C168:
1728 if ( ! tp->mii_cnt) {
1729 tp->nway = 1;
1730 tp->nwayset = 0;
1731 iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
1732 iowrite32(0x30, ioaddr + CSR12);
1733 iowrite32(0x0001F078, ioaddr + CSR6);
1734 iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
1735 }
1736 break;
1737 case MX98713:
1738 case COMPEX9881:
1739 iowrite32(0x00000000, ioaddr + CSR6);
1740 iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
1741 iowrite32(0x00000001, ioaddr + CSR13);
1742 break;
1743 case MX98715:
1744 case MX98725:
1745 iowrite32(0x01a80000, ioaddr + CSR6);
1746 iowrite32(0xFFFFFFFF, ioaddr + CSR14);
1747 iowrite32(0x00001000, ioaddr + CSR12);
1748 break;
1749 case COMET:
1750 /* No initialization necessary. */
1751 break;
1752 }
1753
1754 /* put the chip in snooze mode until opened */
1755 tulip_set_power_state (tp, 0, 1);
1756
1757 return 0;
1758
1759 err_out_free_ring:
1760 pci_free_consistent (pdev,
1761 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1762 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1763 tp->rx_ring, tp->rx_ring_dma);
1764
1765 err_out_mtable:
1766 kfree (tp->mtable);
1767 pci_iounmap(pdev, ioaddr);
1768
1769 err_out_free_res:
1770 pci_release_regions (pdev);
1771
1772 err_out_free_netdev:
1773 free_netdev (dev);
1774 return -ENODEV;
1775 }
1776
1777
1778 /* set the registers according to the given wolopts */
1779 static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
1780 {
1781 struct net_device *dev = pci_get_drvdata(pdev);
1782 struct tulip_private *tp = netdev_priv(dev);
1783 void __iomem *ioaddr = tp->base_addr;
1784
1785 if (tp->flags & COMET_PM) {
1786 unsigned int tmp;
1787
1788 tmp = ioread32(ioaddr + CSR18);
1789 tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
1790 tmp |= comet_csr18_pm_mode;
1791 iowrite32(tmp, ioaddr + CSR18);
1792
1793 /* Set the Wake-up Control/Status Register to the given WOL options*/
1794 tmp = ioread32(ioaddr + CSR13);
1795 tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
1796 if (wolopts & WAKE_MAGIC)
1797 tmp |= comet_csr13_mpre;
1798 if (wolopts & WAKE_PHY)
1799 tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
1800 /* Clear the event flags */
1801 tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
1802 iowrite32(tmp, ioaddr + CSR13);
1803 }
1804 }
1805
1806 static int __maybe_unused tulip_suspend(struct device *dev_d)
1807 {
1808 struct net_device *dev = dev_get_drvdata(dev_d);
1809 struct tulip_private *tp = netdev_priv(dev);
1810
1811 if (!dev)
1812 return -EINVAL;
1813
1814 if (!netif_running(dev))
1815 goto save_state;
1816
1817 tulip_down(dev);
1818
1819 netif_device_detach(dev);
1820 /* FIXME: it needlessly adds an error path. */
1821 free_irq(tp->pdev->irq, dev);
1822
1823 save_state:
1824 tulip_set_wolopts(to_pci_dev(dev_d), tp->wolinfo.wolopts);
1825 device_set_wakeup_enable(dev_d, !!tp->wolinfo.wolopts);
1826
1827 return 0;
1828 }
1829
1830 static int __maybe_unused tulip_resume(struct device *dev_d)
1831 {
1832 struct pci_dev *pdev = to_pci_dev(dev_d);
1833 struct net_device *dev = dev_get_drvdata(dev_d);
1834 struct tulip_private *tp = netdev_priv(dev);
1835 void __iomem *ioaddr = tp->base_addr;
1836 unsigned int tmp;
1837 int retval = 0;
1838
1839 if (!dev)
1840 return -EINVAL;
1841
1842 if (!netif_running(dev))
1843 return 0;
1844
1845 retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
1846 dev->name, dev);
1847 if (retval) {
1848 pr_err("request_irq failed in resume\n");
1849 return retval;
1850 }
1851
1852 if (tp->flags & COMET_PM) {
1853 device_set_wakeup_enable(dev_d, 0);
1854
1855 /* Clear the PMES flag */
1856 tmp = ioread32(ioaddr + CSR20);
1857 tmp |= comet_csr20_pmes;
1858 iowrite32(tmp, ioaddr + CSR20);
1859
1860 /* Disable all wake-up events */
1861 tulip_set_wolopts(pdev, 0);
1862 }
1863 netif_device_attach(dev);
1864
1865 if (netif_running(dev))
1866 tulip_up(dev);
1867
1868 return 0;
1869 }
1870
1871 static void tulip_remove_one(struct pci_dev *pdev)
1872 {
1873 struct net_device *dev = pci_get_drvdata (pdev);
1874 struct tulip_private *tp;
1875
1876 if (!dev)
1877 return;
1878
1879 tp = netdev_priv(dev);
1880 unregister_netdev(dev);
1881 pci_free_consistent (pdev,
1882 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1883 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1884 tp->rx_ring, tp->rx_ring_dma);
1885 kfree (tp->mtable);
1886 pci_iounmap(pdev, tp->base_addr);
1887 free_netdev (dev);
1888 pci_release_regions (pdev);
1889 pci_disable_device(pdev);
1890
1891 /* pci_power_off (pdev, -1); */
1892 }
1893
1894 #ifdef CONFIG_NET_POLL_CONTROLLER
1895 /*
1896 * Polling 'interrupt' - used by things like netconsole to send skbs
1897 * without having to re-enable interrupts. It's not called while
1898 * the interrupt routine is executing.
1899 */
1900
1901 static void poll_tulip (struct net_device *dev)
1902 {
1903 struct tulip_private *tp = netdev_priv(dev);
1904 const int irq = tp->pdev->irq;
1905
1906 /* disable_irq here is not very nice, but with the lockless
1907 interrupt handler we have no other choice. */
1908 disable_irq(irq);
1909 tulip_interrupt (irq, dev);
1910 enable_irq(irq);
1911 }
1912 #endif
1913
1914 static SIMPLE_DEV_PM_OPS(tulip_pm_ops, tulip_suspend, tulip_resume);
1915
1916 static struct pci_driver tulip_driver = {
1917 .name = DRV_NAME,
1918 .id_table = tulip_pci_tbl,
1919 .probe = tulip_init_one,
1920 .remove = tulip_remove_one,
1921 .driver.pm = &tulip_pm_ops,
1922 };
1923
1924
1925 static int __init tulip_init (void)
1926 {
1927 if (!csr0) {
1928 pr_warn("unknown CPU architecture, using default csr0\n");
1929 /* default to 8 longword cache alignment, 8 longword burst */
1930 csr0 = 0x00A00000 | 0x4800;
1931 }
1932
1933 /* copy module parms into globals */
1934 tulip_rx_copybreak = rx_copybreak;
1935 tulip_max_interrupt_work = max_interrupt_work;
1936
1937 /* probe for and init boards */
1938 return pci_register_driver(&tulip_driver);
1939 }
1940
1941
1942 static void __exit tulip_cleanup (void)
1943 {
1944 pci_unregister_driver (&tulip_driver);
1945 }
1946
1947
1948 module_init(tulip_init);
1949 module_exit(tulip_cleanup);