Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This code is derived from the VIA reference driver (copyright message | |
3 | * below) provided to Red Hat by VIA Networking Technologies, Inc. for | |
4 | * addition to the Linux kernel. | |
5 | * | |
6 | * The code has been merged into one source file, cleaned up to follow | |
7 | * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned | |
8 | * for 64bit hardware platforms. | |
9 | * | |
10 | * TODO | |
1da177e4 | 11 | * rx_copybreak/alignment |
1da177e4 LT |
12 | * More testing |
13 | * | |
113aa838 | 14 | * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk> |
1da177e4 LT |
15 | * Additional fixes and clean up: Francois Romieu |
16 | * | |
17 | * This source has not been verified for use in safety critical systems. | |
18 | * | |
19 | * Please direct queries about the revamped driver to the linux-kernel | |
20 | * list not VIA. | |
21 | * | |
22 | * Original code: | |
23 | * | |
24 | * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. | |
25 | * All rights reserved. | |
26 | * | |
27 | * This software may be redistributed and/or modified under | |
28 | * the terms of the GNU General Public License as published by the Free | |
29 | * Software Foundation; either version 2 of the License, or | |
30 | * any later version. | |
31 | * | |
32 | * This program is distributed in the hope that it will be useful, but | |
33 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | |
34 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
35 | * for more details. | |
36 | * | |
37 | * Author: Chuang Liang-Shing, AJ Jiang | |
38 | * | |
39 | * Date: Jan 24, 2003 | |
40 | * | |
41 | * MODULE_LICENSE("GPL"); | |
42 | * | |
43 | */ | |
44 | ||
1da177e4 LT |
45 | #include <linux/module.h> |
46 | #include <linux/types.h> | |
73b54688 | 47 | #include <linux/bitops.h> |
1da177e4 | 48 | #include <linux/init.h> |
e2c41f14 | 49 | #include <linux/dma-mapping.h> |
1da177e4 LT |
50 | #include <linux/mm.h> |
51 | #include <linux/errno.h> | |
52 | #include <linux/ioport.h> | |
53 | #include <linux/pci.h> | |
54 | #include <linux/kernel.h> | |
55 | #include <linux/netdevice.h> | |
56 | #include <linux/etherdevice.h> | |
57 | #include <linux/skbuff.h> | |
58 | #include <linux/delay.h> | |
59 | #include <linux/timer.h> | |
60 | #include <linux/slab.h> | |
61 | #include <linux/interrupt.h> | |
1da177e4 LT |
62 | #include <linux/string.h> |
63 | #include <linux/wait.h> | |
c4067400 | 64 | #include <linux/io.h> |
1da177e4 | 65 | #include <linux/if.h> |
c4067400 | 66 | #include <linux/uaccess.h> |
1da177e4 | 67 | #include <linux/proc_fs.h> |
6dffbe53 TP |
68 | #include <linux/of_address.h> |
69 | #include <linux/of_device.h> | |
70 | #include <linux/of_irq.h> | |
1da177e4 | 71 | #include <linux/inetdevice.h> |
6dffbe53 | 72 | #include <linux/platform_device.h> |
1da177e4 LT |
73 | #include <linux/reboot.h> |
74 | #include <linux/ethtool.h> | |
75 | #include <linux/mii.h> | |
76 | #include <linux/in.h> | |
77 | #include <linux/if_arp.h> | |
501e4d24 | 78 | #include <linux/if_vlan.h> |
1da177e4 LT |
79 | #include <linux/ip.h> |
80 | #include <linux/tcp.h> | |
81 | #include <linux/udp.h> | |
82 | #include <linux/crc-ccitt.h> | |
83 | #include <linux/crc32.h> | |
84 | ||
85 | #include "via-velocity.h" | |
86 | ||
6dffbe53 TP |
87 | enum velocity_bus_type { |
88 | BUS_PCI, | |
89 | BUS_PLATFORM, | |
90 | }; | |
1da177e4 | 91 | |
c4067400 | 92 | static int velocity_nics; |
1da177e4 LT |
93 | static int msglevel = MSG_LEVEL_INFO; |
94 | ||
6dffbe53 TP |
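/**
 * velocity_set_power_state - set adapter power state
 * @vptr: velocity adapter
 * @state: power state to enter
 *
 * PCI adapters defer to the PCI core; platform (non-PCI) devices have the
 * state written directly to the power control register at offset 0x154,
 * as the body below does.
 */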
95 | static void velocity_set_power_state(struct velocity_info *vptr, char state) |
96 | { | |
97 | void *addr = vptr->mac_regs; | |
98 | ||
99 | if (vptr->pdev) | |
100 | pci_set_power_state(vptr->pdev, state); | |
101 | else | |
102 | writeb(state, addr + 0x154); | |
103 | } | |
104 | ||
01faccbf SH |
105 | /** |
106 | * mac_get_cam_mask - Read a CAM mask | |
107 | * @regs: register block for this velocity | |
108 | * @mask: buffer to store mask | |
109 | * | |
110 | * Fetch the mask bits of the selected CAM and store them into the | |
111 | * provided mask buffer. | |
112 | */ | |
c4067400 | 113 | static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask) |
01faccbf SH |
114 | { |
115 | int i; | |
116 | ||
117 | /* Select CAM mask */ | |
118 | BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); | |
119 | ||
120 | writeb(0, ®s->CAMADDR); | |
121 | ||
122 | /* read mask */ | |
123 | for (i = 0; i < 8; i++) | |
124 | *mask++ = readb(&(regs->MARCAM[i])); | |
125 | ||
126 | /* disable CAMEN */ | |
127 | writeb(0, ®s->CAMADDR); | |
128 | ||
129 | /* Select mar */ | |
130 | BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); | |
01faccbf SH |
131 | } |
132 | ||
01faccbf SH |
133 | /** |
134 | * mac_set_cam_mask - Set a CAM mask | |
135 | * @regs: register block for this velocity | |
136 | * @mask: CAM mask to load | |
137 | * | |
138 | * Store a new mask into a CAM | |
139 | */ | |
c4067400 | 140 | static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask) |
01faccbf SH |
141 | { |
142 | int i; | |
143 | /* Select CAM mask */ | |
144 | BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); | |
145 | ||
146 | writeb(CAMADDR_CAMEN, ®s->CAMADDR); | |
147 | ||
c4067400 | 148 | for (i = 0; i < 8; i++) |
01faccbf | 149 | writeb(*mask++, &(regs->MARCAM[i])); |
c4067400 | 150 | |
01faccbf SH |
151 | /* disable CAMEN */ |
152 | writeb(0, ®s->CAMADDR); | |
153 | ||
154 | /* Select mar */ | |
155 | BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); | |
156 | } | |
157 | ||
c4067400 | 158 | static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask) |
01faccbf SH |
159 | { |
160 | int i; | |
161 | /* Select CAM mask */ | |
162 | BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); | |
163 | ||
164 | writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, ®s->CAMADDR); | |
165 | ||
c4067400 | 166 | for (i = 0; i < 8; i++) |
01faccbf | 167 | writeb(*mask++, &(regs->MARCAM[i])); |
c4067400 | 168 | |
01faccbf SH |
169 | /* disable CAMEN */ |
170 | writeb(0, ®s->CAMADDR); | |
171 | ||
172 | /* Select mar */ | |
173 | BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); | |
174 | } | |
175 | ||
176 | /** | |
177 | * mac_set_cam - set CAM data | |
178 | * @regs: register block of this velocity | |
179 | * @idx: Cam index | |
180 | * @addr: 2 or 6 bytes of CAM data | |
181 | * | |
182 | * Load an address or vlan tag into a CAM | |
183 | */ | |
c4067400 | 184 | static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr) |
01faccbf SH |
185 | { |
186 | int i; | |
187 | ||
188 | /* Select CAM mask */ | |
189 | BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); | |
190 | ||
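/* the CAM holds 64 entries; mask the index into the 0-63 range */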
191 | idx &= (64 - 1); | |
192 | ||
193 | writeb(CAMADDR_CAMEN | idx, ®s->CAMADDR); | |
194 | ||
c4067400 | 195 | for (i = 0; i < 6; i++) |
01faccbf | 196 | writeb(*addr++, &(regs->MARCAM[i])); |
c4067400 | 197 | |
01faccbf SH |
198 | BYTE_REG_BITS_ON(CAMCR_CAMWR, ®s->CAMCR); |
199 | ||
200 | udelay(10); | |
201 | ||
202 | writeb(0, ®s->CAMADDR); | |
203 | ||
204 | /* Select mar */ | |
205 | BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); | |
206 | } | |
207 | ||
c4067400 | 208 | static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx, |
01faccbf SH |
209 | const u8 *addr) |
210 | { | |
211 | ||
212 | /* Select CAM mask */ | |
213 | BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); | |
214 | ||
215 | idx &= (64 - 1); | |
216 | ||
217 | writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, ®s->CAMADDR); | |
218 | writew(*((u16 *) addr), ®s->MARCAM[0]); | |
219 | ||
220 | BYTE_REG_BITS_ON(CAMCR_CAMWR, ®s->CAMCR); | |
221 | ||
222 | udelay(10); | |
223 | ||
224 | writeb(0, ®s->CAMADDR); | |
225 | ||
226 | /* Select mar */ | |
227 | BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); | |
228 | } | |
229 | ||
230 | ||
231 | /** | |
232 | * mac_wol_reset - reset WOL after exiting low power | |
233 | * @regs: register block of this velocity | |
234 | * | |
235 | * Called after we drop out of wake on lan mode in order to | |
236 | * reset the Wake on lan features. This function doesn't restore
237 | * the rest of the device state affected by the sleep/wakeup transition.
238 | */ | |
c4067400 | 239 | static void mac_wol_reset(struct mac_regs __iomem *regs) |
01faccbf SH |
240 | { |
241 | ||
242 | /* Turn off SWPTAG right after leaving power mode */ | |
243 | BYTE_REG_BITS_OFF(STICKHW_SWPTAG, ®s->STICKHW); | |
244 | /* clear sticky bits */ | |
245 | BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), ®s->STICKHW); | |
246 | ||
247 | BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, ®s->CHIPGCR); | |
248 | BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, ®s->CHIPGCR); | |
249 | /* disable force PME-enable */ | |
250 | writeb(WOLCFG_PMEOVR, ®s->WOLCFGClr); | |
251 | /* disable power-event config bit */ | |
252 | writew(0xFFFF, ®s->WOLCRClr); | |
253 | /* clear power status */ | |
254 | writew(0xFFFF, ®s->WOLSRClr); | |
255 | } | |
1da177e4 | 256 | |
7282d491 | 257 | static const struct ethtool_ops velocity_ethtool_ops; |
1da177e4 LT |
258 | |
259 | /* | |
260 | Define module options | |
261 | */ | |
262 | ||
263 | MODULE_AUTHOR("VIA Networking Technologies, Inc."); | |
264 | MODULE_LICENSE("GPL"); | |
265 | MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver"); | |
266 | ||
c4067400 DJ |
267 | #define VELOCITY_PARAM(N, D) \ |
268 | static int N[MAX_UNITS] = OPTION_DEFAULT;\ | |
1da177e4 | 269 | module_param_array(N, int, NULL, 0); \ |
c4067400 | 270 | MODULE_PARM_DESC(N, D); |
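/*
 * Illustrative expansion: VELOCITY_PARAM(RxDescriptors, "...") declares
 * "static int RxDescriptors[MAX_UNITS]" and registers it as a per-adapter
 * module parameter array, so e.g. RxDescriptors=128,64 configures two NICs.
 */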
1da177e4 LT |
271 | |
272 | #define RX_DESC_MIN 64 | |
273 | #define RX_DESC_MAX 255 | |
274 | #define RX_DESC_DEF 64 | |
275 | VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors"); | |
276 | ||
277 | #define TX_DESC_MIN 16 | |
278 | #define TX_DESC_MAX 256 | |
279 | #define TX_DESC_DEF 64 | |
280 | VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors"); | |
281 | ||
1da177e4 LT |
282 | #define RX_THRESH_MIN 0 |
283 | #define RX_THRESH_MAX 3 | |
284 | #define RX_THRESH_DEF 0 | |
285 | /* rx_thresh[] is used for controlling the receive fifo threshold. | |
286 | 0: indicate the rxfifo threshold is 128 bytes. | |
287 | 1: indicate the rxfifo threshold is 512 bytes. | |
288 | 2: indicate the rxfifo threshold is 1024 bytes. | |
289 | 3: indicate the rxfifo threshold is store & forward. | |
290 | */ | |
291 | VELOCITY_PARAM(rx_thresh, "Receive fifo threshold"); | |
292 | ||
293 | #define DMA_LENGTH_MIN 0 | |
294 | #define DMA_LENGTH_MAX 7 | |
2a5774f7 | 295 | #define DMA_LENGTH_DEF 6 |
1da177e4 LT |
296 | |
297 | /* DMA_length[] is used for controlling the DMA length | |
298 | 0: 8 DWORDs | |
299 | 1: 16 DWORDs | |
300 | 2: 32 DWORDs | |
301 | 3: 64 DWORDs | |
302 | 4: 128 DWORDs | |
303 | 5: 256 DWORDs | |
304 | 6: SF(flush till empty)
305 | 7: SF(flush till empty)
306 | */ | |
307 | VELOCITY_PARAM(DMA_length, "DMA length"); | |
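/*
 * For example, DMA_length=3 selects 64-DWORD (256 byte) bursts, while the
 * default of 6 flushes in store-and-forward fashion.
 */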
308 | ||
1da177e4 LT |
309 | #define IP_ALIG_DEF 0 |
310 | /* IP_byte_align[] is used for IP header DWORD byte aligned | |
311 | 0: indicate the IP header won't be DWORD byte aligned. (Default)
312 | 1: indicate the IP header will be DWORD byte aligned.
25985edc | 313 | In some environments, the IP header should be DWORD byte aligned,
1da177e4 LT |
314 | or the packet will be dropped when we receive it. (e.g. IPVS)
315 | */ | |
316 | VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned"); | |
317 | ||
1da177e4 LT |
318 | #define FLOW_CNTL_DEF 1 |
319 | #define FLOW_CNTL_MIN 1 | |
320 | #define FLOW_CNTL_MAX 5 | |
321 | ||
322 | /* flow_control[] is used for setting the flow control ability of NIC. | |
323 | 1: hardware default - AUTO (default). Use Hardware default value in ANAR.
324 | 2: enable TX flow control. | |
325 | 3: enable RX flow control. | |
326 | 4: enable RX/TX flow control. | |
327 | 5: disable | |
328 | */ | |
329 | VELOCITY_PARAM(flow_control, "Enable flow control ability"); | |
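/* e.g. flow_control=4 enables flow control in both directions; 5 disables it */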
330 | ||
331 | #define MED_LNK_DEF 0 | |
332 | #define MED_LNK_MIN 0 | |
15419227 | 333 | #define MED_LNK_MAX 5 |
1da177e4 LT |
334 | /* speed_duplex[] is used for setting the speed and duplex mode of NIC. |
335 | 0: indicate autonegotiation for both speed and duplex mode | |
336 | 1: indicate 100Mbps half duplex mode | |
337 | 2: indicate 100Mbps full duplex mode | |
338 | 3: indicate 10Mbps half duplex mode | |
339 | 4: indicate 10Mbps full duplex mode | |
15419227 | 340 | 5: indicate 1000Mbps full duplex mode |
1da177e4 LT |
341 | |
342 | Note: | |
c4067400 DJ |
343 | if the EEPROM has been set to force mode, this option is ignored
344 | by the driver.
1da177e4 LT |
345 | */ |
346 | VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode"); | |
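/*
 * e.g. speed_duplex=5 forces 1000Mbps full duplex; a forced-mode setting
 * in the EEPROM still takes precedence over this option.
 */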
347 | ||
348 | #define VAL_PKT_LEN_DEF 0 | |
349 | /* ValPktLen[] is used for controlling whether frames with an invalid layer 2 length are dropped.
350 | 0: Receive frame with invalid layer 2 length (Default) | |
351 | 1: Drop frame with invalid layer 2 length | |
352 | */ | |
353 | VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame"); | |
354 | ||
355 | #define WOL_OPT_DEF 0 | |
356 | #define WOL_OPT_MIN 0 | |
357 | #define WOL_OPT_MAX 7 | |
358 | /* wol_opts[] is used for controlling wake on lan behavior. | |
359 | 0: Wake up if received a magic packet. (Default)
360 | 1: Wake up if link status changes.
361 | 2: Wake up if received an arp packet.
362 | 4: Wake up if received any unicast packet.
363 | These values can be summed to enable more than one option.
364 | */ | |
365 | VELOCITY_PARAM(wol_opts, "Wake On Lan options"); | |
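/*
 * Illustrative sum: wol_opts=6 (2 + 4) wakes on either an arp packet or
 * any unicast packet.
 */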
366 | ||
1da177e4 LT |
367 | static int rx_copybreak = 200; |
368 | module_param(rx_copybreak, int, 0644); | |
369 | MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); | |
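/*
 * Conventional copybreak behaviour (the receive path itself is outside this
 * section): frames shorter than rx_copybreak bytes are copied into a fresh
 * skb so the DMA buffer can be reused; larger frames are passed up directly.
 */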
370 | ||
1da177e4 LT |
371 | /* |
372 | * Internal board variants. At the moment we have only one | |
373 | */ | |
4f14b92f | 374 | static struct velocity_info_tbl chip_info_table[] = { |
cabb7667 JG |
375 | {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL}, |
376 | { } | |
1da177e4 LT |
377 | }; |
378 | ||
379 | /* | |
380 | * Describe the PCI device identifiers that we support in this | |
381 | * device driver. Used for hotplug autoloading. | |
382 | */ | |
6dffbe53 | 383 | |
9baa3c34 | 384 | static const struct pci_device_id velocity_pci_id_table[] = { |
e54f4893 JG |
385 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) }, |
386 | { } | |
1da177e4 LT |
387 | }; |
388 | ||
6dffbe53 TP |
389 | MODULE_DEVICE_TABLE(pci, velocity_pci_id_table); |
390 | ||
391 | /*
392 | * Describe the OF device identifiers that we support in this | |
393 | * device driver. Used for devicetree nodes. | |
394 | */ | |
395 | static struct of_device_id velocity_of_ids[] = { | |
396 | { .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] }, | |
397 | { /* Sentinel */ }, | |
398 | }; | |
399 | MODULE_DEVICE_TABLE(of, velocity_of_ids); | |
1da177e4 LT |
400 | |
401 | /** | |
402 | * get_chip_name - identifier to name | |
403 | * @chip_id: chip identifier
404 | * | |
405 | * Given a chip identifier, return a suitable description. Returns
406 | * a pointer to a static string valid while the driver is loaded.
407 | */ | |
27add006 | 408 | static const char *get_chip_name(enum chip_type chip_id) |
1da177e4 LT |
409 | { |
410 | int i; | |
411 | for (i = 0; chip_info_table[i].name != NULL; i++) | |
412 | if (chip_info_table[i].chip_id == chip_id) | |
413 | break; | |
414 | return chip_info_table[i].name; | |
415 | } | |
416 | ||
1da177e4 LT |
417 | /** |
418 | * velocity_set_int_opt - parser for integer options | |
419 | * @opt: pointer to option value | |
420 | * @val: value the user requested (or -1 for default) | |
421 | * @min: lowest value allowed | |
422 | * @max: highest value allowed | |
423 | * @def: default value | |
424 | * @name: property name | |
426 | * @devname: device name
426 | * | |
427 | * Set an integer property in the module options. This function does | |
428 | * all the verification and checking as well as reporting so that | |
429 | * we don't duplicate code for each option. | |
430 | */ | |
27add006 BP |
431 | static void velocity_set_int_opt(int *opt, int val, int min, int max, int def, |
432 | char *name, const char *devname) | |
1da177e4 LT |
433 | { |
434 | if (val == -1) | |
435 | *opt = def; | |
436 | else if (val < min || val > max) { | |
437 | VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n", | |
438 | devname, name, min, max); | |
439 | *opt = def; | |
440 | } else { | |
441 | VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n", | |
442 | devname, name, val); | |
443 | *opt = val; | |
444 | } | |
445 | } | |
446 | ||
447 | /** | |
448 | * velocity_set_bool_opt - parser for boolean options | |
449 | * @opt: pointer to option value | |
450 | * @val: value the user requested (or -1 for default) | |
451 | * @def: default value (yes/no) | |
452 | * @flag: numeric value to set for true. | |
453 | * @name: property name | |
454 | * @devname: device name
455 | * | |
456 | * Set a boolean property in the module options. This function does | |
457 | * all the verification and checking as well as reporting so that | |
458 | * we don't duplicate code for each option. | |
459 | */ | |
27add006 BP |
460 | static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag, |
461 | char *name, const char *devname) | |
1da177e4 LT |
462 | { |
463 | (*opt) &= (~flag); | |
464 | if (val == -1) | |
465 | *opt |= (def ? flag : 0); | |
466 | else if (val < 0 || val > 1) { | |
6aa20a22 | 467 | printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n", |
1da177e4 LT |
468 | devname, name); |
469 | *opt |= (def ? flag : 0); | |
470 | } else { | |
6aa20a22 | 471 | printk(KERN_INFO "%s: set parameter %s to %s\n", |
1da177e4 LT |
472 | devname, name, val ? "TRUE" : "FALSE"); |
473 | *opt |= (val ? flag : 0); | |
474 | } | |
475 | } | |
476 | ||
477 | /** | |
478 | * velocity_get_options - set options on device | |
479 | * @opts: option structure for the device | |
480 | * @index: index of option to use in module options array | |
481 | * @devname: device name | |
482 | * | |
483 | * Turn the module and command options into a single structure | |
484 | * for the current device | |
485 | */ | |
27add006 BP |
486 | static void velocity_get_options(struct velocity_opt *opts, int index, |
487 | const char *devname) | |
1da177e4 LT |
488 | { |
489 | ||
490 | velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname); | |
491 | velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname); | |
492 | velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname); | |
493 | velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname); | |
501e4d24 | 494 | |
1da177e4 LT |
495 | velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname); |
496 | velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname); | |
497 | velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname); | |
498 | velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname); | |
64699336 | 499 | velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname); |
1da177e4 LT |
500 | opts->numrx = (opts->numrx & ~3); |
501 | } | |
502 | ||
503 | /** | |
504 | * velocity_init_cam_filter - initialise CAM | |
505 | * @vptr: velocity to program | |
506 | * | |
507 | * Initialize the content addressable memory used for filters. Load | |
508 | * appropriately according to the presence of VLAN | |
509 | */ | |
1da177e4 LT |
510 | static void velocity_init_cam_filter(struct velocity_info *vptr) |
511 | { | |
c4067400 | 512 | struct mac_regs __iomem *regs = vptr->mac_regs; |
73b54688 | 513 | unsigned int vid, i = 0; |
1da177e4 LT |
514 | |
515 | /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */ | |
516 | WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, ®s->MCFG); | |
517 | WORD_REG_BITS_ON(MCFG_VIDFR, ®s->MCFG); | |
518 | ||
519 | /* Disable all CAMs */ | |
520 | memset(vptr->vCAMmask, 0, sizeof(u8) * 8); | |
521 | memset(vptr->mCAMmask, 0, sizeof(u8) * 8); | |
01faccbf SH |
522 | mac_set_vlan_cam_mask(regs, vptr->vCAMmask); |
523 | mac_set_cam_mask(regs, vptr->mCAMmask); | |
1da177e4 | 524 | |
d4f73c8e | 525 | /* Enable VCAMs */ |
73b54688 JP |
526 | for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) { |
527 | mac_set_vlan_cam(regs, i, (u8 *) &vid); | |
528 | vptr->vCAMmask[i / 8] |= 0x1 << (i % 8); | |
529 | if (++i >= VCAM_SIZE) | |
530 | break; | |
531 | } | |
532 | mac_set_vlan_cam_mask(regs, vptr->vCAMmask); | |
d4f73c8e FR |
533 | } |
534 | ||
80d5c368 PM |
535 | static int velocity_vlan_rx_add_vid(struct net_device *dev, |
536 | __be16 proto, u16 vid) | |
501e4d24 SH |
537 | { |
538 | struct velocity_info *vptr = netdev_priv(dev); | |
539 | ||
c4067400 | 540 | spin_lock_irq(&vptr->lock); |
73b54688 | 541 | set_bit(vid, vptr->active_vlans); |
501e4d24 | 542 | velocity_init_cam_filter(vptr); |
c4067400 | 543 | spin_unlock_irq(&vptr->lock); |
8e586137 | 544 | return 0; |
501e4d24 SH |
545 | } |
546 | ||
80d5c368 PM |
547 | static int velocity_vlan_rx_kill_vid(struct net_device *dev, |
548 | __be16 proto, u16 vid) | |
501e4d24 SH |
549 | { |
550 | struct velocity_info *vptr = netdev_priv(dev); | |
551 | ||
c4067400 | 552 | spin_lock_irq(&vptr->lock); |
73b54688 | 553 | clear_bit(vid, vptr->active_vlans); |
501e4d24 | 554 | velocity_init_cam_filter(vptr); |
c4067400 | 555 | spin_unlock_irq(&vptr->lock); |
8e586137 | 556 | return 0; |
501e4d24 SH |
557 | } |
558 | ||
3c4dc711 FR |
559 | static void velocity_init_rx_ring_indexes(struct velocity_info *vptr) |
560 | { | |
561 | vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0; | |
562 | } | |
501e4d24 | 563 | |
1da177e4 LT |
564 | /** |
565 | * velocity_rx_reset - handle a receive reset | |
566 | * @vptr: velocity we are resetting | |
567 | * | |
568 | * Reset the ownership and status for the receive ring side. | |
569 | * Hand all the receive queue to the NIC. | |
570 | */ | |
1da177e4 LT |
571 | static void velocity_rx_reset(struct velocity_info *vptr) |
572 | { | |
573 | ||
c4067400 | 574 | struct mac_regs __iomem *regs = vptr->mac_regs; |
1da177e4 LT |
575 | int i; |
576 | ||
3c4dc711 | 577 | velocity_init_rx_ring_indexes(vptr); |
1da177e4 LT |
578 | |
579 | /* | |
580 | * Init state, all RD entries belong to the NIC | |
581 | */ | |
582 | for (i = 0; i < vptr->options.numrx; ++i) | |
0fe9f15e | 583 | vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC; |
1da177e4 LT |
584 | |
585 | writew(vptr->options.numrx, ®s->RBRDU); | |
0fe9f15e | 586 | writel(vptr->rx.pool_dma, ®s->RDBaseLo); |
1da177e4 LT |
587 | writew(0, ®s->RDIdx); |
588 | writew(vptr->options.numrx - 1, ®s->RDCSize); | |
589 | } | |
590 | ||
591 | /** | |
2cf71d2e DJ |
592 | * velocity_get_opt_media_mode - get media selection |
593 | * @vptr: velocity adapter | |
1da177e4 | 594 | * |
2cf71d2e DJ |
595 | * Get the media mode stored in EEPROM or module options and load |
596 | * mii_status accordingly. The requested link state information | |
597 | * is also returned. | |
1da177e4 | 598 | */ |
2cf71d2e | 599 | static u32 velocity_get_opt_media_mode(struct velocity_info *vptr) |
1da177e4 | 600 | { |
2cf71d2e | 601 | u32 status = 0; |
1da177e4 | 602 | |
2cf71d2e DJ |
603 | switch (vptr->options.spd_dpx) { |
604 | case SPD_DPX_AUTO: | |
605 | status = VELOCITY_AUTONEG_ENABLE; | |
1da177e4 | 606 | break; |
2cf71d2e DJ |
607 | case SPD_DPX_100_FULL: |
608 | status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL; | |
609 | break; | |
610 | case SPD_DPX_10_FULL: | |
611 | status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL; | |
612 | break; | |
613 | case SPD_DPX_100_HALF: | |
614 | status = VELOCITY_SPEED_100; | |
615 | break; | |
616 | case SPD_DPX_10_HALF: | |
617 | status = VELOCITY_SPEED_10; | |
618 | break; | |
15419227 | 619 | case SPD_DPX_1000_FULL: |
620 | status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL; | |
621 | break; | |
2cf71d2e DJ |
622 | } |
623 | vptr->mii_status = status; | |
624 | return status; | |
625 | } | |
1da177e4 | 626 | |
2cf71d2e DJ |
627 | /** |
628 | * safe_disable_mii_autopoll - autopoll off | |
629 | * @regs: velocity registers | |
630 | * | |
631 | * Turn off the autopoll and wait for it to disable on the chip | |
632 | */ | |
633 | static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs) | |
634 | { | |
635 | u16 ww; | |
1da177e4 | 636 | |
2cf71d2e DJ |
637 | /* turn off MAUTO */ |
638 | writeb(0, ®s->MIICR); | |
639 | for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { | |
640 | udelay(1); | |
641 | if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR)) | |
642 | break; | |
643 | } | |
644 | } | |
1da177e4 | 645 | |
2cf71d2e DJ |
646 | /** |
647 | * enable_mii_autopoll - turn on autopolling | |
648 | * @regs: velocity registers | |
649 | * | |
650 | * Enable the MII link status autopoll feature on the Velocity | |
651 | * hardware. Wait for it to enable. | |
652 | */ | |
653 | static void enable_mii_autopoll(struct mac_regs __iomem *regs) | |
654 | { | |
655 | int ii; | |
1da177e4 | 656 | |
2cf71d2e DJ |
657 | writeb(0, &(regs->MIICR)); |
658 | writeb(MIIADR_SWMPL, ®s->MIIADR); | |
1da177e4 | 659 | |
2cf71d2e DJ |
660 | for (ii = 0; ii < W_MAX_TIMEOUT; ii++) { |
661 | udelay(1); | |
662 | if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR)) | |
663 | break; | |
664 | } | |
1da177e4 | 665 | |
2cf71d2e | 666 | writeb(MIICR_MAUTO, ®s->MIICR); |
1da177e4 | 667 | |
2cf71d2e DJ |
668 | for (ii = 0; ii < W_MAX_TIMEOUT; ii++) { |
669 | udelay(1); | |
670 | if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR)) | |
671 | break; | |
672 | } | |
1da177e4 | 673 | |
2cf71d2e | 674 | } |
1da177e4 | 675 | |
2cf71d2e DJ |
676 | /** |
677 | * velocity_mii_read - read MII data | |
678 | * @regs: velocity registers | |
679 | * @index: MII register index | |
680 | * @data: buffer for received data | |
681 | * | |
682 | * Perform a single read of an MII 16bit register. Returns zero | |
683 | * on success or -ETIMEDOUT if the PHY did not respond. | |
684 | */ | |
685 | static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data) | |
686 | { | |
687 | u16 ww; | |
1da177e4 | 688 | |
2cf71d2e DJ |
689 | /* |
690 | * Disable MIICR_MAUTO, so that mii addr can be set normally | |
691 | */ | |
692 | safe_disable_mii_autopoll(regs); | |
1da177e4 | 693 | |
2cf71d2e | 694 | writeb(index, ®s->MIIADR); |
1da177e4 | 695 | |
2cf71d2e | 696 | BYTE_REG_BITS_ON(MIICR_RCMD, ®s->MIICR); |
1da177e4 | 697 | |
2cf71d2e DJ |
698 | for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { |
699 | if (!(readb(®s->MIICR) & MIICR_RCMD)) | |
700 | break; | |
701 | } | |
1da177e4 | 702 | |
2cf71d2e | 703 | *data = readw(®s->MIIDATA); |
1da177e4 | 704 | |
2cf71d2e DJ |
705 | enable_mii_autopoll(regs); |
706 | if (ww == W_MAX_TIMEOUT) | |
707 | return -ETIMEDOUT; | |
708 | return 0; | |
1da177e4 LT |
709 | } |
710 | ||
711 | /** | |
2cf71d2e DJ |
712 | * mii_check_media_mode - check media state |
713 | * @regs: velocity registers | |
1da177e4 | 714 | * |
2cf71d2e DJ |
715 | * Check the current MII status and determine the link status |
716 | * accordingly | |
1da177e4 | 717 | */ |
2cf71d2e | 718 | static u32 mii_check_media_mode(struct mac_regs __iomem *regs) |
1da177e4 | 719 | { |
2cf71d2e DJ |
720 | u32 status = 0; |
721 | u16 ANAR; | |
1da177e4 | 722 | |
3a7f8681 | 723 | if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs)) |
2cf71d2e | 724 | status |= VELOCITY_LINK_FAIL; |
1da177e4 | 725 | |
3a7f8681 | 726 | if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs)) |
2cf71d2e | 727 | status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL; |
3a7f8681 | 728 | else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs)) |
2cf71d2e DJ |
729 | status |= (VELOCITY_SPEED_1000); |
730 | else { | |
3a7f8681 FR |
731 | velocity_mii_read(regs, MII_ADVERTISE, &ANAR); |
732 | if (ANAR & ADVERTISE_100FULL) | |
2cf71d2e | 733 | status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL); |
3a7f8681 | 734 | else if (ANAR & ADVERTISE_100HALF) |
2cf71d2e | 735 | status |= VELOCITY_SPEED_100; |
3a7f8681 | 736 | else if (ANAR & ADVERTISE_10FULL) |
2cf71d2e DJ |
737 | status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL); |
738 | else | |
739 | status |= (VELOCITY_SPEED_10); | |
1da177e4 LT |
740 | } |
741 | ||
3a7f8681 FR |
742 | if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) { |
743 | velocity_mii_read(regs, MII_ADVERTISE, &ANAR); | |
744 | if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) | |
745 | == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) { | |
746 | if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs)) | |
2cf71d2e DJ |
747 | status |= VELOCITY_AUTONEG_ENABLE; |
748 | } | |
1da177e4 | 749 | } |
1da177e4 | 750 | |
2cf71d2e DJ |
751 | return status; |
752 | } | |
39a11bd9 | 753 | |
1da177e4 | 754 | /** |
2cf71d2e DJ |
755 | * velocity_mii_write - write MII data |
756 | * @regs: velocity registers | |
757 | * @index: MII register index | |
758 | * @data: 16bit data for the MII register | |
1da177e4 | 759 | * |
2cf71d2e DJ |
760 | * Perform a single write to an MII 16bit register. Returns zero |
761 | * on success or -ETIMEDOUT if the PHY did not respond. | |
1da177e4 | 762 | */ |
2cf71d2e | 763 | static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data) |
1da177e4 | 764 | { |
2cf71d2e | 765 | u16 ww; |
1da177e4 | 766 | |
2cf71d2e DJ |
767 | /* |
768 | * Disable MIICR_MAUTO, so that mii addr can be set normally | |
e54f4893 | 769 | */ |
2cf71d2e | 770 | safe_disable_mii_autopoll(regs); |
1da177e4 | 771 | |
2cf71d2e DJ |
772 | /* MII reg offset */ |
773 | writeb(mii_addr, ®s->MIIADR); | |
774 | /* set MII data */ | |
775 | writew(data, ®s->MIIDATA); | |
776 | ||
777 | /* turn on MIICR_WCMD */ | |
778 | BYTE_REG_BITS_ON(MIICR_WCMD, ®s->MIICR); | |
779 | ||
780 | /* W_MAX_TIMEOUT is the timeout period */ | |
781 | for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { | |
782 | udelay(5); | |
783 | if (!(readb(®s->MIICR) & MIICR_WCMD)) | |
784 | break; | |
1da177e4 | 785 | } |
2cf71d2e | 786 | enable_mii_autopoll(regs); |
6aa20a22 | 787 | |
2cf71d2e DJ |
788 | if (ww == W_MAX_TIMEOUT) |
789 | return -ETIMEDOUT; | |
790 | return 0; | |
791 | } | |
6aa20a22 | 792 | |
2cf71d2e DJ |
793 | /** |
794 | * set_mii_flow_control - flow control setup | |
795 | * @vptr: velocity interface | |
796 | * | |
797 | * Set up the flow control on this interface according to | |
798 | * the supplied user/eeprom options. | |
799 | */ | |
800 | static void set_mii_flow_control(struct velocity_info *vptr) | |
801 | { | |
802 | /*Enable or Disable PAUSE in ANAR */ | |
803 | switch (vptr->options.flow_cntl) { | |
804 | case FLOW_CNTL_TX: | |
3a7f8681 FR |
805 | MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); |
806 | MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); | |
2cf71d2e | 807 | break; |
1da177e4 | 808 | |
2cf71d2e | 809 | case FLOW_CNTL_RX: |
3a7f8681 FR |
810 | MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); |
811 | MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); | |
2cf71d2e | 812 | break; |
1da177e4 | 813 | |
2cf71d2e | 814 | case FLOW_CNTL_TX_RX: |
3a7f8681 | 815 | MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); |
4a35ecf8 | 816 | MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); |
2cf71d2e | 817 | break; |
1da177e4 | 818 | |
2cf71d2e | 819 | case FLOW_CNTL_DISABLE: |
3a7f8681 FR |
820 | MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); |
821 | MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); | |
2cf71d2e DJ |
822 | break; |
823 | default: | |
824 | break; | |
825 | } | |
826 | } | |
1da177e4 | 827 | |
2cf71d2e DJ |
828 | /** |
829 | * mii_set_auto_on - autonegotiate on | |
830 | * @vptr: velocity | |
831 | * | |
832 | * Enable autonegotiation on this interface
833 | */ | |
834 | static void mii_set_auto_on(struct velocity_info *vptr) | |
835 | { | |
3a7f8681 FR |
836 | if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs)) |
837 | MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); | |
2cf71d2e | 838 | else |
3a7f8681 | 839 | MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); |
2cf71d2e | 840 | } |
1da177e4 | 841 | |
2cf71d2e DJ |
842 | static u32 check_connection_type(struct mac_regs __iomem *regs) |
843 | { | |
844 | u32 status = 0; | |
845 | u8 PHYSR0; | |
846 | u16 ANAR; | |
847 | PHYSR0 = readb(®s->PHYSR0); | |
1da177e4 | 848 | |
2cf71d2e DJ |
849 | /* |
850 | if (!(PHYSR0 & PHYSR0_LINKGD)) | |
851 | status|=VELOCITY_LINK_FAIL; | |
852 | */ | |
1da177e4 | 853 | |
2cf71d2e DJ |
854 | if (PHYSR0 & PHYSR0_FDPX) |
855 | status |= VELOCITY_DUPLEX_FULL; | |
1da177e4 | 856 | |
2cf71d2e DJ |
857 | if (PHYSR0 & PHYSR0_SPDG) |
858 | status |= VELOCITY_SPEED_1000; | |
859 | else if (PHYSR0 & PHYSR0_SPD10) | |
860 | status |= VELOCITY_SPEED_10; | |
861 | else | |
862 | status |= VELOCITY_SPEED_100; | |
1da177e4 | 863 | |
3a7f8681 FR |
864 | if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) { |
865 | velocity_mii_read(regs, MII_ADVERTISE, &ANAR); | |
866 | if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) | |
867 | == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) { | |
868 | if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs)) | |
2cf71d2e DJ |
869 | status |= VELOCITY_AUTONEG_ENABLE; |
870 | } | |
1da177e4 LT |
871 | } |
872 | ||
2cf71d2e DJ |
873 | return status; |
874 | } | |
1da177e4 | 875 | |
2cf71d2e DJ |
876 | /** |
877 | * velocity_set_media_mode - set media mode | |
878 | * @mii_status: old MII link state | |
879 | * | |
880 | * Check the media link state and configure the flow control | |
881 | * PHY and also velocity hardware setup accordingly. In particular | |
882 | * we need to set up CD polling and frame bursting. | |
883 | */ | |
884 | static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) | |
885 | { | |
886 | u32 curr_status; | |
887 | struct mac_regs __iomem *regs = vptr->mac_regs; | |
1da177e4 | 888 | |
2cf71d2e DJ |
889 | vptr->mii_status = mii_check_media_mode(vptr->mac_regs); |
890 | curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL); | |
07b5f6a6 | 891 | |
2cf71d2e DJ |
892 | /* Set mii link status */ |
893 | set_mii_flow_control(vptr); | |
1da177e4 | 894 | |
6aa20a22 | 895 | /* |
a34f0b31 | 896 | Check if new status is consistent with current status |
8e95a202 JP |
897 | if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) || |
898 | (mii_status==curr_status)) { | |
2cf71d2e DJ |
899 | vptr->mii_status=mii_check_media_mode(vptr->mac_regs); |
900 | vptr->mii_status=check_connection_type(vptr->mac_regs); | |
901 | VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n"); | |
902 | return 0; | |
903 | } | |
1da177e4 | 904 | */ |
6aa20a22 | 905 | |
2cf71d2e | 906 | if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) |
3a7f8681 | 907 | MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); |
1da177e4 LT |
908 | |
909 | /* | |
2cf71d2e | 910 | * If connection type is AUTO |
1da177e4 | 911 | */ |
2cf71d2e DJ |
912 | if (mii_status & VELOCITY_AUTONEG_ENABLE) { |
913 | VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n"); | |
914 | /* clear force MAC mode bit */ | |
915 | BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, ®s->CHIPGCR); | |
916 | /* set duplex mode of MAC according to duplex mode of MII */ | |
3a7f8681 FR |
917 | MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs); |
918 | MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs); | |
919 | MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); | |
6aa20a22 | 920 | |
2cf71d2e DJ |
921 | /* enable AUTO-NEGO mode */ |
922 | mii_set_auto_on(vptr); | |
923 | } else { | |
15419227 | 924 | u16 CTRL1000; |
2cf71d2e DJ |
925 | u16 ANAR; |
926 | u8 CHIPGCR; | |
1da177e4 | 927 | |
2cf71d2e DJ |
928 | /* |
929 | * 1. if it's 3119, disable frame bursting in halfduplex mode | |
930 | * and enable it in fullduplex mode | |
931 | * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR | |
932 | * 3. only enable CD heart beat counter in 10HD mode | |
933 | */ | |
1da177e4 | 934 | |
2cf71d2e DJ |
935 | /* set force MAC mode bit */ |
936 | BYTE_REG_BITS_ON(CHIPGCR_FCMODE, ®s->CHIPGCR); | |
1da177e4 | 937 | |
2cf71d2e | 938 | CHIPGCR = readb(®s->CHIPGCR); |
15419227 | 939 | |
940 | if (mii_status & VELOCITY_SPEED_1000) | |
941 | CHIPGCR |= CHIPGCR_FCGMII; | |
942 | else | |
943 | CHIPGCR &= ~CHIPGCR_FCGMII; | |
501e4d24 | 944 | |
2cf71d2e DJ |
945 | if (mii_status & VELOCITY_DUPLEX_FULL) { |
946 | CHIPGCR |= CHIPGCR_FCFDX; | |
947 | writeb(CHIPGCR, ®s->CHIPGCR); | |
948 | VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n"); | |
949 | if (vptr->rev_id < REV_ID_VT3216_A0) | |
950 | BYTE_REG_BITS_OFF(TCR_TB2BDIS, ®s->TCR); | |
951 | } else { | |
952 | CHIPGCR &= ~CHIPGCR_FCFDX; | |
953 | VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n"); | |
954 | writeb(CHIPGCR, ®s->CHIPGCR); | |
955 | if (vptr->rev_id < REV_ID_VT3216_A0) | |
956 | BYTE_REG_BITS_ON(TCR_TB2BDIS, ®s->TCR); | |
957 | } | |
1da177e4 | 958 | |
15419227 | 959 | velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000); |
960 | CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); | |
961 | if ((mii_status & VELOCITY_SPEED_1000) && | |
962 | (mii_status & VELOCITY_DUPLEX_FULL)) { | |
963 | CTRL1000 |= ADVERTISE_1000FULL; | |
964 | } | |
965 | velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000); | |
1da177e4 | 966 | |
2cf71d2e DJ |
967 | if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10)) |
968 | BYTE_REG_BITS_OFF(TESTCFG_HBDIS, ®s->TESTCFG); | |
969 | else | |
970 | BYTE_REG_BITS_ON(TESTCFG_HBDIS, ®s->TESTCFG); | |
1da177e4 | 971 | |
3a7f8681 FR |
972 | /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */ |
973 | velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR); | |
974 | ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)); | |
2cf71d2e DJ |
975 | if (mii_status & VELOCITY_SPEED_100) { |
976 | if (mii_status & VELOCITY_DUPLEX_FULL) | |
3a7f8681 | 977 | ANAR |= ADVERTISE_100FULL; |
2cf71d2e | 978 | else |
3a7f8681 | 979 | ANAR |= ADVERTISE_100HALF; |
15419227 | 980 | } else if (mii_status & VELOCITY_SPEED_10) { |
2cf71d2e | 981 | if (mii_status & VELOCITY_DUPLEX_FULL) |
3a7f8681 | 982 | ANAR |= ADVERTISE_10FULL; |
2cf71d2e | 983 | else |
3a7f8681 | 984 | ANAR |= ADVERTISE_10HALF; |
2cf71d2e | 985 | } |
3a7f8681 | 986 | velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR); |
2cf71d2e DJ |
987 | /* enable AUTO-NEGO mode */ |
988 | mii_set_auto_on(vptr); | |
3a7f8681 | 989 | /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */ |
d3b238a0 | 990 | } |
2cf71d2e DJ |
991 | /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */ |
992 | /* vptr->mii_status=check_connection_type(vptr->mac_regs); */ | |
993 | return VELOCITY_LINK_CHANGE; | |
994 | } | |
8a22dddb | 995 | |
2cf71d2e DJ |
996 | /** |
997 | * velocity_print_link_status - link status reporting | |
998 | * @vptr: velocity to report on | |
999 | * | |
1000 | * Turn the link status of the velocity card into a kernel log | |
1001 | * description of the new link state, detailing speed and duplex | |
1002 | * status | |
1003 | */ | |
1004 | static void velocity_print_link_status(struct velocity_info *vptr) | |
1005 | { | |
6aa20a22 | 1006 | |
2cf71d2e | 1007 | if (vptr->mii_status & VELOCITY_LINK_FAIL) { |
a9683c94 | 1008 | VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->netdev->name); |
2cf71d2e | 1009 | } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) { |
a9683c94 | 1010 | VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->netdev->name); |
6aa20a22 | 1011 | |
2cf71d2e DJ |
1012 | if (vptr->mii_status & VELOCITY_SPEED_1000) |
1013 | VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps"); | |
1014 | else if (vptr->mii_status & VELOCITY_SPEED_100) | |
1015 | VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps"); | |
1016 | else | |
1017 | VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps"); | |
1da177e4 | 1018 | |
2cf71d2e DJ |
1019 | if (vptr->mii_status & VELOCITY_DUPLEX_FULL) |
1020 | VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n"); | |
1021 | else | |
1022 | VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n"); | |
1023 | } else { | |
a9683c94 | 1024 | VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->netdev->name); |
2cf71d2e | 1025 | switch (vptr->options.spd_dpx) { |
15419227 | 1026 | case SPD_DPX_1000_FULL: |
1027 | VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n"); | |
1028 | break; | |
2cf71d2e DJ |
1029 | case SPD_DPX_100_HALF: |
1030 | VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n"); | |
1031 | break; | |
1032 | case SPD_DPX_100_FULL: | |
1033 | VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n"); | |
1034 | break; | |
1035 | case SPD_DPX_10_HALF: | |
1036 | VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n"); | |
1037 | break; | |
1038 | case SPD_DPX_10_FULL: | |
1039 | VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n"); | |
1040 | break; | |
1041 | default: | |
1042 | break; | |
1043 | } | |
1da177e4 | 1044 | } |
1da177e4 LT |
1045 | } |
1046 | ||
1047 | /** | |
2cf71d2e DJ |
1048 | * enable_flow_control_ability - flow control |
1049 | * @vptr: velocity to configure
1da177e4 | 1050 | * |
2cf71d2e DJ |
1051 | * Set up flow control according to the flow control options |
1052 | * determined by the eeprom/configuration. | |
1da177e4 | 1053 | */ |
2cf71d2e | 1054 | static void enable_flow_control_ability(struct velocity_info *vptr) |
1da177e4 | 1055 | { |
1da177e4 | 1056 | |
2cf71d2e | 1057 | struct mac_regs __iomem *regs = vptr->mac_regs; |
1da177e4 | 1058 | |
2cf71d2e | 1059 | switch (vptr->options.flow_cntl) { |
1da177e4 | 1060 | |
2cf71d2e DJ |
1061 | case FLOW_CNTL_DEFAULT: |
1062 | if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, ®s->PHYSR0)) | |
1063 | writel(CR0_FDXRFCEN, ®s->CR0Set); | |
1064 | else | |
1065 | writel(CR0_FDXRFCEN, ®s->CR0Clr); | |
1066 | ||
1067 | if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, ®s->PHYSR0)) | |
1068 | writel(CR0_FDXTFCEN, ®s->CR0Set); | |
1069 | else | |
1070 | writel(CR0_FDXTFCEN, ®s->CR0Clr); | |
1071 | break; | |
1072 | ||
1073 | case FLOW_CNTL_TX: | |
1074 | writel(CR0_FDXTFCEN, ®s->CR0Set); | |
1075 | writel(CR0_FDXRFCEN, ®s->CR0Clr); | |
1076 | break; | |
1077 | ||
1078 | case FLOW_CNTL_RX: | |
1079 | writel(CR0_FDXRFCEN, ®s->CR0Set); | |
1080 | writel(CR0_FDXTFCEN, ®s->CR0Clr); | |
1081 | break; | |
1082 | ||
1083 | case FLOW_CNTL_TX_RX: | |
1084 | writel(CR0_FDXTFCEN, ®s->CR0Set); | |
1085 | writel(CR0_FDXRFCEN, ®s->CR0Set); | |
1086 | break; | |
1087 | ||
1088 | case FLOW_CNTL_DISABLE: | |
1089 | writel(CR0_FDXRFCEN, ®s->CR0Clr); | |
1090 | writel(CR0_FDXTFCEN, ®s->CR0Clr); | |
1091 | break; | |
1092 | ||
1093 | default: | |
1094 | break; | |
1095 | } | |
1da177e4 | 1096 | |
1da177e4 LT |
1097 | } |
1098 | ||
1099 | /** | |
2cf71d2e DJ |
1100 | * velocity_soft_reset - soft reset |
1101 | * @vptr: velocity to reset | |
1da177e4 | 1102 | * |
2cf71d2e DJ |
1103 | * Kick off a soft reset of the velocity adapter and then poll |
1104 | * until the reset sequence has completed before returning. | |
1da177e4 | 1105 | */ |
2cf71d2e | 1106 | static int velocity_soft_reset(struct velocity_info *vptr) |
1da177e4 | 1107 | { |
2cf71d2e DJ |
1108 | struct mac_regs __iomem *regs = vptr->mac_regs; |
1109 | int i = 0; | |
6aa20a22 | 1110 | |
2cf71d2e | 1111 | writel(CR0_SFRST, ®s->CR0Set); |
1da177e4 | 1112 | |
2cf71d2e DJ |
1113 | for (i = 0; i < W_MAX_TIMEOUT; i++) { |
1114 | udelay(5); | |
1115 | if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, ®s->CR0Set)) | |
1116 | break; | |
1da177e4 LT |
1117 | } |
1118 | ||
2cf71d2e DJ |
1119 | if (i == W_MAX_TIMEOUT) { |
1120 | writel(CR0_FORSRST, ®s->CR0Set); | |
1121 | /* FIXME: PCI POSTING */ | |
1122 | /* delay 2ms */ | |
1123 | mdelay(2); | |
1da177e4 | 1124 | } |
1da177e4 LT |
1125 | return 0; |
1126 | } | |
1127 | ||
1128 | /** | |
2cf71d2e DJ |
1129 | * velocity_set_multi - filter list change callback |
1130 | * @dev: network device | |
1da177e4 | 1131 | * |
2cf71d2e DJ |
1132 | * Called by the network layer when the filter lists need to change |
1133 | * for a velocity adapter. Reload the CAMs with the new address | |
1134 | * filter ruleset. | |
1da177e4 | 1135 | */ |
2cf71d2e | 1136 | static void velocity_set_multi(struct net_device *dev) |
1da177e4 | 1137 | { |
2cf71d2e | 1138 | struct velocity_info *vptr = netdev_priv(dev); |
1da177e4 | 1139 | struct mac_regs __iomem *regs = vptr->mac_regs; |
2cf71d2e DJ |
1140 | u8 rx_mode; |
1141 | int i; | |
22bedad3 | 1142 | struct netdev_hw_addr *ha; |
1da177e4 | 1143 | |
2cf71d2e DJ |
1144 | if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ |
1145 | writel(0xffffffff, ®s->MARCAM[0]); | |
1146 | writel(0xffffffff, ®s->MARCAM[4]); | |
1147 | rx_mode = (RCR_AM | RCR_AB | RCR_PROM); | |
4cd24eaf | 1148 | } else if ((netdev_mc_count(dev) > vptr->multicast_limit) || |
8e95a202 | 1149 | (dev->flags & IFF_ALLMULTI)) { |
2cf71d2e DJ |
1150 | writel(0xffffffff, ®s->MARCAM[0]); |
1151 | writel(0xffffffff, ®s->MARCAM[4]); | |
1152 | rx_mode = (RCR_AM | RCR_AB); | |
1153 | } else { | |
1154 | int offset = MCAM_SIZE - vptr->multicast_limit; | |
1155 | mac_get_cam_mask(regs, vptr->mCAMmask); | |
1da177e4 | 1156 | |
567ec874 | 1157 | i = 0; |
22bedad3 JP |
1158 | netdev_for_each_mc_addr(ha, dev) { |
1159 | mac_set_cam(regs, i + offset, ha->addr); | |
2cf71d2e | 1160 | vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7); |
567ec874 | 1161 | i++; |
1da177e4 | 1162 | } |
1da177e4 | 1163 | |
2cf71d2e DJ |
1164 | mac_set_cam_mask(regs, vptr->mCAMmask); |
1165 | rx_mode = RCR_AM | RCR_AB | RCR_AP; | |
1da177e4 | 1166 | } |
2cf71d2e DJ |
1167 | if (dev->mtu > 1500) |
1168 | rx_mode |= RCR_AL; | |
1da177e4 | 1169 | |
2cf71d2e | 1170 | BYTE_REG_BITS_ON(rx_mode, ®s->RCR); |
1da177e4 | 1171 | |
9088d9a4 FR |
1172 | } |
1173 | ||
2cf71d2e DJ |
1174 | /* |
1175 | * MII access, media link mode setting functions
1da177e4 LT |
1176 | */ |
1177 | ||
1da177e4 | 1178 | /** |
2cf71d2e DJ |
1179 | * mii_init - set up MII |
1180 | * @vptr: velocity adapter | |
1181 | * @mii_status: link status
1da177e4 | 1182 | * |
2cf71d2e | 1183 | * Set up the PHY for the current link state. |
1da177e4 | 1184 | */ |
2cf71d2e | 1185 | static void mii_init(struct velocity_info *vptr, u32 mii_status) |
1da177e4 | 1186 | { |
2cf71d2e | 1187 | u16 BMCR; |
1da177e4 | 1188 | |
2cf71d2e | 1189 | switch (PHYID_GET_PHY_ID(vptr->phy_id)) { |
6dffbe53 TP |
1190 | case PHYID_ICPLUS_IP101A: |
1191 | MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), | |
1192 | MII_ADVERTISE, vptr->mac_regs); | |
1193 | if (vptr->mii_status & VELOCITY_DUPLEX_FULL) | |
1194 | MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, | |
1195 | vptr->mac_regs); | |
1196 | else | |
1197 | MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, | |
1198 | vptr->mac_regs); | |
1199 | MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs); | |
1200 | break; | |
2cf71d2e DJ |
1201 | case PHYID_CICADA_CS8201: |
1202 | /* | |
1203 | * Reset to hardware default | |
1204 | */ | |
3a7f8681 | 1205 | MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); |
2cf71d2e DJ |
1206 | /* |
1207 | * Turn on ECHODIS bit in NWay-forced full mode and turn it | |
1208 | * off it in NWay-forced half mode for NWay-forced v.s. | |
1209 | * legacy-forced issue. | |
1210 | */ | |
1211 | if (vptr->mii_status & VELOCITY_DUPLEX_FULL) | |
3a7f8681 | 1212 | MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); |
2cf71d2e | 1213 | else |
3a7f8681 | 1214 | MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); |
2cf71d2e DJ |
1215 | /* |
1216 | * Turn on Link/Activity LED enable bit for CIS8201 | |
1217 | */ | |
3a7f8681 | 1218 | MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs); |
2cf71d2e DJ |
1219 | break; |
1220 | case PHYID_VT3216_32BIT: | |
1221 | case PHYID_VT3216_64BIT: | |
1222 | /* | |
1223 | * Reset to hardware default | |
1224 | */ | |
3a7f8681 | 1225 | MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); |
2cf71d2e DJ |
1226 | /* |
1227 | * Turn on ECHODIS bit in NWay-forced full mode and turn it | |
1228 | * off it in NWay-forced half mode for NWay-forced v.s. | |
1229 | * legacy-forced issue | |
1230 | */ | |
1231 | if (vptr->mii_status & VELOCITY_DUPLEX_FULL) | |
3a7f8681 | 1232 | MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); |
2cf71d2e | 1233 | else |
3a7f8681 | 1234 | MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); |
2cf71d2e | 1235 | break; |
1da177e4 | 1236 | |
2cf71d2e DJ |
1237 | case PHYID_MARVELL_1000: |
1238 | case PHYID_MARVELL_1000S: | |
1239 | /* | |
1240 | * Assert CRS on Transmit | |
1241 | */ | |
1242 | MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs); | |
1243 | /* | |
1244 | * Reset to hardware default | |
1245 | */ | |
3a7f8681 | 1246 | MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); |
2cf71d2e DJ |
1247 | break; |
1248 | default: | |
1249 | ; | |
1250 | } | |
3a7f8681 FR |
1251 | velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR); |
1252 | if (BMCR & BMCR_ISOLATE) { | |
1253 | BMCR &= ~BMCR_ISOLATE; | |
1254 | velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR); | |
1da177e4 | 1255 | } |
1da177e4 LT |
1256 | } |
1257 | ||
6dfc4b95 SK |
1258 | /** |
1259 | * setup_queue_timers - Setup interrupt timers
1260 | * @vptr: velocity adapter
1261 | * Setup interrupt frequency during suppression (timeout if the frame | |
1262 | * count isn't filled). | |
1263 | */ | |
1264 | static void setup_queue_timers(struct velocity_info *vptr) | |
1265 | { | |
1266 | /* Only for newer revisions */ | |
1267 | if (vptr->rev_id >= REV_ID_VT3216_A0) { | |
1268 | u8 txqueue_timer = 0; | |
1269 | u8 rxqueue_timer = 0; | |
1270 | ||
1271 | if (vptr->mii_status & (VELOCITY_SPEED_1000 | | |
1272 | VELOCITY_SPEED_100)) { | |
1273 | txqueue_timer = vptr->options.txqueue_timer; | |
1274 | rxqueue_timer = vptr->options.rxqueue_timer; | |
1275 | } | |
1276 | ||
1277 | writeb(txqueue_timer, &vptr->mac_regs->TQETMR); | |
1278 | writeb(rxqueue_timer, &vptr->mac_regs->RQETMR); | |
1279 | } | |
1280 | } | |
5ae297b0 | 1281 | |
6dfc4b95 SK |
1282 | /** |
1283 | * setup_adaptive_interrupts - Setup interrupt suppression | |
1284 | * | |
1285 | * @vptr: velocity adapter
1286 | * | |
1287 | * The velocity is able to suppress interrupts under high interrupt load.
1288 | * This function turns on that feature. | |
1289 | */ | |
1290 | static void setup_adaptive_interrupts(struct velocity_info *vptr) | |
1291 | { | |
1292 | struct mac_regs __iomem *regs = vptr->mac_regs; | |
1293 | u16 tx_intsup = vptr->options.tx_intsup; | |
1294 | u16 rx_intsup = vptr->options.rx_intsup; | |
1295 | ||
1296 | /* Setup default interrupt mask (will be changed below) */ | |
1297 | vptr->int_mask = INT_MASK_DEF; | |
1298 | ||
1299 | /* Set Tx Interrupt Suppression Threshold */ | |
1300 | writeb(CAMCR_PS0, ®s->CAMCR); | |
1301 | if (tx_intsup != 0) { | |
1302 | vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I | | |
1303 | ISR_PTX2I | ISR_PTX3I); | |
1304 | writew(tx_intsup, ®s->ISRCTL); | |
1305 | } else | |
1306 | writew(ISRCTL_TSUPDIS, ®s->ISRCTL); | |
1307 | ||
1308 | /* Set Rx Interrupt Suppression Threshold */ | |
1309 | writeb(CAMCR_PS1, ®s->CAMCR); | |
1310 | if (rx_intsup != 0) { | |
1311 | vptr->int_mask &= ~ISR_PRXI; | |
1312 | writew(rx_intsup, ®s->ISRCTL); | |
1313 | } else | |
1314 | writew(ISRCTL_RSUPDIS, ®s->ISRCTL); | |
1315 | ||
1316 | /* Select page to interrupt hold timer */ | |
1317 | writeb(0, ®s->CAMCR); | |
1318 | } | |
2cf71d2e | 1319 | |
1da177e4 | 1320 | /** |
2cf71d2e DJ |
1321 | * velocity_init_registers - initialise MAC registers |
1322 | * @vptr: velocity to init | |
1323 | * @type: type of initialisation (hot or cold) | |
1da177e4 | 1324 | * |
2cf71d2e DJ |
1325 | * Initialise the MAC on a reset or on first set up on the |
1326 | * hardware. | |
1da177e4 | 1327 | */ |
2cf71d2e DJ |
1328 | static void velocity_init_registers(struct velocity_info *vptr, |
1329 | enum velocity_init_type type) | |
1da177e4 | 1330 | { |
2cf71d2e | 1331 | struct mac_regs __iomem *regs = vptr->mac_regs; |
6dffbe53 | 1332 | struct net_device *netdev = vptr->netdev; |
2cf71d2e | 1333 | int i, mii_status; |
1da177e4 | 1334 | |
2cf71d2e | 1335 | mac_wol_reset(regs); |
1da177e4 | 1336 | |
2cf71d2e DJ |
1337 | switch (type) { |
1338 | case VELOCITY_INIT_RESET: | |
1339 | case VELOCITY_INIT_WOL: | |
1da177e4 | 1340 | |
6dffbe53 | 1341 | netif_stop_queue(netdev); |
6aa20a22 | 1342 | |
2cf71d2e DJ |
1343 | /* |
1344 | * Reset RX to prevent RX pointer not on the 4X location | |
1345 | */ | |
1346 | velocity_rx_reset(vptr); | |
1347 | mac_rx_queue_run(regs); | |
1348 | mac_rx_queue_wake(regs); | |
6aa20a22 | 1349 | |
2cf71d2e DJ |
1350 | mii_status = velocity_get_opt_media_mode(vptr); |
1351 | if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { | |
1352 | velocity_print_link_status(vptr); | |
1353 | if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) | |
6dffbe53 | 1354 | netif_wake_queue(netdev); |
1da177e4 | 1355 | } |
1da177e4 | 1356 | |
2cf71d2e | 1357 | enable_flow_control_ability(vptr); |
1da177e4 | 1358 | |
2cf71d2e DJ |
1359 | mac_clear_isr(regs); |
1360 | writel(CR0_STOP, ®s->CR0Clr); | |
1361 | writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), | |
1362 | ®s->CR0Set); | |
6aa20a22 | 1363 | |
2cf71d2e | 1364 | break; |
1da177e4 | 1365 | |
2cf71d2e DJ |
1366 | case VELOCITY_INIT_COLD: |
1367 | default: | |
1368 | /* | |
1369 | * Do reset | |
1370 | */ | |
1371 | velocity_soft_reset(vptr); | |
1372 | mdelay(5); | |
1da177e4 | 1373 | |
6dffbe53 TP |
1374 | if (!vptr->no_eeprom) { |
1375 | mac_eeprom_reload(regs); | |
1376 | for (i = 0; i < 6; i++) | |
1377 | writeb(netdev->dev_addr[i], regs->PAR + i); | |
1378 | } | |
1da177e4 | 1379 | |
2cf71d2e DJ |
1380 | /* |
1381 | * clear Pre_ACPI bit. | |
1382 | */ | |
1383 | BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA)); | |
1384 | mac_set_rx_thresh(regs, vptr->options.rx_thresh); | |
1385 | mac_set_dma_length(regs, vptr->options.DMA_length); | |
1da177e4 | 1386 | |
2cf71d2e DJ |
1387 | writeb(WOLCFG_SAM | WOLCFG_SAB, ®s->WOLCFGSet); |
1388 | /* | |
1389 | * Back off algorithm use original IEEE standard | |
1390 | */ | |
1391 | BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), ®s->CFGB); | |
1da177e4 LT |
1392 | |
1393 | /* | |
2cf71d2e | 1394 | * Init CAM filter |
1da177e4 | 1395 | */ |
2cf71d2e | 1396 | velocity_init_cam_filter(vptr); |
1da177e4 | 1397 | |
2cf71d2e DJ |
1398 | /* |
1399 | * Set packet filter: Receive directed and broadcast address | |
1400 | */ | |
6dffbe53 | 1401 | velocity_set_multi(netdev); |
1da177e4 | 1402 | |
2cf71d2e DJ |
1403 | /* |
1404 | * Enable MII auto-polling | |
1405 | */ | |
1406 | enable_mii_autopoll(regs); | |
1da177e4 | 1407 | |
6dfc4b95 | 1408 | setup_adaptive_interrupts(vptr); |
1da177e4 | 1409 | |
2cf71d2e DJ |
1410 | writel(vptr->rx.pool_dma, ®s->RDBaseLo); |
1411 | writew(vptr->options.numrx - 1, ®s->RDCSize); | |
1412 | mac_rx_queue_run(regs); | |
1413 | mac_rx_queue_wake(regs); | |
1da177e4 | 1414 | |
2cf71d2e | 1415 | writew(vptr->options.numtx - 1, ®s->TDCSize); |
1da177e4 | 1416 | |
2cf71d2e DJ |
1417 | for (i = 0; i < vptr->tx.numq; i++) { |
1418 | writel(vptr->tx.pool_dma[i], ®s->TDBaseLo[i]); | |
1419 | mac_tx_queue_run(regs, i); | |
1420 | } | |
1da177e4 | 1421 | |
2cf71d2e | 1422 | init_flow_control_register(vptr); |
6aa20a22 | 1423 | |
2cf71d2e DJ |
1424 | writel(CR0_STOP, &regs->CR0Clr); |
1425 | writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set); | |
1da177e4 | 1426 | |
2cf71d2e | 1427 | mii_status = velocity_get_opt_media_mode(vptr); |
6dffbe53 | 1428 | netif_stop_queue(netdev); |
1da177e4 | 1429 | |
2cf71d2e | 1430 | mii_init(vptr, mii_status); |
1da177e4 | 1431 | |
2cf71d2e DJ |
1432 | if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { |
1433 | velocity_print_link_status(vptr); | |
1434 | if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) | |
6dffbe53 | 1435 | netif_wake_queue(netdev); |
1da177e4 | 1436 | } |
6aa20a22 | 1437 | |
2cf71d2e DJ |
1438 | enable_flow_control_ability(vptr); |
1439 | mac_hw_mibs_init(regs); | |
1440 | mac_write_int_mask(vptr->int_mask, regs); | |
1441 | mac_clear_isr(regs); | |
1442 | ||
1da177e4 | 1443 | } |
1da177e4 LT |
1444 | } |
1445 | ||
2cf71d2e | 1446 | static void velocity_give_many_rx_descs(struct velocity_info *vptr) |
1da177e4 | 1447 | { |
2cf71d2e DJ |
1448 | struct mac_regs __iomem *regs = vptr->mac_regs; |
1449 | int avail, dirty, unusable; | |
1450 | ||
1451 | /* | |
1452 | * The number of RDs handed back must be a multiple of 4, per the hardware spec | |
1453 | * (programming guide rev 1.20, p.13) | |
1454 | */ | |
1455 | if (vptr->rx.filled < 4) | |
1456 | return; | |
1457 | ||
1458 | wmb(); | |
1459 | ||
1460 | unusable = vptr->rx.filled & 0x0003; | |
1461 | dirty = vptr->rx.dirty - unusable; | |
1462 | for (avail = vptr->rx.filled & 0xfffc; avail; avail--) { | |
1463 | dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; | |
1464 | vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC; | |
1da177e4 | 1465 | } |
2cf71d2e DJ |
1466 | |
1467 | writew(vptr->rx.filled & 0xfffc, &regs->RBRDU); | |
1468 | vptr->rx.filled = unusable; | |
1da177e4 LT |
1469 | } |
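/*
 * Worked illustration of the hand-off in velocity_give_many_rx_descs()
 * above (an arithmetic sketch, not driver code): with numrx = 64,
 * rx.dirty = 10 and rx.filled = 10, the low two bits give unusable = 2,
 * so avail = 10 & 0xfffc = 8.  The loop walks dirty backwards from 7
 * down to 0 setting OWNED_BY_NIC on those descriptors, RBRDU is told
 * about 8 returned slots, and the remaining 2 stay counted in rx.filled
 * until later refills round them up to the next multiple of 4.
 */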
1470 | ||
1471 | /** | |
2cf71d2e DJ |
1472 | * velocity_init_dma_rings - set up DMA rings |
1473 | * @vptr: Velocity to set up | |
6aa20a22 | 1474 | * |
2cf71d2e DJ |
1475 | * Allocate PCI mapped DMA rings for the receive and transmit layer |
1476 | * to use. | |
1da177e4 | 1477 | */ |
2cf71d2e | 1478 | static int velocity_init_dma_rings(struct velocity_info *vptr) |
1da177e4 | 1479 | { |
2cf71d2e DJ |
1480 | struct velocity_opt *opt = &vptr->options; |
1481 | const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc); | |
1482 | const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc); | |
2cf71d2e DJ |
1483 | dma_addr_t pool_dma; |
1484 | void *pool; | |
1485 | unsigned int i; | |
1da177e4 LT |
1486 | |
1487 | /* | |
2cf71d2e DJ |
1488 | * Allocate all RD/TD rings as a single pool. |
1489 | * | |
e2c41f14 | 1490 | * dma_alloc_coherent() fulfills the requirement for 64 byte | |
2cf71d2e | 1491 | * alignment |
1da177e4 | 1492 | */ |
e2c41f14 TP |
1493 | pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq + |
1494 | rx_ring_size, &pool_dma, GFP_ATOMIC); | |
2cf71d2e | 1495 | if (!pool) { |
e2c41f14 | 1496 | dev_err(vptr->dev, "%s : DMA memory allocation failed.\n", |
a9683c94 | 1497 | vptr->netdev->name); |
2cf71d2e | 1498 | return -ENOMEM; |
1da177e4 LT |
1499 | } |
1500 | ||
2cf71d2e DJ |
1501 | vptr->rx.ring = pool; |
1502 | vptr->rx.pool_dma = pool_dma; | |
1da177e4 | 1503 | |
2cf71d2e DJ |
1504 | pool += rx_ring_size; |
1505 | pool_dma += rx_ring_size; | |
d4f73c8e | 1506 | |
2cf71d2e DJ |
1507 | for (i = 0; i < vptr->tx.numq; i++) { |
1508 | vptr->tx.rings[i] = pool; | |
1509 | vptr->tx.pool_dma[i] = pool_dma; | |
1510 | pool += tx_ring_size; | |
1511 | pool_dma += tx_ring_size; | |
1512 | } | |
1da177e4 LT |
1513 | |
1514 | return 0; | |
1515 | } | |
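/*
 * Layout of the single coherent allocation made above, shown for
 * reference (illustrative diagram only):
 *
 *   pool / pool_dma -> +------------------------------+
 *                      |  RX ring: numrx * rx_desc    |
 *                      +------------------------------+
 *                      |  TX ring 0: numtx * tx_desc  |
 *                      +------------------------------+
 *                      |  ... one TX ring per queue   |
 *                      +------------------------------+
 *
 * Advancing the virtual and DMA cursors by the same offsets keeps
 * vptr->tx.rings[i] and vptr->tx.pool_dma[i] describing the same memory.
 */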
1516 | ||
2cf71d2e DJ |
1517 | static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu) |
1518 | { | |
1519 | vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; | |
1520 | } | |
1521 | ||
1da177e4 LT |
1522 | /** |
1523 | * velocity_alloc_rx_buf - allocate aligned receive buffer | |
1524 | * @vptr: velocity | |
1525 | * @idx: ring index | |
1526 | * | |
1527 | * Allocate a new full sized buffer for the reception of a frame and | |
1528 | * map it into PCI space for the hardware to use. The hardware | |
1529 | * requires *64* byte alignment of the buffer which makes life | |
1530 | * less fun than would be ideal. | |
1531 | */ | |
1da177e4 LT |
1532 | static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) |
1533 | { | |
0fe9f15e FR |
1534 | struct rx_desc *rd = &(vptr->rx.ring[idx]); |
1535 | struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); | |
1da177e4 | 1536 | |
a9683c94 | 1537 | rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64); |
1da177e4 LT |
1538 | if (rd_info->skb == NULL) |
1539 | return -ENOMEM; | |
1540 | ||
1541 | /* | |
1542 | * Do the gymnastics to get the buffer head for data at | |
1543 | * 64 byte alignment. | |
1544 | */ | |
da95b2d4 SK |
1545 | skb_reserve(rd_info->skb, |
1546 | 64 - ((unsigned long) rd_info->skb->data & 63)); | |
e2c41f14 TP |
1547 | rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data, |
1548 | vptr->rx.buf_sz, DMA_FROM_DEVICE); | |
6aa20a22 | 1549 | |
1da177e4 LT |
1550 | /* |
1551 | * Fill in the descriptor to match | |
0fe9f15e | 1552 | */ |
6aa20a22 | 1553 | |
1da177e4 | 1554 | *((u32 *) & (rd->rdesc0)) = 0; |
0fe9f15e | 1555 | rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN; |
1da177e4 LT |
1556 | rd->pa_low = cpu_to_le32(rd_info->skb_dma); |
1557 | rd->pa_high = 0; | |
1558 | return 0; | |
1559 | } | |
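/*
 * Example of the alignment gymnastics above (addresses are purely
 * illustrative): if netdev_alloc_skb() hands back data at ...0x1238,
 * then 0x1238 & 63 == 0x38, so skb_reserve(skb, 64 - 0x38) advances
 * data by 8 bytes to ...0x1240, which is 64 byte aligned.  When data is
 * already aligned the full 64 bytes get reserved, which is why the skb
 * is allocated with buf_sz + 64 bytes of headroom.
 */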
1560 | ||
6aa20a22 | 1561 | |
2cf71d2e | 1562 | static int velocity_rx_refill(struct velocity_info *vptr) |
1da177e4 | 1563 | { |
2cf71d2e | 1564 | int dirty = vptr->rx.dirty, done = 0; |
1da177e4 | 1565 | |
2cf71d2e DJ |
1566 | do { |
1567 | struct rx_desc *rd = vptr->rx.ring + dirty; | |
1da177e4 | 1568 | |
2cf71d2e DJ |
1569 | /* Fine for an all zero Rx desc at init time as well */ |
1570 | if (rd->rdesc0.len & OWNED_BY_NIC) | |
1571 | break; | |
1da177e4 | 1572 | |
2cf71d2e DJ |
1573 | if (!vptr->rx.info[dirty].skb) { |
1574 | if (velocity_alloc_rx_buf(vptr, dirty) < 0) | |
1da177e4 | 1575 | break; |
1da177e4 | 1576 | } |
2cf71d2e DJ |
1577 | done++; |
1578 | dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0; | |
1579 | } while (dirty != vptr->rx.curr); | |
1da177e4 | 1580 | |
2cf71d2e DJ |
1581 | if (done) { |
1582 | vptr->rx.dirty = dirty; | |
1583 | vptr->rx.filled += done; | |
1da177e4 | 1584 | } |
2cf71d2e DJ |
1585 | |
1586 | return done; | |
1da177e4 LT |
1587 | } |
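/*
 * Refill walk above, traced on example indexes (not driver code): with
 * rx.curr = 12 and rx.dirty = 5, slots 5..11 are visited in order; a
 * slot still owned by the NIC or a failed allocation stops the walk,
 * otherwise empty slots get fresh buffers, done counts the successes,
 * and dirty advances with wrap-around at options.numrx.  The caller
 * later converts rx.filled into hardware-owned descriptors via
 * velocity_give_many_rx_descs().
 */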
1588 | ||
1589 | /** | |
2cf71d2e DJ |
1590 | * velocity_free_rd_ring - free receive ring |
1591 | * @vptr: velocity to clean up | |
1da177e4 | 1592 | * |
2cf71d2e DJ |
1593 | * Free the receive buffers for each ring slot and any |
1594 | * attached socket buffers that need to go away. | |
1da177e4 | 1595 | */ |
2cf71d2e | 1596 | static void velocity_free_rd_ring(struct velocity_info *vptr) |
1da177e4 | 1597 | { |
2cf71d2e | 1598 | int i; |
1da177e4 | 1599 | |
2cf71d2e DJ |
1600 | if (vptr->rx.info == NULL) |
1601 | return; | |
6aa20a22 | 1602 | |
2cf71d2e DJ |
1603 | for (i = 0; i < vptr->options.numrx; i++) { |
1604 | struct velocity_rd_info *rd_info = &(vptr->rx.info[i]); | |
1605 | struct rx_desc *rd = vptr->rx.ring + i; | |
1da177e4 | 1606 | |
2cf71d2e | 1607 | memset(rd, 0, sizeof(*rd)); |
1da177e4 | 1608 | |
2cf71d2e DJ |
1609 | if (!rd_info->skb) |
1610 | continue; | |
e2c41f14 TP |
1611 | dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz, |
1612 | DMA_FROM_DEVICE); | |
2cf71d2e | 1613 | rd_info->skb_dma = 0; |
6aa20a22 | 1614 | |
2cf71d2e DJ |
1615 | dev_kfree_skb(rd_info->skb); |
1616 | rd_info->skb = NULL; | |
1da177e4 LT |
1617 | } |
1618 | ||
2cf71d2e DJ |
1619 | kfree(vptr->rx.info); |
1620 | vptr->rx.info = NULL; | |
1621 | } | |
1da177e4 | 1622 | |
2cf71d2e DJ |
1623 | /** |
1624 | * velocity_init_rd_ring - set up receive ring | |
1625 | * @vptr: velocity to configure | |
1626 | * | |
1627 | * Allocate and set up the receive buffers for each ring slot and | |
1628 | * assign them to the network adapter. | |
1629 | */ | |
1630 | static int velocity_init_rd_ring(struct velocity_info *vptr) | |
1631 | { | |
1632 | int ret = -ENOMEM; | |
1da177e4 | 1633 | |
2cf71d2e DJ |
1634 | vptr->rx.info = kcalloc(vptr->options.numrx, |
1635 | sizeof(struct velocity_rd_info), GFP_KERNEL); | |
1636 | if (!vptr->rx.info) | |
1637 | goto out; | |
6aa20a22 | 1638 | |
2cf71d2e | 1639 | velocity_init_rx_ring_indexes(vptr); |
1da177e4 | 1640 | |
2cf71d2e DJ |
1641 | if (velocity_rx_refill(vptr) != vptr->options.numrx) { |
1642 | VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR | |
a9683c94 | 1643 | "%s: failed to allocate RX buffer.\n", vptr->netdev->name); |
2cf71d2e DJ |
1644 | velocity_free_rd_ring(vptr); |
1645 | goto out; | |
1646 | } | |
1da177e4 | 1647 | |
2cf71d2e DJ |
1648 | ret = 0; |
1649 | out: | |
1650 | return ret; | |
1da177e4 LT |
1651 | } |
1652 | ||
1653 | /** | |
2cf71d2e DJ |
1654 | * velocity_init_td_ring - set up transmit ring |
1655 | * @vptr: velocity | |
1da177e4 | 1656 | * |
2cf71d2e DJ |
1657 | * Set up the transmit ring and chain the ring pointers together. |
1658 | * Returns zero on success or a negative posix errno code for | |
1659 | * failure. | |
1da177e4 | 1660 | */ |
2cf71d2e | 1661 | static int velocity_init_td_ring(struct velocity_info *vptr) |
1da177e4 | 1662 | { |
2cf71d2e | 1663 | int j; |
1da177e4 | 1664 | |
2cf71d2e DJ |
1665 | /* Init the TD ring entries */ |
1666 | for (j = 0; j < vptr->tx.numq; j++) { | |
1da177e4 | 1667 | |
2cf71d2e DJ |
1668 | vptr->tx.infos[j] = kcalloc(vptr->options.numtx, |
1669 | sizeof(struct velocity_td_info), | |
1670 | GFP_KERNEL); | |
1671 | if (!vptr->tx.infos[j]) { | |
1672 | while (--j >= 0) | |
1673 | kfree(vptr->tx.infos[j]); | |
1674 | return -ENOMEM; | |
1da177e4 | 1675 | } |
2cf71d2e DJ |
1676 | |
1677 | vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0; | |
1da177e4 | 1678 | } |
2cf71d2e DJ |
1679 | return 0; |
1680 | } | |
1681 | ||
1682 | /** | |
1683 | * velocity_free_dma_rings - free PCI ring pointers | |
1684 | * @vptr: Velocity to free from | |
1685 | * | |
1686 | * Clean up the PCI ring buffers allocated to this velocity. | |
1687 | */ | |
1688 | static void velocity_free_dma_rings(struct velocity_info *vptr) | |
1689 | { | |
1690 | const int size = vptr->options.numrx * sizeof(struct rx_desc) + | |
1691 | vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq; | |
1692 | ||
e2c41f14 | 1693 | dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma); |
1da177e4 LT |
1694 | } |
1695 | ||
3c4dc711 FR |
1696 | static int velocity_init_rings(struct velocity_info *vptr, int mtu) |
1697 | { | |
1698 | int ret; | |
1699 | ||
1700 | velocity_set_rxbufsize(vptr, mtu); | |
1701 | ||
1702 | ret = velocity_init_dma_rings(vptr); | |
1703 | if (ret < 0) | |
1704 | goto out; | |
1705 | ||
1706 | ret = velocity_init_rd_ring(vptr); | |
1707 | if (ret < 0) | |
1708 | goto err_free_dma_rings_0; | |
1709 | ||
1710 | ret = velocity_init_td_ring(vptr); | |
1711 | if (ret < 0) | |
1712 | goto err_free_rd_ring_1; | |
1713 | out: | |
1714 | return ret; | |
1715 | ||
1716 | err_free_rd_ring_1: | |
1717 | velocity_free_rd_ring(vptr); | |
1718 | err_free_dma_rings_0: | |
1719 | velocity_free_dma_rings(vptr); | |
1720 | goto out; | |
1721 | } | |
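/*
 * Note on the unwind ladder above: teardown runs in the reverse order
 * of setup, so a velocity_init_td_ring() failure frees the RD ring and
 * then the DMA pool, while a velocity_init_rd_ring() failure only has
 * the DMA pool left to free.
 */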
1722 | ||
1da177e4 | 1723 | /** |
2cf71d2e DJ |
1724 | * velocity_free_tx_buf - free transmit buffer |
1725 | * @vptr: velocity | |
1726 | * @tdinfo: buffer | |
1da177e4 | 1727 | * |
2cf71d2e DJ |
1728 | * Release a transmit buffer. If the buffer was preallocated then |
1729 | * recycle it, if not then unmap the buffer. | |
1da177e4 | 1730 | */ |
c79992fd SK |
1731 | static void velocity_free_tx_buf(struct velocity_info *vptr, |
1732 | struct velocity_td_info *tdinfo, struct tx_desc *td) | |
1da177e4 | 1733 | { |
2cf71d2e | 1734 | struct sk_buff *skb = tdinfo->skb; |
28133176 | 1735 | |
2cf71d2e DJ |
1736 | /* |
1737 | * Don't unmap the pre-allocated tx_bufs | |
1738 | */ | |
1739 | if (tdinfo->skb_dma) { | |
c79992fd | 1740 | int i; |
1da177e4 | 1741 | |
2cf71d2e | 1742 | for (i = 0; i < tdinfo->nskb_dma; i++) { |
c79992fd SK |
1743 | size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN); |
1744 | ||
1745 | /* For scatter-gather */ | |
1746 | if (skb_shinfo(skb)->nr_frags > 0) | |
1747 | pktlen = max_t(size_t, pktlen, | |
1748 | td->td_buf[i].size & ~TD_QUEUE); | |
1749 | ||
e2c41f14 TP |
1750 | dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], |
1751 | le16_to_cpu(pktlen), DMA_TO_DEVICE); | |
2cf71d2e | 1752 | } |
1da177e4 | 1753 | } |
2cf71d2e DJ |
1754 | dev_kfree_skb_irq(skb); |
1755 | tdinfo->skb = NULL; | |
1da177e4 LT |
1756 | } |
1757 | ||
2cf71d2e DJ |
1758 | /* |
1759 | * FIXME: could we merge this with velocity_free_tx_buf ? | |
1760 | */ | |
1761 | static void velocity_free_td_ring_entry(struct velocity_info *vptr, | |
1762 | int q, int n) | |
1da177e4 | 1763 | { |
2cf71d2e DJ |
1764 | struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]); |
1765 | int i; | |
bd7b3f34 | 1766 | |
2cf71d2e DJ |
1767 | if (td_info == NULL) |
1768 | return; | |
3c4dc711 | 1769 | |
2cf71d2e DJ |
1770 | if (td_info->skb) { |
1771 | for (i = 0; i < td_info->nskb_dma; i++) { | |
1772 | if (td_info->skb_dma[i]) { | |
e2c41f14 TP |
1773 | dma_unmap_single(vptr->dev, td_info->skb_dma[i], |
1774 | td_info->skb->len, DMA_TO_DEVICE); | |
2cf71d2e DJ |
1775 | td_info->skb_dma[i] = 0; |
1776 | } | |
3c4dc711 | 1777 | } |
2cf71d2e DJ |
1778 | dev_kfree_skb(td_info->skb); |
1779 | td_info->skb = NULL; | |
3c4dc711 | 1780 | } |
1da177e4 LT |
1781 | } |
1782 | ||
1783 | /** | |
2cf71d2e DJ |
1784 | * velocity_free_td_ring - free td ring |
1785 | * @vptr: velocity | |
1da177e4 | 1786 | * |
2cf71d2e DJ |
1787 | * Free up the transmit ring for this particular velocity adapter. |
1788 | * We free the ring contents but not the ring itself. | |
1789 | */ | |
1790 | static void velocity_free_td_ring(struct velocity_info *vptr) | |
1791 | { | |
1792 | int i, j; | |
1da177e4 | 1793 | |
2cf71d2e DJ |
1794 | for (j = 0; j < vptr->tx.numq; j++) { |
1795 | if (vptr->tx.infos[j] == NULL) | |
1796 | continue; | |
1797 | for (i = 0; i < vptr->options.numtx; i++) | |
1798 | velocity_free_td_ring_entry(vptr, j, i); | |
6aa20a22 | 1799 | |
2cf71d2e DJ |
1800 | kfree(vptr->tx.infos[j]); |
1801 | vptr->tx.infos[j] = NULL; | |
1802 | } | |
1803 | } | |
6aa20a22 | 1804 | |
2cf71d2e DJ |
1805 | static void velocity_free_rings(struct velocity_info *vptr) |
1806 | { | |
1807 | velocity_free_td_ring(vptr); | |
1808 | velocity_free_rd_ring(vptr); | |
1809 | velocity_free_dma_rings(vptr); | |
1da177e4 LT |
1810 | } |
1811 | ||
1812 | /** | |
2cf71d2e DJ |
1813 | * velocity_error - handle error from controller |
1814 | * @vptr: velocity | |
1815 | * @status: card status | |
1816 | * | |
1817 | * Process an error report from the hardware and attempt to recover | |
1818 | * the card itself. At the moment we cannot recover from some | |
1819 | * theoretically impossible errors but this could be fixed using | |
1820 | * the pci_device_failed logic to bounce the hardware | |
1da177e4 | 1821 | * |
1da177e4 | 1822 | */ |
2cf71d2e | 1823 | static void velocity_error(struct velocity_info *vptr, int status) |
1da177e4 | 1824 | { |
580a6902 | 1825 | |
2cf71d2e DJ |
1826 | if (status & ISR_TXSTLI) { |
1827 | struct mac_regs __iomem *regs = vptr->mac_regs; | |
1da177e4 | 1828 | |
2cf71d2e DJ |
1829 | printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0])); |
1830 | BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR); | |
1831 | writew(TRDCSR_RUN, &regs->TDCSRClr); | |
a9683c94 | 1832 | netif_stop_queue(vptr->netdev); |
1da177e4 | 1833 | |
2cf71d2e DJ |
1834 | /* FIXME: port over the pci_device_failed code and use it |
1835 | here */ | |
1836 | } | |
1da177e4 | 1837 | |
2cf71d2e DJ |
1838 | if (status & ISR_SRCI) { |
1839 | struct mac_regs __iomem *regs = vptr->mac_regs; | |
1840 | int linked; | |
1da177e4 | 1841 | |
2cf71d2e DJ |
1842 | if (vptr->options.spd_dpx == SPD_DPX_AUTO) { |
1843 | vptr->mii_status = check_connection_type(regs); | |
1da177e4 | 1844 | |
2cf71d2e DJ |
1845 | /* |
1846 | * If it is a 3119, disable frame bursting in | |
1847 | * halfduplex mode and enable it in fullduplex | |
1848 | * mode | |
1849 | */ | |
1850 | if (vptr->rev_id < REV_ID_VT3216_A0) { | |
0527a1a8 | 1851 | if (vptr->mii_status & VELOCITY_DUPLEX_FULL) |
2cf71d2e DJ |
1852 | BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR); |
1853 | else | |
1854 | BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR); | |
1855 | } | |
1856 | /* | |
1857 | * Only enable CD heart beat counter in 10HD mode | |
1858 | */ | |
1859 | if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10)) | |
1860 | BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG); | |
1861 | else | |
1862 | BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG); | |
6dfc4b95 SK |
1863 | |
1864 | setup_queue_timers(vptr); | |
2cf71d2e DJ |
1865 | } |
1866 | /* | |
1867 | * Get link status from PHYSR0 | |
1868 | */ | |
1869 | linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD; | |
1da177e4 | 1870 | |
2cf71d2e DJ |
1871 | if (linked) { |
1872 | vptr->mii_status &= ~VELOCITY_LINK_FAIL; | |
a9683c94 | 1873 | netif_carrier_on(vptr->netdev); |
2cf71d2e DJ |
1874 | } else { |
1875 | vptr->mii_status |= VELOCITY_LINK_FAIL; | |
a9683c94 | 1876 | netif_carrier_off(vptr->netdev); |
2cf71d2e | 1877 | } |
1da177e4 | 1878 | |
2cf71d2e DJ |
1879 | velocity_print_link_status(vptr); |
1880 | enable_flow_control_ability(vptr); | |
1da177e4 | 1881 | |
2cf71d2e DJ |
1882 | /* |
1883 | * Re-enable auto-polling because SRCI will disable | |
1884 | * auto-polling | |
1885 | */ | |
1da177e4 | 1886 | |
2cf71d2e | 1887 | enable_mii_autopoll(regs); |
1da177e4 | 1888 | |
2cf71d2e | 1889 | if (vptr->mii_status & VELOCITY_LINK_FAIL) |
a9683c94 | 1890 | netif_stop_queue(vptr->netdev); |
2cf71d2e | 1891 | else |
a9683c94 | 1892 | netif_wake_queue(vptr->netdev); |
1da177e4 | 1893 | |
6403eab1 | 1894 | } |
2cf71d2e DJ |
1895 | if (status & ISR_MIBFI) |
1896 | velocity_update_hw_mibs(vptr); | |
1897 | if (status & ISR_LSTEI) | |
1898 | mac_rx_queue_wake(vptr->mac_regs); | |
1da177e4 LT |
1899 | } |
1900 | ||
1901 | /** | |
2cf71d2e DJ |
1902 | * velocity_tx_srv - transmit interrupt service |
1903 | * @vptr: Velocity | |
1da177e4 | 1904 | * |
2cf71d2e DJ |
1905 | * Scan the queues looking for transmitted packets that |
1906 | * we can complete and clean up. Update any statistics as | |
1907 | * necessary. | |
1da177e4 | 1908 | */ |
d6cade0f | 1909 | static int velocity_tx_srv(struct velocity_info *vptr) |
1da177e4 | 1910 | { |
2cf71d2e DJ |
1911 | struct tx_desc *td; |
1912 | int qnum; | |
1913 | int full = 0; | |
1914 | int idx; | |
1915 | int works = 0; | |
1916 | struct velocity_td_info *tdinfo; | |
a9683c94 | 1917 | struct net_device_stats *stats = &vptr->netdev->stats; |
1da177e4 | 1918 | |
2cf71d2e DJ |
1919 | for (qnum = 0; qnum < vptr->tx.numq; qnum++) { |
1920 | for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0; | |
1921 | idx = (idx + 1) % vptr->options.numtx) { | |
1da177e4 | 1922 | |
2cf71d2e DJ |
1923 | /* |
1924 | * Get Tx Descriptor | |
1925 | */ | |
1926 | td = &(vptr->tx.rings[qnum][idx]); | |
1927 | tdinfo = &(vptr->tx.infos[qnum][idx]); | |
1da177e4 | 1928 | |
2cf71d2e DJ |
1929 | if (td->tdesc0.len & OWNED_BY_NIC) |
1930 | break; | |
1da177e4 | 1931 | |
2cf71d2e DJ |
1932 | if ((works++ > 15)) |
1933 | break; | |
1934 | ||
1935 | if (td->tdesc0.TSR & TSR0_TERR) { | |
1936 | stats->tx_errors++; | |
1937 | stats->tx_dropped++; | |
1938 | if (td->tdesc0.TSR & TSR0_CDH) | |
1939 | stats->tx_heartbeat_errors++; | |
1940 | if (td->tdesc0.TSR & TSR0_CRS) | |
1941 | stats->tx_carrier_errors++; | |
1942 | if (td->tdesc0.TSR & TSR0_ABT) | |
1943 | stats->tx_aborted_errors++; | |
1944 | if (td->tdesc0.TSR & TSR0_OWC) | |
1945 | stats->tx_window_errors++; | |
1946 | } else { | |
1947 | stats->tx_packets++; | |
1948 | stats->tx_bytes += tdinfo->skb->len; | |
1949 | } | |
c79992fd | 1950 | velocity_free_tx_buf(vptr, tdinfo, td); |
2cf71d2e DJ |
1951 | vptr->tx.used[qnum]--; |
1952 | } | |
1953 | vptr->tx.tail[qnum] = idx; | |
1da177e4 | 1954 | |
2cf71d2e DJ |
1955 | if (AVAIL_TD(vptr, qnum) < 1) |
1956 | full = 1; | |
1957 | } | |
1da177e4 | 1958 | /* |
2cf71d2e DJ |
1959 | * Look to see if we should kick the transmit network |
1960 | * layer for more work. | |
1da177e4 | 1961 | */ |
a9683c94 | 1962 | if (netif_queue_stopped(vptr->netdev) && (full == 0) && |
8e95a202 | 1963 | (!(vptr->mii_status & VELOCITY_LINK_FAIL))) { |
a9683c94 | 1964 | netif_wake_queue(vptr->netdev); |
2cf71d2e DJ |
1965 | } |
1966 | return works; | |
1967 | } | |
1968 | ||
1969 | /** | |
1970 | * velocity_rx_csum - checksum process | |
1971 | * @rd: receive packet descriptor | |
1972 | * @skb: network layer packet buffer | |
1973 | * | |
1974 | * Process the status bits for the received packet and determine | |
1975 | * if the checksum was computed and verified by the hardware | |
1976 | */ | |
1977 | static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb) | |
1978 | { | |
bc8acf2c | 1979 | skb_checksum_none_assert(skb); |
6aa20a22 | 1980 | |
2cf71d2e DJ |
1981 | if (rd->rdesc1.CSM & CSM_IPKT) { |
1982 | if (rd->rdesc1.CSM & CSM_IPOK) { | |
1983 | if ((rd->rdesc1.CSM & CSM_TCPKT) || | |
1984 | (rd->rdesc1.CSM & CSM_UDPKT)) { | |
1985 | if (!(rd->rdesc1.CSM & CSM_TUPOK)) | |
1986 | return; | |
1987 | } | |
1988 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1da177e4 LT |
1989 | } |
1990 | } | |
1da177e4 LT |
1991 | } |
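/*
 * Summary of the checksum decision above as a table (illustration of
 * the existing logic, not additional behaviour):
 *
 *   IPKT  IPOK  TCP/UDP  TUPOK   ->  skb->ip_summed
 *    0     x      x        x         CHECKSUM_NONE
 *    1     0      x        x         CHECKSUM_NONE
 *    1     1      0        x         CHECKSUM_UNNECESSARY
 *    1     1      1        0         CHECKSUM_NONE
 *    1     1      1        1         CHECKSUM_UNNECESSARY
 */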
1992 | ||
1da177e4 | 1993 | /** |
2cf71d2e DJ |
1994 | * velocity_rx_copy - in place Rx copy for small packets |
1995 | * @rx_skb: network layer packet buffer candidate | |
1996 | * @pkt_size: received data size | |
1997 | * @vptr: velocity we are handling | |
1da177e4 LT |
1998 | * |
1999 | * | |
2cf71d2e | 2000 | * Replace the current skb that is scheduled for Rx processing by a |
25985edc | 2001 | * shorter, immediately allocated skb, if the received packet is small |
2cf71d2e DJ |
2002 | * enough. This function returns a negative value if the received |
2003 | * packet is too big or if memory is exhausted. | |
1da177e4 | 2004 | */ |
2cf71d2e DJ |
2005 | static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size, |
2006 | struct velocity_info *vptr) | |
1da177e4 | 2007 | { |
2cf71d2e DJ |
2008 | int ret = -1; |
2009 | if (pkt_size < rx_copybreak) { | |
2010 | struct sk_buff *new_skb; | |
1da177e4 | 2011 | |
a9683c94 | 2012 | new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size); |
2cf71d2e DJ |
2013 | if (new_skb) { |
2014 | new_skb->ip_summed = rx_skb[0]->ip_summed; | |
2cf71d2e DJ |
2015 | skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size); |
2016 | *rx_skb = new_skb; | |
2017 | ret = 0; | |
1da177e4 LT |
2018 | } |
2019 | ||
1da177e4 | 2020 | } |
2cf71d2e | 2021 | return ret; |
1da177e4 LT |
2022 | } |
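/*
 * Copybreak behaviour above, by example: a frame shorter than
 * rx_copybreak is copied into a freshly allocated, IP-aligned skb and
 * that copy goes up the stack, so the original full sized receive
 * buffer stays mapped and is simply reused by the ring.  Anything at or
 * over the threshold (or an allocation failure) returns -1 here and the
 * original skb is handed up instead.
 */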
2023 | ||
2024 | /** | |
2cf71d2e DJ |
2025 | * velocity_iph_realign - IP header alignment |
2026 | * @vptr: velocity we are handling | |
2027 | * @skb: network layer packet buffer | |
2028 | * @pkt_size: received data size | |
1da177e4 | 2029 | * |
2cf71d2e DJ |
2030 | * Align IP header on a 2 byte boundary. This behavior can be |
2031 | * configured by the user. | |
1da177e4 | 2032 | */ |
2cf71d2e DJ |
2033 | static inline void velocity_iph_realign(struct velocity_info *vptr, |
2034 | struct sk_buff *skb, int pkt_size) | |
1da177e4 | 2035 | { |
2cf71d2e DJ |
2036 | if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) { |
2037 | memmove(skb->data + 2, skb->data, pkt_size); | |
2038 | skb_reserve(skb, 2); | |
2039 | } | |
2040 | } | |
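/*
 * Why the 2 byte shift above helps, with concrete offsets: the receive
 * buffer is 64 byte aligned and the Ethernet header is 14 bytes, so the
 * IP header would start at offset 14.  Copying the frame two bytes
 * further into the buffer and reserving those two bytes puts the IP
 * header at offset 16, i.e. on a naturally aligned boundary, which some
 * architectures need for cheap access to the header fields.
 */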
6aa20a22 | 2041 | |
2cf71d2e DJ |
2042 | /** |
2043 | * velocity_receive_frame - received packet processor | |
2044 | * @vptr: velocity we are handling | |
2045 | * @idx: ring index | |
2046 | * | |
2047 | * A packet has arrived. We process the packet and if appropriate | |
2048 | * pass the frame up the network stack | |
2049 | */ | |
2050 | static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |
2051 | { | |
a9683c94 | 2052 | struct net_device_stats *stats = &vptr->netdev->stats; |
2cf71d2e DJ |
2053 | struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); |
2054 | struct rx_desc *rd = &(vptr->rx.ring[idx]); | |
2055 | int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; | |
2056 | struct sk_buff *skb; | |
1da177e4 | 2057 | |
2cf71d2e | 2058 | if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { |
a9683c94 | 2059 | VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name); |
2cf71d2e DJ |
2060 | stats->rx_length_errors++; |
2061 | return -EINVAL; | |
2062 | } | |
1da177e4 | 2063 | |
2cf71d2e DJ |
2064 | if (rd->rdesc0.RSR & RSR_MAR) |
2065 | stats->multicast++; | |
1da177e4 | 2066 | |
2cf71d2e | 2067 | skb = rd_info->skb; |
1da177e4 | 2068 | |
e2c41f14 TP |
2069 | dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma, |
2070 | vptr->rx.buf_sz, DMA_FROM_DEVICE); | |
1da177e4 | 2071 | |
2cf71d2e DJ |
2072 | /* |
2073 | * Drop frames not meeting IEEE 802.3 | |
2074 | */ | |
1da177e4 | 2075 | |
2cf71d2e DJ |
2076 | if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) { |
2077 | if (rd->rdesc0.RSR & RSR_RL) { | |
2078 | stats->rx_length_errors++; | |
2079 | return -EINVAL; | |
2080 | } | |
2081 | } | |
6aa20a22 | 2082 | |
2cf71d2e | 2083 | velocity_rx_csum(rd, skb); |
6aa20a22 | 2084 | |
2cf71d2e DJ |
2085 | if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { |
2086 | velocity_iph_realign(vptr, skb, pkt_len); | |
2cf71d2e | 2087 | rd_info->skb = NULL; |
e2c41f14 TP |
2088 | dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz, |
2089 | DMA_FROM_DEVICE); | |
2090 | } else { | |
2091 | dma_sync_single_for_device(vptr->dev, rd_info->skb_dma, | |
2092 | vptr->rx.buf_sz, DMA_FROM_DEVICE); | |
2cf71d2e | 2093 | } |
6aa20a22 | 2094 | |
2cf71d2e | 2095 | skb_put(skb, pkt_len - 4); |
a9683c94 | 2096 | skb->protocol = eth_type_trans(skb, vptr->netdev); |
2cf71d2e | 2097 | |
73b54688 JP |
2098 | if (rd->rdesc0.RSR & RSR_DETAG) { |
2099 | u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG)); | |
2100 | ||
86a9bad3 | 2101 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); |
73b54688 | 2102 | } |
2fdac010 | 2103 | netif_receive_skb(skb); |
6aa20a22 | 2104 | |
2cf71d2e | 2105 | stats->rx_bytes += pkt_len; |
3cb7a798 | 2106 | stats->rx_packets++; |
6aa20a22 | 2107 | |
2cf71d2e | 2108 | return 0; |
1da177e4 LT |
2109 | } |
2110 | ||
1da177e4 | 2111 | /** |
2cf71d2e DJ |
2112 | * velocity_rx_srv - service RX interrupt |
2113 | * @vptr: velocity | |
1da177e4 | 2114 | * |
2cf71d2e DJ |
2115 | * Walk the receive ring of the velocity adapter and remove |
2116 | * any received packets from the receive queue. Hand the ring | |
2117 | * slots back to the adapter for reuse. | |
1da177e4 | 2118 | */ |
d6cade0f | 2119 | static int velocity_rx_srv(struct velocity_info *vptr, int budget_left) |
1da177e4 | 2120 | { |
a9683c94 | 2121 | struct net_device_stats *stats = &vptr->netdev->stats; |
2cf71d2e DJ |
2122 | int rd_curr = vptr->rx.curr; |
2123 | int works = 0; | |
2124 | ||
dfff7144 | 2125 | while (works < budget_left) { |
2cf71d2e DJ |
2126 | struct rx_desc *rd = vptr->rx.ring + rd_curr; |
2127 | ||
2128 | if (!vptr->rx.info[rd_curr].skb) | |
2129 | break; | |
2130 | ||
2131 | if (rd->rdesc0.len & OWNED_BY_NIC) | |
2132 | break; | |
2133 | ||
2134 | rmb(); | |
1da177e4 | 2135 | |
2cf71d2e DJ |
2136 | /* |
2137 | * Don't drop CE or RL error frame although RXOK is off | |
2138 | */ | |
2139 | if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) { | |
2140 | if (velocity_receive_frame(vptr, rd_curr) < 0) | |
2141 | stats->rx_dropped++; | |
2142 | } else { | |
2143 | if (rd->rdesc0.RSR & RSR_CRC) | |
2144 | stats->rx_crc_errors++; | |
2145 | if (rd->rdesc0.RSR & RSR_FAE) | |
2146 | stats->rx_frame_errors++; | |
1da177e4 | 2147 | |
2cf71d2e DJ |
2148 | stats->rx_dropped++; |
2149 | } | |
6aa20a22 | 2150 | |
2cf71d2e | 2151 | rd->size |= RX_INTEN; |
1da177e4 | 2152 | |
2cf71d2e DJ |
2153 | rd_curr++; |
2154 | if (rd_curr >= vptr->options.numrx) | |
2155 | rd_curr = 0; | |
dfff7144 SK |
2156 | works++; |
2157 | } | |
1da177e4 | 2158 | |
2cf71d2e | 2159 | vptr->rx.curr = rd_curr; |
1da177e4 | 2160 | |
2cf71d2e DJ |
2161 | if ((works > 0) && (velocity_rx_refill(vptr) > 0)) |
2162 | velocity_give_many_rx_descs(vptr); | |
2163 | ||
2164 | VAR_USED(stats); | |
2165 | return works; | |
2166 | } | |
6aa20a22 | 2167 | |
dfff7144 SK |
2168 | static int velocity_poll(struct napi_struct *napi, int budget) |
2169 | { | |
2170 | struct velocity_info *vptr = container_of(napi, | |
2171 | struct velocity_info, napi); | |
2172 | unsigned int rx_done; | |
3f2e8d9f | 2173 | unsigned long flags; |
dfff7144 SK |
2174 | |
2175 | /* | |
2176 | * Do rx and tx twice for performance (taken from the VIA | |
2177 | * out-of-tree driver). | |
2178 | */ | |
bc9627e7 | 2179 | rx_done = velocity_rx_srv(vptr, budget); |
2180 | spin_lock_irqsave(&vptr->lock, flags); | |
d6cade0f | 2181 | velocity_tx_srv(vptr); |
dfff7144 SK |
2182 | /* If budget not fully consumed, exit the polling mode */ |
2183 | if (rx_done < budget) { | |
2184 | napi_complete(napi); | |
2185 | mac_enable_int(vptr->mac_regs); | |
2186 | } | |
3f2e8d9f | 2187 | spin_unlock_irqrestore(&vptr->lock, flags); |
dfff7144 SK |
2188 | |
2189 | return rx_done; | |
2190 | } | |
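/*
 * The poll routine above follows the usual NAPI completion pattern,
 * repeated here only as a reference sketch:
 *
 *	if (rx_done < budget) {
 *		napi_complete(napi);            <- leave polling mode first
 *		mac_enable_int(vptr->mac_regs); <- then unmask the device IRQ
 *	}
 *
 * Re-enabling the interrupt only after napi_complete(), and under
 * vptr->lock, avoids racing a new interrupt against a poll that still
 * believes it owns the queues.
 */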
6aa20a22 | 2191 | |
1da177e4 | 2192 | /** |
2cf71d2e DJ |
2193 | * velocity_intr - interrupt callback |
2194 | * @irq: interrupt number | |
2195 | * @dev_instance: interrupting device | |
1da177e4 | 2196 | * |
2cf71d2e DJ |
2197 | * Called whenever an interrupt is generated by the velocity |
2198 | * adapter IRQ line. We may not be the source of the interrupt | |
2199 | * and need to identify initially if we are, and if not exit as | |
2200 | * efficiently as possible. | |
1da177e4 | 2201 | */ |
2cf71d2e | 2202 | static irqreturn_t velocity_intr(int irq, void *dev_instance) |
1da177e4 | 2203 | { |
2cf71d2e DJ |
2204 | struct net_device *dev = dev_instance; |
2205 | struct velocity_info *vptr = netdev_priv(dev); | |
2206 | u32 isr_status; | |
1da177e4 | 2207 | |
2cf71d2e DJ |
2208 | spin_lock(&vptr->lock); |
2209 | isr_status = mac_read_isr(vptr->mac_regs); | |
2210 | ||
2211 | /* Not us ? */ | |
2212 | if (isr_status == 0) { | |
2213 | spin_unlock(&vptr->lock); | |
2214 | return IRQ_NONE; | |
1da177e4 | 2215 | } |
1da177e4 | 2216 | |
3f2e8d9f SK |
2217 | /* Ack the interrupt */ |
2218 | mac_write_isr(vptr->mac_regs, isr_status); | |
2219 | ||
dfff7144 SK |
2220 | if (likely(napi_schedule_prep(&vptr->napi))) { |
2221 | mac_disable_int(vptr->mac_regs); | |
2222 | __napi_schedule(&vptr->napi); | |
1da177e4 | 2223 | } |
3f2e8d9f SK |
2224 | |
2225 | if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI))) | |
2226 | velocity_error(vptr, isr_status); | |
2227 | ||
2cf71d2e | 2228 | spin_unlock(&vptr->lock); |
2cf71d2e | 2229 | |
dfff7144 | 2230 | return IRQ_HANDLED; |
1da177e4 LT |
2231 | } |
2232 | ||
2233 | /** | |
2cf71d2e DJ |
2234 | * velocity_open - interface activation callback |
2235 | * @dev: network layer device to open | |
1da177e4 | 2236 | * |
2cf71d2e DJ |
2237 | * Called when the network layer brings the interface up. Returns |
2238 | * a negative posix error code on failure, or zero on success. | |
2239 | * | |
2240 | * All the ring allocation and set up is done on open for this | |
2241 | * adapter to minimise memory usage when inactive | |
1da177e4 | 2242 | */ |
2cf71d2e | 2243 | static int velocity_open(struct net_device *dev) |
1da177e4 | 2244 | { |
2cf71d2e DJ |
2245 | struct velocity_info *vptr = netdev_priv(dev); |
2246 | int ret; | |
1da177e4 | 2247 | |
2cf71d2e DJ |
2248 | ret = velocity_init_rings(vptr, dev->mtu); |
2249 | if (ret < 0) | |
2250 | goto out; | |
1da177e4 | 2251 | |
2cf71d2e | 2252 | /* Ensure chip is running */ |
6dffbe53 | 2253 | velocity_set_power_state(vptr, PCI_D0); |
1da177e4 | 2254 | |
2cf71d2e DJ |
2255 | velocity_init_registers(vptr, VELOCITY_INIT_COLD); |
2256 | ||
6dffbe53 | 2257 | ret = request_irq(dev->irq, velocity_intr, IRQF_SHARED, |
2cf71d2e DJ |
2258 | dev->name, dev); |
2259 | if (ret < 0) { | |
2260 | /* Power down the chip */ | |
6dffbe53 | 2261 | velocity_set_power_state(vptr, PCI_D3hot); |
2cf71d2e DJ |
2262 | velocity_free_rings(vptr); |
2263 | goto out; | |
1da177e4 LT |
2264 | } |
2265 | ||
35bb5cad BH |
2266 | velocity_give_many_rx_descs(vptr); |
2267 | ||
2cf71d2e DJ |
2268 | mac_enable_int(vptr->mac_regs); |
2269 | netif_start_queue(dev); | |
dfff7144 | 2270 | napi_enable(&vptr->napi); |
2cf71d2e DJ |
2271 | vptr->flags |= VELOCITY_FLAGS_OPENED; |
2272 | out: | |
2273 | return ret; | |
1da177e4 LT |
2274 | } |
2275 | ||
2276 | /** | |
2cf71d2e DJ |
2277 | * velocity_shutdown - shut down the chip |
2278 | * @vptr: velocity to deactivate | |
1da177e4 | 2279 | * |
2cf71d2e DJ |
2280 | * Shuts down the internal operations of the velocity and |
2281 | * disables interrupts, autopolling, transmit and receive | |
1da177e4 | 2282 | */ |
2cf71d2e | 2283 | static void velocity_shutdown(struct velocity_info *vptr) |
1da177e4 | 2284 | { |
2cf71d2e DJ |
2285 | struct mac_regs __iomem *regs = vptr->mac_regs; |
2286 | mac_disable_int(regs); | |
2287 | writel(CR0_STOP, &regs->CR0Set); | |
2288 | writew(0xFFFF, &regs->TDCSRClr); | |
2289 | writeb(0xFF, &regs->RDCSRClr); | |
1da177e4 | 2290 | safe_disable_mii_autopoll(regs); |
2cf71d2e DJ |
2291 | mac_clear_isr(regs); |
2292 | } | |
1da177e4 | 2293 | |
2cf71d2e DJ |
2294 | /** |
2295 | * velocity_change_mtu - MTU change callback | |
2296 | * @dev: network device | |
2297 | * @new_mtu: desired MTU | |
2298 | * | |
2299 | * Handle requests from the networking layer for MTU change on | |
2300 | * this interface. It gets called on a change by the network layer. | |
2301 | * Return zero for success or negative posix error code. | |
2302 | */ | |
2303 | static int velocity_change_mtu(struct net_device *dev, int new_mtu) | |
2304 | { | |
2305 | struct velocity_info *vptr = netdev_priv(dev); | |
2306 | int ret = 0; | |
1da177e4 | 2307 | |
2cf71d2e DJ |
2308 | if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) { |
2309 | VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n", | |
a9683c94 | 2310 | vptr->netdev->name); |
2cf71d2e DJ |
2311 | ret = -EINVAL; |
2312 | goto out_0; | |
2313 | } | |
1da177e4 | 2314 | |
2cf71d2e DJ |
2315 | if (!netif_running(dev)) { |
2316 | dev->mtu = new_mtu; | |
2317 | goto out_0; | |
1da177e4 LT |
2318 | } |
2319 | ||
2cf71d2e DJ |
2320 | if (dev->mtu != new_mtu) { |
2321 | struct velocity_info *tmp_vptr; | |
2322 | unsigned long flags; | |
2323 | struct rx_info rx; | |
2324 | struct tx_info tx; | |
1da177e4 | 2325 | |
2cf71d2e DJ |
2326 | tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL); |
2327 | if (!tmp_vptr) { | |
2328 | ret = -ENOMEM; | |
2329 | goto out_0; | |
2330 | } | |
1da177e4 | 2331 | |
a9683c94 | 2332 | tmp_vptr->netdev = dev; |
2cf71d2e | 2333 | tmp_vptr->pdev = vptr->pdev; |
6dffbe53 | 2334 | tmp_vptr->dev = vptr->dev; |
2cf71d2e DJ |
2335 | tmp_vptr->options = vptr->options; |
2336 | tmp_vptr->tx.numq = vptr->tx.numq; | |
6aa20a22 | 2337 | |
2cf71d2e DJ |
2338 | ret = velocity_init_rings(tmp_vptr, new_mtu); |
2339 | if (ret < 0) | |
2340 | goto out_free_tmp_vptr_1; | |
1da177e4 | 2341 | |
bc9627e7 | 2342 | napi_disable(&vptr->napi); |
2343 | ||
2cf71d2e | 2344 | spin_lock_irqsave(&vptr->lock, flags); |
1da177e4 | 2345 | |
2cf71d2e DJ |
2346 | netif_stop_queue(dev); |
2347 | velocity_shutdown(vptr); | |
1da177e4 | 2348 | |
2cf71d2e DJ |
2349 | rx = vptr->rx; |
2350 | tx = vptr->tx; | |
1da177e4 | 2351 | |
2cf71d2e DJ |
2352 | vptr->rx = tmp_vptr->rx; |
2353 | vptr->tx = tmp_vptr->tx; | |
1da177e4 | 2354 | |
2cf71d2e DJ |
2355 | tmp_vptr->rx = rx; |
2356 | tmp_vptr->tx = tx; | |
1da177e4 | 2357 | |
2cf71d2e | 2358 | dev->mtu = new_mtu; |
6aa20a22 | 2359 | |
2cf71d2e | 2360 | velocity_init_registers(vptr, VELOCITY_INIT_COLD); |
1da177e4 | 2361 | |
35bb5cad BH |
2362 | velocity_give_many_rx_descs(vptr); |
2363 | ||
bc9627e7 | 2364 | napi_enable(&vptr->napi); |
2365 | ||
2cf71d2e DJ |
2366 | mac_enable_int(vptr->mac_regs); |
2367 | netif_start_queue(dev); | |
6aa20a22 | 2368 | |
2cf71d2e | 2369 | spin_unlock_irqrestore(&vptr->lock, flags); |
1da177e4 | 2370 | |
2cf71d2e | 2371 | velocity_free_rings(tmp_vptr); |
1da177e4 | 2372 | |
2cf71d2e DJ |
2373 | out_free_tmp_vptr_1: |
2374 | kfree(tmp_vptr); | |
2375 | } | |
2376 | out_0: | |
2377 | return ret; | |
1da177e4 | 2378 | } |
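/*
 * Strategy used by velocity_change_mtu() above, summarised: a throwaway
 * velocity_info is filled in just far enough for velocity_init_rings()
 * to build a second set of rings sized for the new MTU; under the lock
 * the rx/tx ring state is swapped between the live adapter and the
 * temporary one, and the old rings are then freed outside the critical
 * section through velocity_free_rings(tmp_vptr).
 */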
1da177e4 | 2379 | |
0887a576 AU |
2380 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2381 | /** | |
2382 | * velocity_poll_controller - Velocity Poll controller function | |
2383 | * @dev: network device | |
2384 | * | |
2385 | * | |
2386 | * Used by NETCONSOLE and other diagnostic tools to allow network I/O | |
2387 | * with interrupts disabled. | |
2388 | */ | |
2389 | static void velocity_poll_controller(struct net_device *dev) | |
2390 | { | |
2391 | disable_irq(dev->irq); | |
2392 | velocity_intr(dev->irq, dev); | |
2393 | enable_irq(dev->irq); | |
2394 | } | |
2395 | #endif | |
2396 | ||
1da177e4 | 2397 | /** |
2cf71d2e DJ |
2398 | * velocity_mii_ioctl - MII ioctl handler |
2399 | * @dev: network device | |
2400 | * @ifr: the ifreq block for the ioctl | |
2401 | * @cmd: the command | |
1da177e4 | 2402 | * |
2cf71d2e DJ |
2403 | * Process MII requests made via ioctl from the network layer. These |
2404 | * are used by tools like kudzu to interrogate the link state of the | |
2405 | * hardware | |
1da177e4 | 2406 | */ |
2cf71d2e | 2407 | static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
1da177e4 | 2408 | { |
2cf71d2e DJ |
2409 | struct velocity_info *vptr = netdev_priv(dev); |
2410 | struct mac_regs __iomem *regs = vptr->mac_regs; | |
2411 | unsigned long flags; | |
2412 | struct mii_ioctl_data *miidata = if_mii(ifr); | |
2413 | int err; | |
1da177e4 | 2414 | |
2cf71d2e DJ |
2415 | switch (cmd) { |
2416 | case SIOCGMIIPHY: | |
2417 | miidata->phy_id = readb(&regs->MIIADR) & 0x1f; | |
1da177e4 | 2418 | break; |
2cf71d2e | 2419 | case SIOCGMIIREG: |
2cf71d2e DJ |
2420 | if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0) |
2421 | return -ETIMEDOUT; | |
1da177e4 | 2422 | break; |
2cf71d2e | 2423 | case SIOCSMIIREG: |
2cf71d2e DJ |
2424 | spin_lock_irqsave(&vptr->lock, flags); |
2425 | err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in); | |
2426 | spin_unlock_irqrestore(&vptr->lock, flags); | |
2427 | check_connection_type(vptr->mac_regs); | |
2428 | if (err) | |
2429 | return err; | |
1da177e4 LT |
2430 | break; |
2431 | default: | |
2cf71d2e | 2432 | return -EOPNOTSUPP; |
1da177e4 | 2433 | } |
2cf71d2e | 2434 | return 0; |
1da177e4 LT |
2435 | } |
2436 | ||
2437 | /** | |
2cf71d2e DJ |
2438 | * velocity_ioctl - ioctl entry point |
2439 | * @dev: network device | |
2440 | * @rq: interface request ioctl | |
2441 | * @cmd: command code | |
1da177e4 | 2442 | * |
2cf71d2e DJ |
2443 | * Called when the user issues an ioctl request to the network |
2444 | * device in question. The velocity interface supports MII. | |
1da177e4 | 2445 | */ |
2cf71d2e DJ |
2446 | static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
2447 | { | |
2448 | struct velocity_info *vptr = netdev_priv(dev); | |
2449 | int ret; | |
6aa20a22 | 2450 | |
2cf71d2e DJ |
2451 | /* If we are asked for information and the device is power |
2452 | saving then we need to bring the device back up to talk to it */ | |
1da177e4 | 2453 | |
2cf71d2e | 2454 | if (!netif_running(dev)) |
6dffbe53 | 2455 | velocity_set_power_state(vptr, PCI_D0); |
1da177e4 | 2456 | |
2cf71d2e DJ |
2457 | switch (cmd) { |
2458 | case SIOCGMIIPHY: /* Get address of MII PHY in use. */ | |
2459 | case SIOCGMIIREG: /* Read MII PHY register. */ | |
2460 | case SIOCSMIIREG: /* Write to MII PHY register. */ | |
2461 | ret = velocity_mii_ioctl(dev, rq, cmd); | |
2462 | break; | |
1da177e4 | 2463 | |
2cf71d2e DJ |
2464 | default: |
2465 | ret = -EOPNOTSUPP; | |
2466 | } | |
2467 | if (!netif_running(dev)) | |
6dffbe53 | 2468 | velocity_set_power_state(vptr, PCI_D3hot); |
1da177e4 | 2469 | |
c4067400 | 2470 | |
2cf71d2e | 2471 | return ret; |
1da177e4 LT |
2472 | } |
2473 | ||
2474 | /** | |
2cf71d2e DJ |
2475 | * velocity_get_stats - statistics callback |
2476 | * @dev: network device | |
1da177e4 | 2477 | * |
2cf71d2e DJ |
2478 | * Callback from the network layer to allow driver statistics |
2479 | * to be resynchronized with hardware collected state. In the | |
2480 | * case of the velocity we need to pull the MIB counters from | |
2481 | * the hardware into the counters before letting the network | |
2482 | * layer display them. | |
1da177e4 | 2483 | */ |
2cf71d2e | 2484 | static struct net_device_stats *velocity_get_stats(struct net_device *dev) |
1da177e4 | 2485 | { |
2cf71d2e | 2486 | struct velocity_info *vptr = netdev_priv(dev); |
1da177e4 | 2487 | |
2cf71d2e DJ |
2488 | /* If the hardware is down, don't touch MII */ |
2489 | if (!netif_running(dev)) | |
2490 | return &dev->stats; | |
1da177e4 | 2491 | |
2cf71d2e DJ |
2492 | spin_lock_irq(&vptr->lock); |
2493 | velocity_update_hw_mibs(vptr); | |
2494 | spin_unlock_irq(&vptr->lock); | |
1da177e4 | 2495 | |
2cf71d2e DJ |
2496 | dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts]; |
2497 | dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts]; | |
2498 | dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors]; | |
1da177e4 | 2499 | |
2cf71d2e DJ |
2500 | // unsigned long rx_dropped; /* no space in linux buffers */ |
2501 | dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions]; | |
2502 | /* detailed rx_errors: */ | |
2503 | // unsigned long rx_length_errors; | |
2504 | // unsigned long rx_over_errors; /* receiver ring buff overflow */ | |
2505 | dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE]; | |
2506 | // unsigned long rx_frame_errors; /* recv'd frame alignment error */ | |
2507 | // unsigned long rx_fifo_errors; /* recv'r fifo overrun */ | |
2508 | // unsigned long rx_missed_errors; /* receiver missed packet */ | |
2509 | ||
2510 | /* detailed tx_errors */ | |
2511 | // unsigned long tx_fifo_errors; | |
2512 | ||
2513 | return &dev->stats; | |
1da177e4 LT |
2514 | } |
2515 | ||
2cf71d2e DJ |
2516 | /** |
2517 | * velocity_close - close adapter callback | |
2518 | * @dev: network device | |
2519 | * | |
2520 | * Callback from the network layer when the velocity is being | |
2521 | * deactivated by the network layer | |
2522 | */ | |
2523 | static int velocity_close(struct net_device *dev) | |
1da177e4 | 2524 | { |
2cf71d2e | 2525 | struct velocity_info *vptr = netdev_priv(dev); |
1da177e4 | 2526 | |
dfff7144 | 2527 | napi_disable(&vptr->napi); |
2cf71d2e DJ |
2528 | netif_stop_queue(dev); |
2529 | velocity_shutdown(vptr); | |
1da177e4 | 2530 | |
2cf71d2e DJ |
2531 | if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) |
2532 | velocity_get_ip(vptr); | |
dfda3578 | 2533 | |
6dffbe53 | 2534 | free_irq(dev->irq, dev); |
1da177e4 | 2535 | |
2cf71d2e | 2536 | velocity_free_rings(vptr); |
1da177e4 | 2537 | |
2cf71d2e DJ |
2538 | vptr->flags &= (~VELOCITY_FLAGS_OPENED); |
2539 | return 0; | |
1da177e4 LT |
2540 | } |
2541 | ||
2542 | /** | |
2cf71d2e DJ |
2543 | * velocity_xmit - transmit packet callback |
2544 | * @skb: buffer to transmit | |
2545 | * @dev: network device | |
1da177e4 | 2546 | * |
2cf71d2e DJ |
2547 | * Called by the network layer to request that a packet be queued to |
2548 | * the velocity. Returns zero on success. | |
1da177e4 | 2549 | */ |
61357325 SH |
2550 | static netdev_tx_t velocity_xmit(struct sk_buff *skb, |
2551 | struct net_device *dev) | |
1da177e4 | 2552 | { |
2cf71d2e DJ |
2553 | struct velocity_info *vptr = netdev_priv(dev); |
2554 | int qnum = 0; | |
2555 | struct tx_desc *td_ptr; | |
2556 | struct velocity_td_info *tdinfo; | |
2557 | unsigned long flags; | |
2558 | int pktlen; | |
c79992fd SK |
2559 | int index, prev; |
2560 | int i = 0; | |
1da177e4 | 2561 | |
2cf71d2e DJ |
2562 | if (skb_padto(skb, ETH_ZLEN)) |
2563 | goto out; | |
1da177e4 | 2564 | |
c79992fd SK |
2565 | /* The hardware can handle at most 7 memory segments, so merge |
2566 | * the skb if there are more */ | |
2567 | if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) { | |
001eadf6 | 2568 | dev_kfree_skb_any(skb); |
c79992fd SK |
2569 | return NETDEV_TX_OK; |
2570 | } | |
2571 | ||
2572 | pktlen = skb_shinfo(skb)->nr_frags == 0 ? | |
2573 | max_t(unsigned int, skb->len, ETH_ZLEN) : | |
2574 | skb_headlen(skb); | |
1da177e4 | 2575 | |
2cf71d2e | 2576 | spin_lock_irqsave(&vptr->lock, flags); |
1da177e4 | 2577 | |
2cf71d2e DJ |
2578 | index = vptr->tx.curr[qnum]; |
2579 | td_ptr = &(vptr->tx.rings[qnum][index]); | |
2580 | tdinfo = &(vptr->tx.infos[qnum][index]); | |
1da177e4 | 2581 | |
2cf71d2e DJ |
2582 | td_ptr->tdesc1.TCR = TCR0_TIC; |
2583 | td_ptr->td_buf[0].size &= ~TD_QUEUE; | |
1da177e4 | 2584 | |
2cf71d2e DJ |
2585 | /* |
2586 | * Map the linear network buffer into PCI space and | |
2587 | * add it to the transmit ring. | |
2588 | */ | |
2589 | tdinfo->skb = skb; | |
e2c41f14 TP |
2590 | tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen, |
2591 | DMA_TO_DEVICE); | |
c79992fd | 2592 | td_ptr->tdesc0.len = cpu_to_le16(pktlen); |
2cf71d2e DJ |
2593 | td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); |
2594 | td_ptr->td_buf[0].pa_high = 0; | |
c79992fd SK |
2595 | td_ptr->td_buf[0].size = cpu_to_le16(pktlen); |
2596 | ||
2597 | /* Handle fragments */ | |
2598 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
9e903e08 | 2599 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
c79992fd | 2600 | |
e2c41f14 | 2601 | tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev, |
e4cb193f | 2602 | frag, 0, |
9e903e08 | 2603 | skb_frag_size(frag), |
5d6bcdfe | 2604 | DMA_TO_DEVICE); |
c79992fd SK |
2605 | |
2606 | td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); | |
2607 | td_ptr->td_buf[i + 1].pa_high = 0; | |
9e903e08 | 2608 | td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag)); |
c79992fd SK |
2609 | } |
2610 | tdinfo->nskb_dma = i + 1; | |
2cf71d2e DJ |
2611 | |
2612 | td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16; | |
2613 | ||
eab6d18d | 2614 | if (vlan_tx_tag_present(skb)) { |
2cf71d2e DJ |
2615 | td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb)); |
2616 | td_ptr->tdesc1.TCR |= TCR0_VETAG; | |
2617 | } | |
2618 | ||
2619 | /* | |
2620 | * Handle hardware checksum | |
2621 | */ | |
f593fe36 | 2622 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2cf71d2e DJ |
2623 | const struct iphdr *ip = ip_hdr(skb); |
2624 | if (ip->protocol == IPPROTO_TCP) | |
2625 | td_ptr->tdesc1.TCR |= TCR0_TCPCK; | |
2626 | else if (ip->protocol == IPPROTO_UDP) | |
2627 | td_ptr->tdesc1.TCR |= (TCR0_UDPCK); | |
2628 | td_ptr->tdesc1.TCR |= TCR0_IPCK; | |
2629 | } | |
1da177e4 | 2630 | |
c79992fd SK |
2631 | prev = index - 1; |
2632 | if (prev < 0) | |
2633 | prev = vptr->options.numtx - 1; | |
2634 | td_ptr->tdesc0.len |= OWNED_BY_NIC; | |
2635 | vptr->tx.used[qnum]++; | |
2636 | vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx; | |
1da177e4 | 2637 | |
c79992fd SK |
2638 | if (AVAIL_TD(vptr, qnum) < 1) |
2639 | netif_stop_queue(dev); | |
1da177e4 | 2640 | |
c79992fd SK |
2641 | td_ptr = &(vptr->tx.rings[qnum][prev]); |
2642 | td_ptr->td_buf[0].size |= TD_QUEUE; | |
2643 | mac_tx_queue_wake(vptr->mac_regs, qnum); | |
1da177e4 | 2644 | |
2cf71d2e DJ |
2645 | spin_unlock_irqrestore(&vptr->lock, flags); |
2646 | out: | |
2647 | return NETDEV_TX_OK; | |
1da177e4 LT |
2648 | } |
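/*
 * Descriptor hand-off order in velocity_xmit() above (descriptive note
 * on what the code does, nothing beyond it): the new descriptor is
 * fully written and marked OWNED_BY_NIC before TD_QUEUE is set on the
 * previous descriptor and the queue is woken, so the chip is never
 * pointed at a chained slot it does not yet own.  With numtx = 64 and
 * index = 0 the "previous" slot wraps to 63.
 */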
2649 | ||
2cf71d2e DJ |
2650 | static const struct net_device_ops velocity_netdev_ops = { |
2651 | .ndo_open = velocity_open, | |
2652 | .ndo_stop = velocity_close, | |
2653 | .ndo_start_xmit = velocity_xmit, | |
2654 | .ndo_get_stats = velocity_get_stats, | |
2655 | .ndo_validate_addr = eth_validate_addr, | |
5ae297b0 | 2656 | .ndo_set_mac_address = eth_mac_addr, |
afc4b13d | 2657 | .ndo_set_rx_mode = velocity_set_multi, |
2cf71d2e DJ |
2658 | .ndo_change_mtu = velocity_change_mtu, |
2659 | .ndo_do_ioctl = velocity_ioctl, | |
2660 | .ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid, | |
2661 | .ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid, | |
0887a576 AU |
2662 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2663 | .ndo_poll_controller = velocity_poll_controller, | |
2664 | #endif | |
2cf71d2e DJ |
2665 | }; |
2666 | ||
1da177e4 | 2667 | /** |
2cf71d2e DJ |
2668 | * velocity_init_info - init private data |
2669 | * @pdev: PCI device | |
2670 | * @vptr: Velocity info | |
2671 | * @info: Board type | |
1da177e4 | 2672 | * |
2cf71d2e DJ |
2673 | * Set up the initial velocity_info struct for the device that has been |
2674 | * discovered. | |
1da177e4 | 2675 | */ |
6dffbe53 TP |
2676 | static void velocity_init_info(struct velocity_info *vptr, |
2677 | const struct velocity_info_tbl *info) | |
1da177e4 | 2678 | { |
2cf71d2e DJ |
2679 | vptr->chip_id = info->chip_id; |
2680 | vptr->tx.numq = info->txqueue; | |
2681 | vptr->multicast_limit = MCAM_SIZE; | |
2682 | spin_lock_init(&vptr->lock); | |
6aa20a22 | 2683 | } |
1da177e4 LT |
2684 | |
2685 | /** | |
2cf71d2e DJ |
2686 | * velocity_get_pci_info - retrieve PCI info for device |
2687 | * @vptr: velocity device | |
2688 | * @pdev: PCI device it matches | |
1da177e4 | 2689 | * |
2cf71d2e DJ |
2690 | * Retrieve the PCI configuration space data that interests us from |
2691 | * the kernel PCI layer | |
1da177e4 | 2692 | */ |
6dffbe53 | 2693 | static int velocity_get_pci_info(struct velocity_info *vptr) |
1da177e4 | 2694 | { |
6dffbe53 | 2695 | struct pci_dev *pdev = vptr->pdev; |
1da177e4 | 2696 | |
2cf71d2e | 2697 | pci_set_master(pdev); |
1da177e4 | 2698 | |
2cf71d2e DJ |
2699 | vptr->ioaddr = pci_resource_start(pdev, 0); |
2700 | vptr->memaddr = pci_resource_start(pdev, 1); | |
1da177e4 | 2701 | |
2cf71d2e DJ |
2702 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) { |
2703 | dev_err(&pdev->dev, | |
2704 | "region #0 is not an I/O resource, aborting.\n"); | |
2705 | return -EINVAL; | |
2706 | } | |
2707 | ||
2708 | if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) { | |
2709 | dev_err(&pdev->dev, | |
2710 | "region #1 is an I/O resource, aborting.\n"); | |
2711 | return -EINVAL; | |
2712 | } | |
2713 | ||
2714 | if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) { | |
2715 | dev_err(&pdev->dev, "region #1 is too small.\n"); | |
2716 | return -EINVAL; | |
2717 | } | |
6dffbe53 TP |
2718 | |
2719 | return 0; | |
2720 | } | |
2721 | ||
2722 | /** | |
2723 | * velocity_get_platform_info - retrieve platform info for device | |
2724 | * @vptr: velocity device | |
2725 | * @pdev: platform device it matches | |
2726 | * | |
2727 | * Retrieve the Platform configuration data that interests us | |
2728 | */ | |
2729 | static int velocity_get_platform_info(struct velocity_info *vptr) | |
2730 | { | |
2731 | struct resource res; | |
2732 | int ret; | |
2733 | ||
2734 | if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL)) | |
2735 | vptr->no_eeprom = 1; | |
2736 | ||
2737 | ret = of_address_to_resource(vptr->dev->of_node, 0, &res); | |
2738 | if (ret) { | |
2739 | dev_err(vptr->dev, "unable to find memory address\n"); | |
2740 | return ret; | |
2741 | } | |
2742 | ||
2743 | vptr->memaddr = res.start; | |
2744 | ||
2745 | if (resource_size(&res) < VELOCITY_IO_SIZE) { | |
2746 | dev_err(vptr->dev, "memory region is too small.\n"); | |
2747 | return -EINVAL; | |
2748 | } | |
6aa20a22 | 2749 | |
1da177e4 LT |
2750 | return 0; |
2751 | } | |
2752 | ||
2cf71d2e DJ |
2753 | /** |
2754 | * velocity_print_info - per driver data | |
2755 | * @vptr: velocity | |
2756 | * | |
2757 | * Print per driver data as the kernel driver finds Velocity | |
2758 | * hardware | |
2759 | */ | |
27add006 | 2760 | static void velocity_print_info(struct velocity_info *vptr) |
1da177e4 | 2761 | { |
a9683c94 | 2762 | struct net_device *dev = vptr->netdev; |
1da177e4 | 2763 | |
2cf71d2e | 2764 | printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id)); |
aa7c68a5 HS |
2765 | printk(KERN_INFO "%s: Ethernet Address: %pM\n", |
2766 | dev->name, dev->dev_addr); | |
1da177e4 LT |
2767 | } |
2768 | ||
2769 | static u32 velocity_get_link(struct net_device *dev) | |
2770 | { | |
8ab6f3f7 | 2771 | struct velocity_info *vptr = netdev_priv(dev); |
c4067400 | 2772 | struct mac_regs __iomem *regs = vptr->mac_regs; |
59b693fb | 2773 | return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0; |
1da177e4 LT |
2774 | } |
2775 | ||
2cf71d2e | 2776 | /** |
6dffbe53 | 2777 | * velocity_probe - set up discovered velocity device |
2cf71d2e DJ |
2778 | * @dev: device that was discovered |
2779 | * @irq: interrupt line assigned to the device | |
6dffbe53 | 2780 | * @bustype: bus that device is connected to |
2cf71d2e DJ |
2781 | * |
2782 | * Configure a discovered adapter from scratch. Return a negative | |
2783 | * errno error code on failure paths. | |
2784 | */ | |
6dffbe53 TP |
2785 | static int velocity_probe(struct device *dev, int irq, |
2786 | const struct velocity_info_tbl *info, | |
2787 | enum velocity_bus_type bustype) | |
1da177e4 | 2788 | { |
2cf71d2e | 2789 | static int first = 1; |
6dffbe53 | 2790 | struct net_device *netdev; |
2cf71d2e DJ |
2791 | int i; |
2792 | const char *drv_string; | |
2cf71d2e DJ |
2793 | struct velocity_info *vptr; |
2794 | struct mac_regs __iomem *regs; | |
2795 | int ret = -ENOMEM; | |
1da177e4 | 2796 | |
2cf71d2e DJ |
2797 | /* FIXME: this driver, like almost all other ethernet drivers, |
2798 | * can support more than MAX_UNITS. | |
2799 | */ | |
2800 | if (velocity_nics >= MAX_UNITS) { | |
6dffbe53 | 2801 | dev_notice(dev, "already found %d NICs.\n", velocity_nics); |
2cf71d2e DJ |
2802 | return -ENODEV; |
2803 | } | |
1da177e4 | 2804 | |
6dffbe53 TP |
2805 | netdev = alloc_etherdev(sizeof(struct velocity_info)); |
2806 | if (!netdev) | |
2cf71d2e | 2807 | goto out; |
1da177e4 | 2808 | |
2cf71d2e | 2809 | /* Chain it all together */ |
1da177e4 | 2810 | |
6dffbe53 TP |
2811 | SET_NETDEV_DEV(netdev, dev); |
2812 | vptr = netdev_priv(netdev); | |
2cf71d2e | 2813 | |
2cf71d2e DJ |
2814 | if (first) { |
2815 | printk(KERN_INFO "%s Ver. %s\n", | |
2816 | VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION); | |
2817 | printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n"); | |
2818 | printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n"); | |
2819 | first = 0; | |
2820 | } | |
2821 | ||
6dffbe53 TP |
2822 | netdev->irq = irq; |
2823 | vptr->netdev = netdev; | |
2824 | vptr->dev = dev; | |
2cf71d2e | 2825 | |
6dffbe53 | 2826 | velocity_init_info(vptr, info); |
2cf71d2e | 2827 | |
6dffbe53 TP |
2828 | if (bustype == BUS_PCI) { |
2829 | vptr->pdev = to_pci_dev(dev); | |
2cf71d2e | 2830 | |
6dffbe53 TP |
2831 | ret = velocity_get_pci_info(vptr); |
2832 | if (ret < 0) | |
2833 | goto err_free_dev; | |
2834 | } else { | |
2835 | vptr->pdev = NULL; | |
2836 | ret = velocity_get_platform_info(vptr); | |
2837 | if (ret < 0) | |
2838 | goto err_free_dev; | |
1da177e4 | 2839 | } |
2cf71d2e DJ |
2840 | |
2841 | regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE); | |
2842 | if (regs == NULL) { | |
2843 | ret = -EIO; | |
6dffbe53 | 2844 | goto err_free_dev; |
1da177e4 | 2845 | } |
1da177e4 | 2846 | |
2cf71d2e | 2847 | vptr->mac_regs = regs; |
6dffbe53 | 2848 | vptr->rev_id = readb(&regs->rev_id); |
1da177e4 | 2849 | |
2cf71d2e | 2850 | mac_wol_reset(regs); |
1da177e4 | 2851 | |
2cf71d2e | 2852 | for (i = 0; i < 6; i++) |
6dffbe53 | 2853 | netdev->dev_addr[i] = readb(&regs->PAR[i]); |
6aa20a22 | 2854 | |
6aa20a22 | 2855 | |
6dffbe53 | 2856 | drv_string = dev_driver_string(dev); |
1da177e4 | 2857 | |
2cf71d2e | 2858 | velocity_get_options(&vptr->options, velocity_nics, drv_string); |
1da177e4 | 2859 | |
2cf71d2e DJ |
2860 | /* |
2861 | * Mask out the options that cannot be set on the chip | |
2862 | */ | |
6aa20a22 | 2863 | |
2cf71d2e | 2864 | vptr->options.flags &= info->flags; |
1da177e4 | 2865 | |
2cf71d2e DJ |
2866 | /* |
2867 | * Enable the chip specified capabilities | |
2868 | */ | |
1da177e4 | 2869 | |
2cf71d2e | 2870 | vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL); |
1da177e4 | 2871 | |
2cf71d2e DJ |
2872 | vptr->wol_opts = vptr->options.wol_opts; |
2873 | vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; | |
1da177e4 | 2874 | |
2cf71d2e | 2875 | vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); |
1da177e4 | 2876 | |
6dffbe53 TP |
2877 | netdev->netdev_ops = &velocity_netdev_ops; |
2878 | netdev->ethtool_ops = &velocity_ethtool_ops; | |
2879 | netif_napi_add(netdev, &vptr->napi, velocity_poll, | |
2880 | VELOCITY_NAPI_WEIGHT); | |
6aa20a22 | 2881 | |
6dffbe53 | 2882 | netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | |
f646968f | 2883 | NETIF_F_HW_VLAN_CTAG_TX; |
6dffbe53 TP |
2884 | netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | |
2885 | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX | | |
2886 | NETIF_F_IP_CSUM; | |
1da177e4 | 2887 | |
6dffbe53 | 2888 | ret = register_netdev(netdev); |
2cf71d2e DJ |
2889 | if (ret < 0) |
2890 | goto err_iounmap; | |
2891 | ||
6dffbe53 TP |
2892 | if (!velocity_get_link(netdev)) { |
2893 | netif_carrier_off(netdev); | |
2cf71d2e | 2894 | vptr->mii_status |= VELOCITY_LINK_FAIL; |
1da177e4 LT |
2895 | } |
2896 | ||
2cf71d2e | 2897 | velocity_print_info(vptr); |
6dffbe53 | 2898 | dev_set_drvdata(vptr->dev, netdev); |
1da177e4 | 2899 | |
2cf71d2e | 2900 | /* and leave the chip powered down */ |
1da177e4 | 2901 | |
6dffbe53 | 2902 | velocity_set_power_state(vptr, PCI_D3hot); |
2cf71d2e DJ |
2903 | velocity_nics++; |
2904 | out: | |
2905 | return ret; | |
2906 | ||
2907 | err_iounmap: | |
2fdac010 | 2908 | netif_napi_del(&vptr->napi); |
2cf71d2e | 2909 | iounmap(regs); |
2cf71d2e | 2910 | err_free_dev: |
6dffbe53 | 2911 | free_netdev(netdev); |
2cf71d2e | 2912 | goto out; |
1da177e4 LT |
2913 | } |
2914 | ||
6dffbe53 TP |
2915 | /** |
2916 | * velocity_remove - device unplug | |
2917 | * @dev: device being removed | |
2918 | * | |
2919 | * Device unload callback. Called on an unplug or on module | |
2920 | * unload for each active device that is present. Disconnects | |
2921 | * the device from the network layer and frees all the resources | |
2922 | */ | |
2923 | static int velocity_remove(struct device *dev) | |
2924 | { | |
2925 | struct net_device *netdev = dev_get_drvdata(dev); | |
2926 | struct velocity_info *vptr = netdev_priv(netdev); | |
2927 | ||
2928 | unregister_netdev(netdev); | |
2fdac010 | 2929 | netif_napi_del(&vptr->napi); |
6dffbe53 TP |
2930 | iounmap(vptr->mac_regs); |
2931 | free_netdev(netdev); | |
2932 | velocity_nics--; | |
2933 | ||
2934 | return 0; | |
2935 | } | |
2936 | ||
2937 | static int velocity_pci_probe(struct pci_dev *pdev, | |
2938 | const struct pci_device_id *ent) | |
2939 | { | |
2940 | const struct velocity_info_tbl *info = | |
2941 | &chip_info_table[ent->driver_data]; | |
2942 | int ret; | |
2943 | ||
2944 | ret = pci_enable_device(pdev); | |
2945 | if (ret < 0) | |
2946 | return ret; | |
2947 | ||
2948 | ret = pci_request_regions(pdev, VELOCITY_NAME); | |
2949 | if (ret < 0) { | |
2950 | dev_err(&pdev->dev, "No PCI resources.\n"); | |
2951 | goto fail1; | |
2952 | } | |
2953 | ||
2954 | ret = velocity_probe(&pdev->dev, pdev->irq, info, BUS_PCI); | |
2955 | if (ret == 0) | |
2956 | return 0; | |
2957 | ||
2958 | pci_release_regions(pdev); | |
2959 | fail1: | |
2960 | pci_disable_device(pdev); | |
2961 | return ret; | |
2962 | } | |
2963 | ||
2964 | static void velocity_pci_remove(struct pci_dev *pdev) | |
2965 | { | |
2966 | velocity_remove(&pdev->dev); | |
2967 | ||
2968 | pci_release_regions(pdev); | |
2969 | pci_disable_device(pdev); | |
2970 | } | |
2971 | ||
2972 | static int velocity_platform_probe(struct platform_device *pdev) | |
2973 | { | |
2974 | const struct of_device_id *of_id; | |
2975 | const struct velocity_info_tbl *info; | |
2976 | int irq; | |
2977 | ||
2978 | of_id = of_match_device(velocity_of_ids, &pdev->dev); | |
2979 | if (!of_id) | |
2980 | return -EINVAL; | |
2981 | info = of_id->data; | |
2982 | ||
2983 | irq = irq_of_parse_and_map(pdev->dev.of_node, 0); | |
2984 | if (!irq) | |
2985 | return -EINVAL; | |
2986 | ||
2987 | return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM); | |
2988 | } | |
2989 | ||
2990 | static int velocity_platform_remove(struct platform_device *pdev) | |
2991 | { | |
2992 | velocity_remove(&pdev->dev); | |
2993 | ||
2994 | return 0; | |
2995 | } | |
2996 | ||
2997 | #ifdef CONFIG_PM_SLEEP | |
1da177e4 LT |
2998 | /** |
2999 | * wol_calc_crc - WOL CRC | |
3000 | * @pattern: data pattern | |
3001 | * @mask_pattern: mask | |
3002 | * | |
3003 | * Compute the wake-on-LAN CRC hash for the packet header | |
3004 | * we are interested in. | |
3005 | */ | |
c4067400 | 3006 | static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern) |
1da177e4 LT |
3007 | { |
3008 | u16 crc = 0xFFFF; | |
3009 | u8 mask; | |
3010 | int i, j; | |
3011 | ||
3012 | for (i = 0; i < size; i++) { | |
3013 | mask = mask_pattern[i]; | |
3014 | ||
3015 | /* Skip this byte if its mask is zero */ | |
3016 | if (mask == 0x00) | |
3017 | continue; | |
3018 | ||
3019 | for (j = 0; j < 8; j++) { | |
3020 | if ((mask & 0x01) == 0) { | |
3021 | mask >>= 1; | |
3022 | continue; | |
3023 | } | |
3024 | mask >>= 1; | |
3025 | crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1); | |
3026 | } | |
3027 | } | |
3028 | /* Finally, invert the result once to get the correct data */ | |
3029 | crc = ~crc; | |
906d66df | 3030 | return bitrev32(crc) >> 16; |
1da177e4 LT |
3031 | } |
3032 | ||
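The same masked CRC can be reproduced outside the kernel to sanity-check a wake pattern before it is programmed. Below is a minimal user-space sketch; crc16_ccitt(), bitrev16() and wol_pattern_crc() are hypothetical local helpers written only for this illustration (they are not kernel exports), but the arithmetic mirrors wol_calc_crc(): only the pattern bytes selected by the mask feed a reflected CRC-CCITT, and the result is then inverted and bit-reversed.

#include <stdint.h>
#include <stdio.h>

/* Reflected CRC-CCITT update, polynomial 0x8408 (same maths as crc_ccitt()) */
static uint16_t crc16_ccitt(uint16_t crc, uint8_t byte)
{
	int bit;

	crc ^= byte;
	for (bit = 0; bit < 8; bit++)
		crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
	return crc;
}

static uint16_t bitrev16(uint16_t x)
{
	uint16_t out = 0;
	int bit;

	for (bit = 0; bit < 16; bit++)
		out |= ((x >> bit) & 1) << (15 - bit);
	return out;
}

/* size counts mask bytes; each mask byte gates eight pattern bytes */
static uint16_t wol_pattern_crc(int size, const uint8_t *pattern, const uint8_t *mask)
{
	uint16_t crc = 0xFFFF;
	int i, j;

	for (i = 0; i < size; i++)
		for (j = 0; j < 8; j++)
			if (mask[i] & (1 << j))
				crc = crc16_ccitt(crc, pattern[i * 8 + j]);

	/* invert, then bit-reverse, as ~crc followed by bitrev32() >> 16 does above */
	return bitrev16(~crc);
}

int main(void)
{
	uint8_t pattern[16] = { 0x08, 0x06, 0x00, 0x01 };	/* toy data */
	uint8_t mask[2] = { 0x0f, 0x00 };			/* only bytes 0-3 count */

	printf("pattern crc = 0x%04x\n", wol_pattern_crc(2, pattern, mask));
	return 0;
}

Compiling and running the sketch prints the 16-bit value that would end up in the PatternCRC register for that pattern/mask pair.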
3033 | /** | |
3034 | * velocity_set_wol - set up for wake on lan | |
3035 | * @vptr: velocity to set WOL status on | |
3036 | * | |
3037 | * Set a card up for wake on lan either by unicast or by | |
3038 | * ARP packet. | |
3039 | * | |
3040 | * FIXME: check static buffer is safe here | |
3041 | */ | |
1da177e4 LT |
3042 | static int velocity_set_wol(struct velocity_info *vptr) |
3043 | { | |
c4067400 | 3044 | struct mac_regs __iomem *regs = vptr->mac_regs; |
2ffa007e | 3045 | enum speed_opt spd_dpx = vptr->options.spd_dpx; |
1da177e4 LT |
3046 | static u8 buf[256]; |
3047 | int i; | |
3048 | ||
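/*
 * Each row below is a 128-bit byte mask for the pattern-match engine:
 * bit n of 32-bit word m selects frame byte m * 32 + n.  For the ARP
 * row the set bits cover the Ethertype (bytes 12-13), the low byte of
 * the ARP opcode (byte 21) and the target IP address (bytes 38-41).
 */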
3049 | static u32 mask_pattern[2][4] = { | |
3050 | {0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */ | |
3051 | {0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff} /* Magic Packet */ | |
3052 | }; | |
3053 | ||
3054 | writew(0xFFFF, ®s->WOLCRClr); | |
3055 | writeb(WOLCFG_SAB | WOLCFG_SAM, ®s->WOLCFGSet); | |
3056 | writew(WOLCR_MAGIC_EN, ®s->WOLCRSet); | |
3057 | ||
3058 | /* | |
3059 | if (vptr->wol_opts & VELOCITY_WOL_PHY) | |
3060 | writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), ®s->WOLCRSet); | |
3061 | */ | |
3062 | ||
c4067400 | 3063 | if (vptr->wol_opts & VELOCITY_WOL_UCAST) |
1da177e4 | 3064 | writew(WOLCR_UNICAST_EN, ®s->WOLCRSet); |
1da177e4 LT |
3065 | |
3066 | if (vptr->wol_opts & VELOCITY_WOL_ARP) { | |
3067 | struct arp_packet *arp = (struct arp_packet *) buf; | |
3068 | u16 crc; | |
3069 | memset(buf, 0, sizeof(struct arp_packet) + 7); | |
3070 | ||
3071 | for (i = 0; i < 4; i++) | |
3072 | writel(mask_pattern[0][i], ®s->ByteMask[0][i]); | |
3073 | ||
3074 | arp->type = htons(ETH_P_ARP); | |
3075 | arp->ar_op = htons(1); | |
3076 | ||
3077 | memcpy(arp->ar_tip, vptr->ip_addr, 4); | |
3078 | ||
3079 | crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf, | |
3080 | (u8 *) & mask_pattern[0][0]); | |
3081 | ||
3082 | writew(crc, ®s->PatternCRC[0]); | |
3083 | writew(WOLCR_ARP_EN, ®s->WOLCRSet); | |
3084 | } | |
3085 | ||
3086 | BYTE_REG_BITS_ON(PWCFG_WOLTYPE, ®s->PWCFGSet); | |
3087 | BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, ®s->PWCFGSet); | |
3088 | ||
3089 | writew(0x0FFF, ®s->WOLSRClr); | |
3090 | ||
2ffa007e | 3091 | if (spd_dpx == SPD_DPX_1000_FULL) |
3092 | goto mac_done; | |
3093 | ||
3094 | if (spd_dpx != SPD_DPX_AUTO) | |
3095 | goto advertise_done; | |
3096 | ||
1da177e4 LT |
3097 | if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) { |
3098 | if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) | |
3a7f8681 | 3099 | MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); |
1da177e4 | 3100 | |
3a7f8681 | 3101 | MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs); |
1da177e4 LT |
3102 | } |
3103 | ||
3104 | if (vptr->mii_status & VELOCITY_SPEED_1000) | |
3a7f8681 | 3105 | MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); |
1da177e4 | 3106 | |
2ffa007e | 3107 | advertise_done: |
1da177e4 LT |
3108 | BYTE_REG_BITS_ON(CHIPGCR_FCMODE, ®s->CHIPGCR); |
3109 | ||
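/*
 * Unless the link was forced to 1000/full (handled by the mac_done
 * jump above), drop out of GMII mode and force full duplex so the
 * link comes up at 10/100 for wake-up.
 */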
3110 | { | |
3111 | u8 GCR; | |
3112 | GCR = readb(®s->CHIPGCR); | |
3113 | GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX; | |
3114 | writeb(GCR, ®s->CHIPGCR); | |
3115 | } | |
3116 | ||
2ffa007e | 3117 | mac_done: |
1da177e4 LT |
3118 | BYTE_REG_BITS_OFF(ISR_PWEI, ®s->ISR); |
3119 | /* Turn on SWPTAG just before entering power mode */ | |
3120 | BYTE_REG_BITS_ON(STICKHW_SWPTAG, ®s->STICKHW); | |
3121 | /* Go to bed ..... */ | |
3122 | BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), ®s->STICKHW); | |
3123 | ||
3124 | return 0; | |
3125 | } | |
3126 | ||
2cf71d2e DJ |
3127 | /** |
3128 | * velocity_save_context - save registers | |
3129 | * @vptr: velocity | |
3130 | * @context: buffer for stored context | |
3131 | * | |
3132 | * Retrieve the current configuration from the velocity hardware | |
3133 | * and stash it in the context structure, for use by the context | |
3134 | * restore functions. This allows us to save things we need across | |
3135 | * power down states | |
3136 | */ | |
3137 | static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context) | |
3138 | { | |
3139 | struct mac_regs __iomem *regs = vptr->mac_regs; | |
3140 | u16 i; | |
3141 | u8 __iomem *ptr = (u8 __iomem *)regs; | |
3142 | ||
3143 | for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4) | |
3144 | *((u32 *) (context->mac_reg + i)) = readl(ptr + i); | |
3145 | ||
3146 | for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4) | |
3147 | *((u32 *) (context->mac_reg + i)) = readl(ptr + i); | |
3148 | ||
3149 | for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4) | |
3150 | *((u32 *) (context->mac_reg + i)) = readl(ptr + i); | |
3151 | ||
3152 | } | |
3153 | ||
6dffbe53 | 3154 | static int velocity_suspend(struct device *dev) |
1da177e4 | 3155 | { |
6dffbe53 TP |
3156 | struct net_device *netdev = dev_get_drvdata(dev); |
3157 | struct velocity_info *vptr = netdev_priv(netdev); | |
1da177e4 LT |
3158 | unsigned long flags; |
3159 | ||
a9683c94 | 3160 | if (!netif_running(vptr->netdev)) |
1da177e4 LT |
3161 | return 0; |
3162 | ||
a9683c94 | 3163 | netif_device_detach(vptr->netdev); |
1da177e4 LT |
3164 | |
3165 | spin_lock_irqsave(&vptr->lock, flags); | |
6dffbe53 TP |
3166 | if (vptr->pdev) |
3167 | pci_save_state(vptr->pdev); | |
5ae297b0 | 3168 | |
1da177e4 LT |
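/*
 * With WOL enabled the chip stays armed to wake: save the register
 * context, program the wake patterns, enable PME and drop to D3hot.
 * Otherwise just save state, disable the PCI device and power down
 * with wake disabled.
 */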
3169 | if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) { |
3170 | velocity_get_ip(vptr); | |
3171 | velocity_save_context(vptr, &vptr->context); | |
3172 | velocity_shutdown(vptr); | |
3173 | velocity_set_wol(vptr); | |
6dffbe53 TP |
3174 | if (vptr->pdev) |
3175 | pci_enable_wake(vptr->pdev, PCI_D3hot, 1); | |
3176 | velocity_set_power_state(vptr, PCI_D3hot); | |
1da177e4 LT |
3177 | } else { |
3178 | velocity_save_context(vptr, &vptr->context); | |
3179 | velocity_shutdown(vptr); | |
6dffbe53 TP |
3180 | if (vptr->pdev) |
3181 | pci_disable_device(vptr->pdev); | |
3182 | velocity_set_power_state(vptr, PCI_D3hot); | |
1da177e4 | 3183 | } |
5ae297b0 | 3184 | |
2cf71d2e DJ |
3185 | spin_unlock_irqrestore(&vptr->lock, flags); |
3186 | return 0; | |
3187 | } | |
3188 | ||
3189 | /** | |
3190 | * velocity_restore_context - restore registers | |
3191 | * @vptr: velocity | |
3192 | * @context: buffer for stored context | |
3193 | * | |
3194 | * Reload the register configuration from the velocity context | |
3195 | * created by velocity_save_context. | |
3196 | */ | |
3197 | static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context) | |
3198 | { | |
3199 | struct mac_regs __iomem *regs = vptr->mac_regs; | |
3200 | int i; | |
3201 | u8 __iomem *ptr = (u8 __iomem *)regs; | |
3202 | ||
3203 | for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4) | |
3204 | writel(*((u32 *) (context->mac_reg + i)), ptr + i); | |
3205 | ||
3206 | /* Just skip cr0 */ | |
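/*
 * The CRx command registers come in SET/CLR pairs four bytes apart:
 * writing the complement of the saved value to the CLR register
 * clears the bits that should be zero, then writing the saved value
 * to the SET register raises the rest, so each register is restored
 * without a read-modify-write cycle.
 */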
3207 | for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) { | |
3208 | /* Clear */ | |
3209 | writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4); | |
3210 | /* Set */ | |
3211 | writeb(*((u8 *) (context->mac_reg + i)), ptr + i); | |
3212 | } | |
3213 | ||
3214 | for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4) | |
3215 | writel(*((u32 *) (context->mac_reg + i)), ptr + i); | |
3216 | ||
3217 | for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4) | |
3218 | writel(*((u32 *) (context->mac_reg + i)), ptr + i); | |
3219 | ||
3220 | for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++) | |
3221 | writeb(*((u8 *) (context->mac_reg + i)), ptr + i); | |
1da177e4 LT |
3222 | } |
3223 | ||
6dffbe53 | 3224 | static int velocity_resume(struct device *dev) |
1da177e4 | 3225 | { |
6dffbe53 TP |
3226 | struct net_device *netdev = dev_get_drvdata(dev); |
3227 | struct velocity_info *vptr = netdev_priv(netdev); | |
1da177e4 LT |
3228 | unsigned long flags; |
3229 | int i; | |
3230 | ||
a9683c94 | 3231 | if (!netif_running(vptr->netdev)) |
1da177e4 LT |
3232 | return 0; |
3233 | ||
6dffbe53 TP |
3234 | velocity_set_power_state(vptr, PCI_D0); |
3235 | ||
3236 | if (vptr->pdev) { | |
1ca01512 | 3237 | pci_enable_wake(vptr->pdev, PCI_D0, 0); |
6dffbe53 TP |
3238 | pci_restore_state(vptr->pdev); |
3239 | } | |
1da177e4 LT |
3240 | |
3241 | mac_wol_reset(vptr->mac_regs); | |
3242 | ||
3243 | spin_lock_irqsave(&vptr->lock, flags); | |
3244 | velocity_restore_context(vptr, &vptr->context); | |
3245 | velocity_init_registers(vptr, VELOCITY_INIT_WOL); | |
3246 | mac_disable_int(vptr->mac_regs); | |
3247 | ||
d6cade0f | 3248 | velocity_tx_srv(vptr); |
1da177e4 | 3249 | |
0fe9f15e | 3250 | for (i = 0; i < vptr->tx.numq; i++) { |
c4067400 | 3251 | if (vptr->tx.used[i]) |
1da177e4 | 3252 | mac_tx_queue_wake(vptr->mac_regs, i); |
1da177e4 LT |
3253 | } |
3254 | ||
3255 | mac_enable_int(vptr->mac_regs); | |
3256 | spin_unlock_irqrestore(&vptr->lock, flags); | |
a9683c94 | 3257 | netif_device_attach(vptr->netdev); |
1da177e4 LT |
3258 | |
3259 | return 0; | |
3260 | } | |
6dffbe53 TP |
3261 | #endif /* CONFIG_PM_SLEEP */ |
3262 | ||
3263 | static SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend, velocity_resume); | |
1da177e4 | 3264 | |
2cf71d2e DJ |
3265 | /* |
3266 | * Definition for our device driver. The PCI layer interface | |
3267 | * uses this to handle all our card discover and plugging | |
3268 | */ | |
6dffbe53 | 3269 | static struct pci_driver velocity_pci_driver = { |
5ae297b0 | 3270 | .name = VELOCITY_NAME, |
6dffbe53 TP |
3271 | .id_table = velocity_pci_id_table, |
3272 | .probe = velocity_pci_probe, | |
3273 | .remove = velocity_pci_remove, | |
3274 | .driver = { | |
3275 | .pm = &velocity_pm_ops, | |
3276 | }, | |
2cf71d2e DJ |
3277 | }; |
3278 | ||
6dffbe53 TP |
3279 | static struct platform_driver velocity_platform_driver = { |
3280 | .probe = velocity_platform_probe, | |
3281 | .remove = velocity_platform_remove, | |
3282 | .driver = { | |
3283 | .name = "via-velocity", | |
3284 | .owner = THIS_MODULE, | |
3285 | .of_match_table = velocity_of_ids, | |
3286 | .pm = &velocity_pm_ops, | |
3287 | }, | |
3288 | }; | |
2cf71d2e DJ |
3289 | |
3290 | /** | |
3291 | * velocity_ethtool_up - pre hook for ethtool | |
3292 | * @dev: network device | |
3293 | * | |
3294 | * Called before an ethtool operation. We need to make sure the | |
3295 | * chip is out of D3 state before we poke at it. | |
3296 | */ | |
3297 | static int velocity_ethtool_up(struct net_device *dev) | |
3298 | { | |
3299 | struct velocity_info *vptr = netdev_priv(dev); | |
3300 | if (!netif_running(dev)) | |
6dffbe53 | 3301 | velocity_set_power_state(vptr, PCI_D0); |
2cf71d2e DJ |
3302 | return 0; |
3303 | } | |
3304 | ||
3305 | /** | |
3306 | * velocity_ethtool_down - post hook for ethtool | |
3307 | * @dev: network device | |
3308 | * | |
3309 | * Called after an ethtool operation. Return the chip to the D3 | |
3310 | * state if the interface is not running. | |
3311 | */ | |
3312 | static void velocity_ethtool_down(struct net_device *dev) | |
3313 | { | |
3314 | struct velocity_info *vptr = netdev_priv(dev); | |
3315 | if (!netif_running(dev)) | |
6dffbe53 | 3316 | velocity_set_power_state(vptr, PCI_D3hot); |
2cf71d2e DJ |
3317 | } |
3318 | ||
70739497 DD |
3319 | static int velocity_get_settings(struct net_device *dev, |
3320 | struct ethtool_cmd *cmd) | |
2cf71d2e DJ |
3321 | { |
3322 | struct velocity_info *vptr = netdev_priv(dev); | |
3323 | struct mac_regs __iomem *regs = vptr->mac_regs; | |
3324 | u32 status; | |
3325 | status = check_connection_type(vptr->mac_regs); | |
3326 | ||
3327 | cmd->supported = SUPPORTED_TP | | |
3328 | SUPPORTED_Autoneg | | |
3329 | SUPPORTED_10baseT_Half | | |
3330 | SUPPORTED_10baseT_Full | | |
3331 | SUPPORTED_100baseT_Half | | |
3332 | SUPPORTED_100baseT_Full | | |
3333 | SUPPORTED_1000baseT_Half | | |
3334 | SUPPORTED_1000baseT_Full; | |
15419227 | 3335 | |
3336 | cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg; | |
3337 | if (vptr->options.spd_dpx == SPD_DPX_AUTO) { | |
3338 | cmd->advertising |= | |
3339 | ADVERTISED_10baseT_Half | | |
3340 | ADVERTISED_10baseT_Full | | |
3341 | ADVERTISED_100baseT_Half | | |
3342 | ADVERTISED_100baseT_Full | | |
3343 | ADVERTISED_1000baseT_Half | | |
3344 | ADVERTISED_1000baseT_Full; | |
3345 | } else { | |
3346 | switch (vptr->options.spd_dpx) { | |
3347 | case SPD_DPX_1000_FULL: | |
3348 | cmd->advertising |= ADVERTISED_1000baseT_Full; | |
3349 | break; | |
3350 | case SPD_DPX_100_HALF: | |
3351 | cmd->advertising |= ADVERTISED_100baseT_Half; | |
3352 | break; | |
3353 | case SPD_DPX_100_FULL: | |
3354 | cmd->advertising |= ADVERTISED_100baseT_Full; | |
3355 | break; | |
3356 | case SPD_DPX_10_HALF: | |
3357 | cmd->advertising |= ADVERTISED_10baseT_Half; | |
3358 | break; | |
3359 | case SPD_DPX_10_FULL: | |
3360 | cmd->advertising |= ADVERTISED_10baseT_Full; | |
3361 | break; | |
3362 | default: | |
3363 | break; | |
3364 | } | |
3365 | } | |
70739497 | 3366 | |
2cf71d2e | 3367 | if (status & VELOCITY_SPEED_1000) |
70739497 | 3368 | ethtool_cmd_speed_set(cmd, SPEED_1000); |
2cf71d2e | 3369 | else if (status & VELOCITY_SPEED_100) |
70739497 | 3370 | ethtool_cmd_speed_set(cmd, SPEED_100); |
2cf71d2e | 3371 | else |
70739497 DD |
3372 | ethtool_cmd_speed_set(cmd, SPEED_10); |
3373 | ||
2cf71d2e DJ |
3374 | cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE; |
3375 | cmd->port = PORT_TP; | |
3376 | cmd->transceiver = XCVR_INTERNAL; | |
3377 | cmd->phy_address = readb(®s->MIIADR) & 0x1F; | |
3378 | ||
3379 | if (status & VELOCITY_DUPLEX_FULL) | |
3380 | cmd->duplex = DUPLEX_FULL; | |
3381 | else | |
3382 | cmd->duplex = DUPLEX_HALF; | |
3383 | ||
3384 | return 0; | |
3385 | } | |
3386 | ||
25db0338 DD |
3387 | static int velocity_set_settings(struct net_device *dev, |
3388 | struct ethtool_cmd *cmd) | |
2cf71d2e DJ |
3389 | { |
3390 | struct velocity_info *vptr = netdev_priv(dev); | |
25db0338 | 3391 | u32 speed = ethtool_cmd_speed(cmd); |
2cf71d2e DJ |
3392 | u32 curr_status; |
3393 | u32 new_status = 0; | |
3394 | int ret = 0; | |
3395 | ||
3396 | curr_status = check_connection_type(vptr->mac_regs); | |
3397 | curr_status &= (~VELOCITY_LINK_FAIL); | |
3398 | ||
3399 | new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0); | |
25db0338 DD |
3400 | new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0); |
3401 | new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0); | |
3402 | new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0); | |
2cf71d2e DJ |
3403 | new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0); |
3404 | ||
15419227 | 3405 | if ((new_status & VELOCITY_AUTONEG_ENABLE) && |
3406 | (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) { | |
2cf71d2e | 3407 | ret = -EINVAL; |
15419227 | 3408 | } else { |
3409 | enum speed_opt spd_dpx; | |
3410 | ||
3411 | if (new_status & VELOCITY_AUTONEG_ENABLE) | |
3412 | spd_dpx = SPD_DPX_AUTO; | |
3413 | else if ((new_status & VELOCITY_SPEED_1000) && | |
3414 | (new_status & VELOCITY_DUPLEX_FULL)) { | |
3415 | spd_dpx = SPD_DPX_1000_FULL; | |
3416 | } else if (new_status & VELOCITY_SPEED_100) | |
3417 | spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ? | |
3418 | SPD_DPX_100_FULL : SPD_DPX_100_HALF; | |
3419 | else if (new_status & VELOCITY_SPEED_10) | |
3420 | spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ? | |
3421 | SPD_DPX_10_FULL : SPD_DPX_10_HALF; | |
3422 | else | |
3423 | return -EOPNOTSUPP; | |
3424 | ||
3425 | vptr->options.spd_dpx = spd_dpx; | |
3426 | ||
2cf71d2e | 3427 | velocity_set_media_mode(vptr, new_status); |
15419227 | 3428 | } |
2cf71d2e DJ |
3429 | |
3430 | return ret; | |
3431 | } | |
3432 | ||
3433 | static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | |
3434 | { | |
3435 | struct velocity_info *vptr = netdev_priv(dev); | |
6dffbe53 | 3436 | |
23020ab3 RJ |
3437 | strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver)); |
3438 | strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version)); | |
6dffbe53 TP |
3439 | if (vptr->pdev) |
3440 | strlcpy(info->bus_info, pci_name(vptr->pdev), | |
3441 | sizeof(info->bus_info)); | |
3442 | else | |
3443 | strlcpy(info->bus_info, "platform", sizeof(info->bus_info)); | |
2cf71d2e DJ |
3444 | } |
3445 | ||
3446 | static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |
3447 | { | |
3448 | struct velocity_info *vptr = netdev_priv(dev); | |
3449 | wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP; | |
3450 | wol->wolopts |= WAKE_MAGIC; | |
3451 | /* | |
3452 | if (vptr->wol_opts & VELOCITY_WOL_PHY) | |
3453 | wol.wolopts|=WAKE_PHY; | |
3454 | */ | |
3455 | if (vptr->wol_opts & VELOCITY_WOL_UCAST) | |
3456 | wol->wolopts |= WAKE_UCAST; | |
3457 | if (vptr->wol_opts & VELOCITY_WOL_ARP) | |
3458 | wol->wolopts |= WAKE_ARP; | |
3459 | memcpy(&wol->sopass, vptr->wol_passwd, 6); | |
3460 | } | |
3461 | ||
3462 | static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |
3463 | { | |
3464 | struct velocity_info *vptr = netdev_priv(dev); | |
3465 | ||
3466 | if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP))) | |
3467 | return -EOPNOTSUPP; | |
3468 | vptr->wol_opts = VELOCITY_WOL_MAGIC; | |
3469 | ||
3470 | /* | |
3471 | if (wol.wolopts & WAKE_PHY) { | |
3472 | vptr->wol_opts|=VELOCITY_WOL_PHY; | |
3473 | vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED; | |
3474 | } | |
3475 | */ | |
3476 | ||
3477 | if (wol->wolopts & WAKE_MAGIC) { | |
3478 | vptr->wol_opts |= VELOCITY_WOL_MAGIC; | |
3479 | vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; | |
3480 | } | |
3481 | if (wol->wolopts & WAKE_UCAST) { | |
3482 | vptr->wol_opts |= VELOCITY_WOL_UCAST; | |
3483 | vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; | |
3484 | } | |
3485 | if (wol->wolopts & WAKE_ARP) { | |
3486 | vptr->wol_opts |= VELOCITY_WOL_ARP; | |
3487 | vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; | |
3488 | } | |
3489 | memcpy(vptr->wol_passwd, wol->sopass, 6); | |
3490 | return 0; | |
3491 | } | |
3492 | ||
3493 | static u32 velocity_get_msglevel(struct net_device *dev) | |
3494 | { | |
3495 | return msglevel; | |
3496 | } | |
3497 | ||
3498 | static void velocity_set_msglevel(struct net_device *dev, u32 value) | |
3499 | { | |
3500 | msglevel = value; | |
3501 | } | |
3502 | ||
6dfc4b95 SK |
3503 | static int get_pending_timer_val(int val) |
3504 | { | |
3505 | int mult_bits = val >> 6; | |
3506 | int mult = 1; | |
3507 | ||
3508 | switch (mult_bits) | |
3509 | { | |
3510 | case 1: | |
3511 | mult = 4; break; | |
3512 | case 2: | |
3513 | mult = 16; break; | |
3514 | case 3: | |
3515 | mult = 64; break; | |
3516 | case 0: | |
3517 | default: | |
3518 | break; | |
3519 | } | |
3520 | ||
3521 | return (val & 0x3f) * mult; | |
3522 | } | |
3523 | ||
3524 | static void set_pending_timer_val(int *val, u32 us) | |
3525 | { | |
3526 | u8 mult = 0; | |
3527 | u8 shift = 0; | |
3528 | ||
3529 | if (us >= 0x3f) { | |
3530 | mult = 1; /* multiply by 4 */ | |
3531 | shift = 2; | |
3532 | } | |
3533 | if (us >= 0x3f * 4) { | |
3534 | mult = 2; /* multiply by 16 */ | |
3535 | shift = 4; | |
3536 | } | |
3537 | if (us >= 0x3f * 16) { | |
3538 | mult = 3; /* multiply by 64 */ | |
3539 | shift = 6; | |
3540 | } | |
3541 | ||
3542 | *val = (mult << 6) | ((us >> shift) & 0x3f); | |
3543 | } | |
3544 | ||
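The two helpers above pack the interrupt-coalescing timers into the hardware format: a 6-bit count scaled by a 2-bit multiplier of 1, 4, 16 or 64 microsecond units. A rough user-space round trip of that encoding, using hypothetical stand-alone copies of the helpers (encode_pending_timer() and decode_pending_timer() exist only for this sketch), looks like this; 100 us encodes to 0x59 and decodes back to 100 us:

#include <stdint.h>
#include <stdio.h>

/* Encode microseconds as <2-bit multiplier><6-bit count>, as the driver does */
static uint8_t encode_pending_timer(unsigned int us)
{
	uint8_t mult = 0, shift = 0;

	if (us >= 0x3f)      { mult = 1; shift = 2; }	/* 4 us units */
	if (us >= 0x3f * 4)  { mult = 2; shift = 4; }	/* 16 us units */
	if (us >= 0x3f * 16) { mult = 3; shift = 6; }	/* 64 us units */

	return (mult << 6) | ((us >> shift) & 0x3f);
}

/* Decode the register value back to microseconds */
static unsigned int decode_pending_timer(uint8_t val)
{
	static const unsigned int mult[4] = { 1, 4, 16, 64 };

	return (val & 0x3f) * mult[val >> 6];
}

int main(void)
{
	unsigned int us = 100;
	uint8_t reg = encode_pending_timer(us);

	printf("%u us -> 0x%02x -> %u us\n", us, reg, decode_pending_timer(reg));
	return 0;
}

From user space these values are reached with ethtool -C <dev> rx-usecs/tx-usecs (the queue timers) and rx-frames/tx-frames (the suppression counts), which land in the velocity_get_coalesce/velocity_set_coalesce handlers below.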
3545 | ||
3546 | static int velocity_get_coalesce(struct net_device *dev, | |
3547 | struct ethtool_coalesce *ecmd) | |
3548 | { | |
3549 | struct velocity_info *vptr = netdev_priv(dev); | |
3550 | ||
3551 | ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup; | |
3552 | ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup; | |
3553 | ||
3554 | ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer); | |
3555 | ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer); | |
3556 | ||
3557 | return 0; | |
3558 | } | |
3559 | ||
3560 | static int velocity_set_coalesce(struct net_device *dev, | |
3561 | struct ethtool_coalesce *ecmd) | |
3562 | { | |
3563 | struct velocity_info *vptr = netdev_priv(dev); | |
3564 | int max_us = 0x3f * 64; | |
39c2ff43 | 3565 | unsigned long flags; |
6dfc4b95 SK |
3566 | |
3567 | /* The timer count field is 6 bits; the largest multiplier is 64 */ | |
3568 | if (ecmd->tx_coalesce_usecs > max_us) | |
3569 | return -EINVAL; | |
3570 | if (ecmd->rx_coalesce_usecs > max_us) | |
3571 | return -EINVAL; | |
3572 | ||
3573 | if (ecmd->tx_max_coalesced_frames > 0xff) | |
3574 | return -EINVAL; | |
3575 | if (ecmd->rx_max_coalesced_frames > 0xff) | |
3576 | return -EINVAL; | |
3577 | ||
3578 | vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames; | |
3579 | vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames; | |
3580 | ||
3581 | set_pending_timer_val(&vptr->options.rxqueue_timer, | |
3582 | ecmd->rx_coalesce_usecs); | |
3583 | set_pending_timer_val(&vptr->options.txqueue_timer, | |
3584 | ecmd->tx_coalesce_usecs); | |
3585 | ||
3586 | /* Setup the interrupt suppression and queue timers */ | |
39c2ff43 | 3587 | spin_lock_irqsave(&vptr->lock, flags); |
6dfc4b95 SK |
3588 | mac_disable_int(vptr->mac_regs); |
3589 | setup_adaptive_interrupts(vptr); | |
3590 | setup_queue_timers(vptr); | |
3591 | ||
3592 | mac_write_int_mask(vptr->int_mask, vptr->mac_regs); | |
3593 | mac_clear_isr(vptr->mac_regs); | |
3594 | mac_enable_int(vptr->mac_regs); | |
39c2ff43 | 3595 | spin_unlock_irqrestore(&vptr->lock, flags); |
6dfc4b95 SK |
3596 | |
3597 | return 0; | |
3598 | } | |
3599 | ||
ad66fa7a | 3600 | static const char velocity_gstrings[][ETH_GSTRING_LEN] = { |
3601 | "rx_all", | |
3602 | "rx_ok", | |
3603 | "tx_ok", | |
3604 | "rx_error", | |
3605 | "rx_runt_ok", | |
3606 | "rx_runt_err", | |
3607 | "rx_64", | |
3608 | "tx_64", | |
3609 | "rx_65_to_127", | |
3610 | "tx_65_to_127", | |
3611 | "rx_128_to_255", | |
3612 | "tx_128_to_255", | |
3613 | "rx_256_to_511", | |
3614 | "tx_256_to_511", | |
3615 | "rx_512_to_1023", | |
3616 | "tx_512_to_1023", | |
3617 | "rx_1024_to_1518", | |
3618 | "tx_1024_to_1518", | |
3619 | "tx_ether_collisions", | |
3620 | "rx_crc_errors", | |
3621 | "rx_jumbo", | |
3622 | "tx_jumbo", | |
3623 | "rx_mac_control_frames", | |
3624 | "tx_mac_control_frames", | |
3625 | "rx_frame_alignement_errors", | |
3626 | "rx_long_ok", | |
3627 | "rx_long_err", | |
3628 | "tx_sqe_errors", | |
3629 | "rx_no_buf", | |
3630 | "rx_symbol_errors", | |
3631 | "in_range_length_errors", | |
3632 | "late_collisions" | |
3633 | }; | |
3634 | ||
3635 | static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data) | |
3636 | { | |
3637 | switch (sset) { | |
3638 | case ETH_SS_STATS: | |
3639 | memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings)); | |
3640 | break; | |
3641 | } | |
3642 | } | |
3643 | ||
3644 | static int velocity_get_sset_count(struct net_device *dev, int sset) | |
3645 | { | |
3646 | switch (sset) { | |
3647 | case ETH_SS_STATS: | |
3648 | return ARRAY_SIZE(velocity_gstrings); | |
3649 | default: | |
3650 | return -EOPNOTSUPP; | |
3651 | } | |
3652 | } | |
3653 | ||
3654 | static void velocity_get_ethtool_stats(struct net_device *dev, | |
3655 | struct ethtool_stats *stats, u64 *data) | |
3656 | { | |
3657 | if (netif_running(dev)) { | |
3658 | struct velocity_info *vptr = netdev_priv(dev); | |
3659 | u32 *p = vptr->mib_counter; | |
3660 | int i; | |
3661 | ||
3662 | spin_lock_irq(&vptr->lock); | |
3663 | velocity_update_hw_mibs(vptr); | |
3664 | spin_unlock_irq(&vptr->lock); | |
3665 | ||
3666 | for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++) | |
3667 | *data++ = *p++; | |
3668 | } | |
3669 | } | |
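/*
 * The three handlers above back "ethtool -S <dev>": get_strings returns
 * the name table, get_sset_count its length, and get_ethtool_stats the
 * MIB counters (refreshed from the hardware under the lock) in the
 * same order.
 */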
3670 | ||
2cf71d2e | 3671 | static const struct ethtool_ops velocity_ethtool_ops = { |
5ae297b0 | 3672 | .get_settings = velocity_get_settings, |
3673 | .set_settings = velocity_set_settings, | |
3674 | .get_drvinfo = velocity_get_drvinfo, | |
3675 | .get_wol = velocity_ethtool_get_wol, | |
3676 | .set_wol = velocity_ethtool_set_wol, | |
3677 | .get_msglevel = velocity_get_msglevel, | |
3678 | .set_msglevel = velocity_set_msglevel, | |
3679 | .get_link = velocity_get_link, | |
ad66fa7a | 3680 | .get_strings = velocity_get_strings, |
3681 | .get_sset_count = velocity_get_sset_count, | |
3682 | .get_ethtool_stats = velocity_get_ethtool_stats, | |
5ae297b0 | 3683 | .get_coalesce = velocity_get_coalesce, |
3684 | .set_coalesce = velocity_set_coalesce, | |
3685 | .begin = velocity_ethtool_up, | |
3686 | .complete = velocity_ethtool_down | |
2cf71d2e | 3687 | }; |
ce9f7fe3 | 3688 | |
5ae297b0 | 3689 | #if defined(CONFIG_PM) && defined(CONFIG_INET) |
1da177e4 LT |
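/*
 * Inetaddr notifier: when an IPv4 address on one of our interfaces
 * changes, refresh the cached address so the wake-on-ARP pattern
 * programmed at suspend time matches the current configuration.
 */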
3690 | static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr) |
3691 | { | |
5ae297b0 | 3692 | struct in_ifaddr *ifa = ptr; |
a337499f | 3693 | struct net_device *dev = ifa->ifa_dev->dev; |
1da177e4 | 3694 | |
516b4df1 BH |
3695 | if (dev_net(dev) == &init_net && |
3696 | dev->netdev_ops == &velocity_netdev_ops) | |
3697 | velocity_get_ip(netdev_priv(dev)); | |
a337499f | 3698 | |
1da177e4 LT |
3699 | return NOTIFY_DONE; |
3700 | } | |
ce9f7fe3 | 3701 | |
2cf71d2e | 3702 | static struct notifier_block velocity_inetaddr_notifier = { |
5ae297b0 | 3703 | .notifier_call = velocity_netdev_event, |
2cf71d2e DJ |
3704 | }; |
3705 | ||
3706 | static void velocity_register_notifier(void) | |
3707 | { | |
3708 | register_inetaddr_notifier(&velocity_inetaddr_notifier); | |
3709 | } | |
3710 | ||
3711 | static void velocity_unregister_notifier(void) | |
3712 | { | |
3713 | unregister_inetaddr_notifier(&velocity_inetaddr_notifier); | |
3714 | } | |
3715 | ||
3716 | #else | |
3717 | ||
3718 | #define velocity_register_notifier() do {} while (0) | |
3719 | #define velocity_unregister_notifier() do {} while (0) | |
3720 | ||
3721 | #endif /* defined(CONFIG_PM) && defined(CONFIG_INET) */ | |
3722 | ||
3723 | /** | |
3724 | * velocity_init_module - load time function | |
3725 | * | |
3726 | * Called when the velocity module is loaded. The PCI and platform | |
3727 | * drivers are registered with their subsystems, which in turn | |
3728 | * call the probe function for each velocity adapter installed | |
3729 | * in the system. | |
3730 | */ | |
3731 | static int __init velocity_init_module(void) | |
3732 | { | |
6dffbe53 | 3733 | int ret_pci, ret_platform; |
2cf71d2e DJ |
3734 | |
3735 | velocity_register_notifier(); | |
6dffbe53 TP |
3736 | |
3737 | ret_pci = pci_register_driver(&velocity_pci_driver); | |
3738 | ret_platform = platform_driver_register(&velocity_platform_driver); | |
3739 | ||
3740 | /* if both registrations failed, remove the notifier */ | |
3741 | if ((ret_pci < 0) && (ret_platform < 0)) { | |
2cf71d2e | 3742 | velocity_unregister_notifier(); |
6dffbe53 TP |
3743 | return ret_pci; |
3744 | } | |
3745 | ||
3746 | return 0; | |
2cf71d2e DJ |
3747 | } |
3748 | ||
3749 | /** | |
3750 | * velocity_cleanup_module - module unload | |
3751 | * | |
3752 | * Called when the velocity module is unloaded. It cleans up the | |
3753 | * notifiers and unregisters the PCI and platform driver interfaces | |
3754 | * for this hardware. This in turn cleans up all discovered | |
3755 | * interfaces before returning from the function. | |
3756 | */ | |
3757 | static void __exit velocity_cleanup_module(void) | |
3758 | { | |
3759 | velocity_unregister_notifier(); | |
6dffbe53 TP |
3760 | |
3761 | pci_unregister_driver(&velocity_pci_driver); | |
3762 | platform_driver_unregister(&velocity_platform_driver); | |
2cf71d2e DJ |
3763 | } |
3764 | ||
3765 | module_init(velocity_init_module); | |
3766 | module_exit(velocity_cleanup_module); |