]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/net/usb/lan78xx.c
lan78xx: remove unnecessary code
[mirror_ubuntu-artful-kernel.git] / drivers / net / usb / lan78xx.c
CommitLineData
55d7de9d
WH
1/*
2 * Copyright (C) 2015 Microchip Technology
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17#include <linux/version.h>
18#include <linux/module.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/ethtool.h>
55d7de9d
WH
22#include <linux/usb.h>
23#include <linux/crc32.h>
24#include <linux/signal.h>
25#include <linux/slab.h>
26#include <linux/if_vlan.h>
27#include <linux/uaccess.h>
28#include <linux/list.h>
29#include <linux/ip.h>
30#include <linux/ipv6.h>
31#include <linux/mdio.h>
32#include <net/ip6_checksum.h>
bdfba55e 33#include <linux/microchipphy.h>
55d7de9d
WH
34#include "lan78xx.h"
35
36#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
37#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
38#define DRIVER_NAME "lan78xx"
e4953910 39#define DRIVER_VERSION "1.0.2"
55d7de9d
WH
40
41#define TX_TIMEOUT_JIFFIES (5 * HZ)
42#define THROTTLE_JIFFIES (HZ / 8)
43#define UNLINK_TIMEOUT_MS 3
44
45#define RX_MAX_QUEUE_MEMORY (60 * 1518)
46
47#define SS_USB_PKT_SIZE (1024)
48#define HS_USB_PKT_SIZE (512)
49#define FS_USB_PKT_SIZE (64)
50
51#define MAX_RX_FIFO_SIZE (12 * 1024)
52#define MAX_TX_FIFO_SIZE (12 * 1024)
53#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
54#define DEFAULT_BULK_IN_DELAY (0x0800)
55#define MAX_SINGLE_PACKET_SIZE (9000)
56#define DEFAULT_TX_CSUM_ENABLE (true)
57#define DEFAULT_RX_CSUM_ENABLE (true)
58#define DEFAULT_TSO_CSUM_ENABLE (true)
59#define DEFAULT_VLAN_FILTER_ENABLE (true)
55d7de9d
WH
60#define TX_OVERHEAD (8)
61#define RXW_PADDING 2
62
63#define LAN78XX_USB_VENDOR_ID (0x0424)
64#define LAN7800_USB_PRODUCT_ID (0x7800)
65#define LAN7850_USB_PRODUCT_ID (0x7850)
66#define LAN78XX_EEPROM_MAGIC (0x78A5)
67#define LAN78XX_OTP_MAGIC (0x78F3)
68
69#define MII_READ 1
70#define MII_WRITE 0
71
72#define EEPROM_INDICATOR (0xA5)
73#define EEPROM_MAC_OFFSET (0x01)
74#define MAX_EEPROM_SIZE 512
75#define OTP_INDICATOR_1 (0xF3)
76#define OTP_INDICATOR_2 (0xF7)
77
78#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
79 WAKE_MCAST | WAKE_BCAST | \
80 WAKE_ARP | WAKE_MAGIC)
81
82/* USB related defines */
83#define BULK_IN_PIPE 1
84#define BULK_OUT_PIPE 2
85
86/* default autosuspend delay (mSec)*/
87#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
88
89static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
90 "RX FCS Errors",
91 "RX Alignment Errors",
92 "Rx Fragment Errors",
93 "RX Jabber Errors",
94 "RX Undersize Frame Errors",
95 "RX Oversize Frame Errors",
96 "RX Dropped Frames",
97 "RX Unicast Byte Count",
98 "RX Broadcast Byte Count",
99 "RX Multicast Byte Count",
100 "RX Unicast Frames",
101 "RX Broadcast Frames",
102 "RX Multicast Frames",
103 "RX Pause Frames",
104 "RX 64 Byte Frames",
105 "RX 65 - 127 Byte Frames",
106 "RX 128 - 255 Byte Frames",
107 "RX 256 - 511 Bytes Frames",
108 "RX 512 - 1023 Byte Frames",
109 "RX 1024 - 1518 Byte Frames",
110 "RX Greater 1518 Byte Frames",
111 "EEE RX LPI Transitions",
112 "EEE RX LPI Time",
113 "TX FCS Errors",
114 "TX Excess Deferral Errors",
115 "TX Carrier Errors",
116 "TX Bad Byte Count",
117 "TX Single Collisions",
118 "TX Multiple Collisions",
119 "TX Excessive Collision",
120 "TX Late Collisions",
121 "TX Unicast Byte Count",
122 "TX Broadcast Byte Count",
123 "TX Multicast Byte Count",
124 "TX Unicast Frames",
125 "TX Broadcast Frames",
126 "TX Multicast Frames",
127 "TX Pause Frames",
128 "TX 64 Byte Frames",
129 "TX 65 - 127 Byte Frames",
130 "TX 128 - 255 Byte Frames",
131 "TX 256 - 511 Bytes Frames",
132 "TX 512 - 1023 Byte Frames",
133 "TX 1024 - 1518 Byte Frames",
134 "TX Greater 1518 Byte Frames",
135 "EEE TX LPI Transitions",
136 "EEE TX LPI Time",
137};
138
139struct lan78xx_statstage {
140 u32 rx_fcs_errors;
141 u32 rx_alignment_errors;
142 u32 rx_fragment_errors;
143 u32 rx_jabber_errors;
144 u32 rx_undersize_frame_errors;
145 u32 rx_oversize_frame_errors;
146 u32 rx_dropped_frames;
147 u32 rx_unicast_byte_count;
148 u32 rx_broadcast_byte_count;
149 u32 rx_multicast_byte_count;
150 u32 rx_unicast_frames;
151 u32 rx_broadcast_frames;
152 u32 rx_multicast_frames;
153 u32 rx_pause_frames;
154 u32 rx_64_byte_frames;
155 u32 rx_65_127_byte_frames;
156 u32 rx_128_255_byte_frames;
157 u32 rx_256_511_bytes_frames;
158 u32 rx_512_1023_byte_frames;
159 u32 rx_1024_1518_byte_frames;
160 u32 rx_greater_1518_byte_frames;
161 u32 eee_rx_lpi_transitions;
162 u32 eee_rx_lpi_time;
163 u32 tx_fcs_errors;
164 u32 tx_excess_deferral_errors;
165 u32 tx_carrier_errors;
166 u32 tx_bad_byte_count;
167 u32 tx_single_collisions;
168 u32 tx_multiple_collisions;
169 u32 tx_excessive_collision;
170 u32 tx_late_collisions;
171 u32 tx_unicast_byte_count;
172 u32 tx_broadcast_byte_count;
173 u32 tx_multicast_byte_count;
174 u32 tx_unicast_frames;
175 u32 tx_broadcast_frames;
176 u32 tx_multicast_frames;
177 u32 tx_pause_frames;
178 u32 tx_64_byte_frames;
179 u32 tx_65_127_byte_frames;
180 u32 tx_128_255_byte_frames;
181 u32 tx_256_511_bytes_frames;
182 u32 tx_512_1023_byte_frames;
183 u32 tx_1024_1518_byte_frames;
184 u32 tx_greater_1518_byte_frames;
185 u32 eee_tx_lpi_transitions;
186 u32 eee_tx_lpi_time;
187};
188
189struct lan78xx_net;
190
191struct lan78xx_priv {
192 struct lan78xx_net *dev;
193 u32 rfe_ctl;
194 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
195 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
196 u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
197 struct mutex dataport_mutex; /* for dataport access */
198 spinlock_t rfe_ctl_lock; /* for rfe register access */
199 struct work_struct set_multicast;
200 struct work_struct set_vlan;
201 u32 wol;
202};
203
204enum skb_state {
205 illegal = 0,
206 tx_start,
207 tx_done,
208 rx_start,
209 rx_done,
210 rx_cleanup,
211 unlink_start
212};
213
214struct skb_data { /* skb->cb is one of these */
215 struct urb *urb;
216 struct lan78xx_net *dev;
217 enum skb_state state;
218 size_t length;
219};
220
221struct usb_context {
222 struct usb_ctrlrequest req;
223 struct lan78xx_net *dev;
224};
225
226#define EVENT_TX_HALT 0
227#define EVENT_RX_HALT 1
228#define EVENT_RX_MEMORY 2
229#define EVENT_STS_SPLIT 3
230#define EVENT_LINK_RESET 4
231#define EVENT_RX_PAUSED 5
232#define EVENT_DEV_WAKING 6
233#define EVENT_DEV_ASLEEP 7
234#define EVENT_DEV_OPEN 8
235
236struct lan78xx_net {
237 struct net_device *net;
238 struct usb_device *udev;
239 struct usb_interface *intf;
240 void *driver_priv;
241
242 int rx_qlen;
243 int tx_qlen;
244 struct sk_buff_head rxq;
245 struct sk_buff_head txq;
246 struct sk_buff_head done;
247 struct sk_buff_head rxq_pause;
248 struct sk_buff_head txq_pend;
249
250 struct tasklet_struct bh;
251 struct delayed_work wq;
252
253 struct usb_host_endpoint *ep_blkin;
254 struct usb_host_endpoint *ep_blkout;
255 struct usb_host_endpoint *ep_intr;
256
257 int msg_enable;
258
259 struct urb *urb_intr;
260 struct usb_anchor deferred;
261
262 struct mutex phy_mutex; /* for phy access */
263 unsigned pipe_in, pipe_out, pipe_intr;
264
265 u32 hard_mtu; /* count any extra framing */
266 size_t rx_urb_size; /* size for rx urbs */
267
268 unsigned long flags;
269
270 wait_queue_head_t *wait;
271 unsigned char suspend_count;
272
273 unsigned maxpacket;
274 struct timer_list delay;
275
276 unsigned long data[5];
55d7de9d
WH
277
278 int link_on;
279 u8 mdix_ctrl;
ce85e13a 280
87177ba6
WH
281 u32 chipid;
282 u32 chiprev;
ce85e13a 283 struct mii_bus *mdiobus;
55d7de9d
WH
284};
285
286/* use ethtool to change the level for any given device */
287static int msg_level = -1;
288module_param(msg_level, int, 0);
289MODULE_PARM_DESC(msg_level, "Override default message level");
290
291static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
292{
293 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
294 int ret;
295
55d7de9d
WH
296 if (!buf)
297 return -ENOMEM;
298
299 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
300 USB_VENDOR_REQUEST_READ_REGISTER,
301 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
302 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
303 if (likely(ret >= 0)) {
304 le32_to_cpus(buf);
305 *data = *buf;
306 } else {
307 netdev_warn(dev->net,
308 "Failed to read register index 0x%08x. ret = %d",
309 index, ret);
310 }
311
312 kfree(buf);
313
314 return ret;
315}
316
317static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
318{
319 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
320 int ret;
321
55d7de9d
WH
322 if (!buf)
323 return -ENOMEM;
324
325 *buf = data;
326 cpu_to_le32s(buf);
327
328 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
329 USB_VENDOR_REQUEST_WRITE_REGISTER,
330 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
331 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
332 if (unlikely(ret < 0)) {
333 netdev_warn(dev->net,
334 "Failed to write register index 0x%08x. ret = %d",
335 index, ret);
336 }
337
338 kfree(buf);
339
340 return ret;
341}
342
343static int lan78xx_read_stats(struct lan78xx_net *dev,
344 struct lan78xx_statstage *data)
345{
346 int ret = 0;
347 int i;
348 struct lan78xx_statstage *stats;
349 u32 *src;
350 u32 *dst;
351
55d7de9d
WH
352 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
353 if (!stats)
354 return -ENOMEM;
355
356 ret = usb_control_msg(dev->udev,
357 usb_rcvctrlpipe(dev->udev, 0),
358 USB_VENDOR_REQUEST_GET_STATS,
359 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
360 0,
361 0,
362 (void *)stats,
363 sizeof(*stats),
364 USB_CTRL_SET_TIMEOUT);
365 if (likely(ret >= 0)) {
366 src = (u32 *)stats;
367 dst = (u32 *)data;
368 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
369 le32_to_cpus(&src[i]);
370 dst[i] = src[i];
371 }
372 } else {
373 netdev_warn(dev->net,
374 "Failed to read stat ret = 0x%x", ret);
375 }
376
377 kfree(stats);
378
379 return ret;
380}
381
382/* Loop until the read is completed with timeout called with phy_mutex held */
383static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
384{
385 unsigned long start_time = jiffies;
386 u32 val;
387 int ret;
388
389 do {
390 ret = lan78xx_read_reg(dev, MII_ACC, &val);
391 if (unlikely(ret < 0))
392 return -EIO;
393
394 if (!(val & MII_ACC_MII_BUSY_))
395 return 0;
396 } while (!time_after(jiffies, start_time + HZ));
397
398 return -EIO;
399}
400
401static inline u32 mii_access(int id, int index, int read)
402{
403 u32 ret;
404
405 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
406 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
407 if (read)
408 ret |= MII_ACC_MII_READ_;
409 else
410 ret |= MII_ACC_MII_WRITE_;
411 ret |= MII_ACC_MII_BUSY_;
412
413 return ret;
414}
415
55d7de9d
WH
416static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
417{
418 unsigned long start_time = jiffies;
419 u32 val;
420 int ret;
421
422 do {
423 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
424 if (unlikely(ret < 0))
425 return -EIO;
426
427 if (!(val & E2P_CMD_EPC_BUSY_) ||
428 (val & E2P_CMD_EPC_TIMEOUT_))
429 break;
430 usleep_range(40, 100);
431 } while (!time_after(jiffies, start_time + HZ));
432
433 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
434 netdev_warn(dev->net, "EEPROM read operation timeout");
435 return -EIO;
436 }
437
438 return 0;
439}
440
441static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
442{
443 unsigned long start_time = jiffies;
444 u32 val;
445 int ret;
446
447 do {
448 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
449 if (unlikely(ret < 0))
450 return -EIO;
451
452 if (!(val & E2P_CMD_EPC_BUSY_))
453 return 0;
454
455 usleep_range(40, 100);
456 } while (!time_after(jiffies, start_time + HZ));
457
458 netdev_warn(dev->net, "EEPROM is busy");
459 return -EIO;
460}
461
462static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
463 u32 length, u8 *data)
464{
465 u32 val;
a0db7d10 466 u32 saved;
55d7de9d 467 int i, ret;
a0db7d10
WH
468 int retval;
469
470 /* depends on chip, some EEPROM pins are muxed with LED function.
471 * disable & restore LED function to access EEPROM.
472 */
473 ret = lan78xx_read_reg(dev, HW_CFG, &val);
474 saved = val;
87177ba6 475 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
a0db7d10
WH
476 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
477 ret = lan78xx_write_reg(dev, HW_CFG, val);
478 }
55d7de9d 479
a0db7d10
WH
480 retval = lan78xx_eeprom_confirm_not_busy(dev);
481 if (retval)
482 return retval;
55d7de9d
WH
483
484 for (i = 0; i < length; i++) {
485 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
486 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
487 ret = lan78xx_write_reg(dev, E2P_CMD, val);
a0db7d10
WH
488 if (unlikely(ret < 0)) {
489 retval = -EIO;
490 goto exit;
491 }
55d7de9d 492
a0db7d10
WH
493 retval = lan78xx_wait_eeprom(dev);
494 if (retval < 0)
495 goto exit;
55d7de9d
WH
496
497 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
a0db7d10
WH
498 if (unlikely(ret < 0)) {
499 retval = -EIO;
500 goto exit;
501 }
55d7de9d
WH
502
503 data[i] = val & 0xFF;
504 offset++;
505 }
506
a0db7d10
WH
507 retval = 0;
508exit:
87177ba6 509 if (dev->chipid == ID_REV_CHIP_ID_7800_)
a0db7d10
WH
510 ret = lan78xx_write_reg(dev, HW_CFG, saved);
511
512 return retval;
55d7de9d
WH
513}
514
515static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
516 u32 length, u8 *data)
517{
518 u8 sig;
519 int ret;
520
521 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
522 if ((ret == 0) && (sig == EEPROM_INDICATOR))
523 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
524 else
525 ret = -EINVAL;
526
527 return ret;
528}
529
530static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
531 u32 length, u8 *data)
532{
533 u32 val;
a0db7d10 534 u32 saved;
55d7de9d 535 int i, ret;
a0db7d10
WH
536 int retval;
537
538 /* depends on chip, some EEPROM pins are muxed with LED function.
539 * disable & restore LED function to access EEPROM.
540 */
541 ret = lan78xx_read_reg(dev, HW_CFG, &val);
542 saved = val;
87177ba6 543 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
a0db7d10
WH
544 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
545 ret = lan78xx_write_reg(dev, HW_CFG, val);
546 }
55d7de9d 547
a0db7d10
WH
548 retval = lan78xx_eeprom_confirm_not_busy(dev);
549 if (retval)
550 goto exit;
55d7de9d
WH
551
552 /* Issue write/erase enable command */
553 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
554 ret = lan78xx_write_reg(dev, E2P_CMD, val);
a0db7d10
WH
555 if (unlikely(ret < 0)) {
556 retval = -EIO;
557 goto exit;
558 }
55d7de9d 559
a0db7d10
WH
560 retval = lan78xx_wait_eeprom(dev);
561 if (retval < 0)
562 goto exit;
55d7de9d
WH
563
564 for (i = 0; i < length; i++) {
565 /* Fill data register */
566 val = data[i];
567 ret = lan78xx_write_reg(dev, E2P_DATA, val);
a0db7d10
WH
568 if (ret < 0) {
569 retval = -EIO;
570 goto exit;
571 }
55d7de9d
WH
572
573 /* Send "write" command */
574 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
575 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
576 ret = lan78xx_write_reg(dev, E2P_CMD, val);
a0db7d10
WH
577 if (ret < 0) {
578 retval = -EIO;
579 goto exit;
580 }
55d7de9d 581
a0db7d10
WH
582 retval = lan78xx_wait_eeprom(dev);
583 if (retval < 0)
584 goto exit;
55d7de9d
WH
585
586 offset++;
587 }
588
a0db7d10
WH
589 retval = 0;
590exit:
87177ba6 591 if (dev->chipid == ID_REV_CHIP_ID_7800_)
a0db7d10
WH
592 ret = lan78xx_write_reg(dev, HW_CFG, saved);
593
594 return retval;
55d7de9d
WH
595}
596
597static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
598 u32 length, u8 *data)
599{
600 int i;
601 int ret;
602 u32 buf;
603 unsigned long timeout;
604
605 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
606
607 if (buf & OTP_PWR_DN_PWRDN_N_) {
608 /* clear it and wait to be cleared */
609 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
610
611 timeout = jiffies + HZ;
612 do {
613 usleep_range(1, 10);
614 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
615 if (time_after(jiffies, timeout)) {
616 netdev_warn(dev->net,
617 "timeout on OTP_PWR_DN");
618 return -EIO;
619 }
620 } while (buf & OTP_PWR_DN_PWRDN_N_);
621 }
622
623 for (i = 0; i < length; i++) {
624 ret = lan78xx_write_reg(dev, OTP_ADDR1,
625 ((offset + i) >> 8) & OTP_ADDR1_15_11);
626 ret = lan78xx_write_reg(dev, OTP_ADDR2,
627 ((offset + i) & OTP_ADDR2_10_3));
628
629 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
630 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
631
632 timeout = jiffies + HZ;
633 do {
634 udelay(1);
635 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
636 if (time_after(jiffies, timeout)) {
637 netdev_warn(dev->net,
638 "timeout on OTP_STATUS");
639 return -EIO;
640 }
641 } while (buf & OTP_STATUS_BUSY_);
642
643 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
644
645 data[i] = (u8)(buf & 0xFF);
646 }
647
648 return 0;
649}
650
9fb6066d
WH
651static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
652 u32 length, u8 *data)
653{
654 int i;
655 int ret;
656 u32 buf;
657 unsigned long timeout;
658
659 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
660
661 if (buf & OTP_PWR_DN_PWRDN_N_) {
662 /* clear it and wait to be cleared */
663 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
664
665 timeout = jiffies + HZ;
666 do {
667 udelay(1);
668 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
669 if (time_after(jiffies, timeout)) {
670 netdev_warn(dev->net,
671 "timeout on OTP_PWR_DN completion");
672 return -EIO;
673 }
674 } while (buf & OTP_PWR_DN_PWRDN_N_);
675 }
676
677 /* set to BYTE program mode */
678 ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
679
680 for (i = 0; i < length; i++) {
681 ret = lan78xx_write_reg(dev, OTP_ADDR1,
682 ((offset + i) >> 8) & OTP_ADDR1_15_11);
683 ret = lan78xx_write_reg(dev, OTP_ADDR2,
684 ((offset + i) & OTP_ADDR2_10_3));
685 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
686 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
687 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
688
689 timeout = jiffies + HZ;
690 do {
691 udelay(1);
692 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
693 if (time_after(jiffies, timeout)) {
694 netdev_warn(dev->net,
695 "Timeout on OTP_STATUS completion");
696 return -EIO;
697 }
698 } while (buf & OTP_STATUS_BUSY_);
699 }
700
701 return 0;
702}
703
55d7de9d
WH
704static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
705 u32 length, u8 *data)
706{
707 u8 sig;
708 int ret;
709
710 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
711
712 if (ret == 0) {
713 if (sig == OTP_INDICATOR_1)
714 offset = offset;
715 else if (sig == OTP_INDICATOR_2)
716 offset += 0x100;
717 else
718 ret = -EINVAL;
719 ret = lan78xx_read_raw_otp(dev, offset, length, data);
720 }
721
722 return ret;
723}
724
725static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
726{
727 int i, ret;
728
729 for (i = 0; i < 100; i++) {
730 u32 dp_sel;
731
732 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
733 if (unlikely(ret < 0))
734 return -EIO;
735
736 if (dp_sel & DP_SEL_DPRDY_)
737 return 0;
738
739 usleep_range(40, 100);
740 }
741
742 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
743
744 return -EIO;
745}
746
747static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
748 u32 addr, u32 length, u32 *buf)
749{
750 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
751 u32 dp_sel;
752 int i, ret;
753
754 if (usb_autopm_get_interface(dev->intf) < 0)
755 return 0;
756
757 mutex_lock(&pdata->dataport_mutex);
758
759 ret = lan78xx_dataport_wait_not_busy(dev);
760 if (ret < 0)
761 goto done;
762
763 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
764
765 dp_sel &= ~DP_SEL_RSEL_MASK_;
766 dp_sel |= ram_select;
767 ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
768
769 for (i = 0; i < length; i++) {
770 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
771
772 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
773
774 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
775
776 ret = lan78xx_dataport_wait_not_busy(dev);
777 if (ret < 0)
778 goto done;
779 }
780
781done:
782 mutex_unlock(&pdata->dataport_mutex);
783 usb_autopm_put_interface(dev->intf);
784
785 return ret;
786}
787
788static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
789 int index, u8 addr[ETH_ALEN])
790{
791 u32 temp;
792
793 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
794 temp = addr[3];
795 temp = addr[2] | (temp << 8);
796 temp = addr[1] | (temp << 8);
797 temp = addr[0] | (temp << 8);
798 pdata->pfilter_table[index][1] = temp;
799 temp = addr[5];
800 temp = addr[4] | (temp << 8);
801 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
802 pdata->pfilter_table[index][0] = temp;
803 }
804}
805
806/* returns hash bit number for given MAC address */
807static inline u32 lan78xx_hash(char addr[ETH_ALEN])
808{
809 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
810}
811
812static void lan78xx_deferred_multicast_write(struct work_struct *param)
813{
814 struct lan78xx_priv *pdata =
815 container_of(param, struct lan78xx_priv, set_multicast);
816 struct lan78xx_net *dev = pdata->dev;
817 int i;
818 int ret;
819
820 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
821 pdata->rfe_ctl);
822
823 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
824 DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
825
826 for (i = 1; i < NUM_OF_MAF; i++) {
827 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
828 ret = lan78xx_write_reg(dev, MAF_LO(i),
829 pdata->pfilter_table[i][1]);
830 ret = lan78xx_write_reg(dev, MAF_HI(i),
831 pdata->pfilter_table[i][0]);
832 }
833
834 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
835}
836
837static void lan78xx_set_multicast(struct net_device *netdev)
838{
839 struct lan78xx_net *dev = netdev_priv(netdev);
840 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
841 unsigned long flags;
842 int i;
843
844 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
845
846 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
847 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
848
849 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
850 pdata->mchash_table[i] = 0;
851 /* pfilter_table[0] has own HW address */
852 for (i = 1; i < NUM_OF_MAF; i++) {
853 pdata->pfilter_table[i][0] =
854 pdata->pfilter_table[i][1] = 0;
855 }
856
857 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
858
859 if (dev->net->flags & IFF_PROMISC) {
860 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
861 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
862 } else {
863 if (dev->net->flags & IFF_ALLMULTI) {
864 netif_dbg(dev, drv, dev->net,
865 "receive all multicast enabled");
866 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
867 }
868 }
869
870 if (netdev_mc_count(dev->net)) {
871 struct netdev_hw_addr *ha;
872 int i;
873
874 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
875
876 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
877
878 i = 1;
879 netdev_for_each_mc_addr(ha, netdev) {
880 /* set first 32 into Perfect Filter */
881 if (i < 33) {
882 lan78xx_set_addr_filter(pdata, i, ha->addr);
883 } else {
884 u32 bitnum = lan78xx_hash(ha->addr);
885
886 pdata->mchash_table[bitnum / 32] |=
887 (1 << (bitnum % 32));
888 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
889 }
890 i++;
891 }
892 }
893
894 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
895
896 /* defer register writes to a sleepable context */
897 schedule_work(&pdata->set_multicast);
898}
899
900static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
901 u16 lcladv, u16 rmtadv)
902{
903 u32 flow = 0, fct_flow = 0;
904 int ret;
905
906 u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
907
908 if (cap & FLOW_CTRL_TX)
909 flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);
910
911 if (cap & FLOW_CTRL_RX)
912 flow |= FLOW_CR_RX_FCEN_;
913
914 if (dev->udev->speed == USB_SPEED_SUPER)
915 fct_flow = 0x817;
916 else if (dev->udev->speed == USB_SPEED_HIGH)
917 fct_flow = 0x211;
918
919 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
920 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
921 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
922
923 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
924
925 /* threshold value should be set before enabling flow */
926 ret = lan78xx_write_reg(dev, FLOW, flow);
927
928 return 0;
929}
930
931static int lan78xx_link_reset(struct lan78xx_net *dev)
932{
ce85e13a 933 struct phy_device *phydev = dev->net->phydev;
55d7de9d 934 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
99c79ece 935 int ladv, radv, ret;
55d7de9d
WH
936 u32 buf;
937
938 /* clear PHY interrupt status */
bdfba55e 939 ret = phy_read(phydev, LAN88XX_INT_STS);
55d7de9d
WH
940 if (unlikely(ret < 0))
941 return -EIO;
942
943 /* clear LAN78xx interrupt status */
944 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
945 if (unlikely(ret < 0))
946 return -EIO;
947
ce85e13a
WH
948 phy_read_status(phydev);
949
950 if (!phydev->link && dev->link_on) {
55d7de9d 951 dev->link_on = false;
55d7de9d
WH
952
953 /* reset MAC */
954 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
955 if (unlikely(ret < 0))
956 return -EIO;
957 buf |= MAC_CR_RST_;
958 ret = lan78xx_write_reg(dev, MAC_CR, buf);
959 if (unlikely(ret < 0))
960 return -EIO;
e4953910
WH
961
962 phy_mac_interrupt(phydev, 0);
ce85e13a 963 } else if (phydev->link && !dev->link_on) {
55d7de9d
WH
964 dev->link_on = true;
965
ce85e13a 966 phy_ethtool_gset(phydev, &ecmd);
55d7de9d 967
bdfba55e 968 ret = phy_read(phydev, LAN88XX_INT_STS);
55d7de9d
WH
969
970 if (dev->udev->speed == USB_SPEED_SUPER) {
971 if (ethtool_cmd_speed(&ecmd) == 1000) {
972 /* disable U2 */
973 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
974 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
975 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
976 /* enable U1 */
977 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
978 buf |= USB_CFG1_DEV_U1_INIT_EN_;
979 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
980 } else {
981 /* enable U1 & U2 */
982 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
983 buf |= USB_CFG1_DEV_U2_INIT_EN_;
984 buf |= USB_CFG1_DEV_U1_INIT_EN_;
985 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
986 }
987 }
988
ce85e13a 989 ladv = phy_read(phydev, MII_ADVERTISE);
99c79ece
GU
990 if (ladv < 0)
991 return ladv;
55d7de9d 992
ce85e13a 993 radv = phy_read(phydev, MII_LPA);
99c79ece
GU
994 if (radv < 0)
995 return radv;
55d7de9d
WH
996
997 netif_dbg(dev, link, dev->net,
998 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
999 ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
1000
1001 ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
e4953910 1002 phy_mac_interrupt(phydev, 1);
55d7de9d
WH
1003 }
1004
1005 return ret;
1006}
1007
1008/* some work can't be done in tasklets, so we use keventd
1009 *
1010 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1011 * but tasklet_schedule() doesn't. hope the failure is rare.
1012 */
1013void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1014{
1015 set_bit(work, &dev->flags);
1016 if (!schedule_delayed_work(&dev->wq, 0))
1017 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1018}
1019
1020static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1021{
1022 u32 intdata;
1023
1024 if (urb->actual_length != 4) {
1025 netdev_warn(dev->net,
1026 "unexpected urb length %d", urb->actual_length);
1027 return;
1028 }
1029
1030 memcpy(&intdata, urb->transfer_buffer, 4);
1031 le32_to_cpus(&intdata);
1032
1033 if (intdata & INT_ENP_PHY_INT) {
1034 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1035 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1036 } else
1037 netdev_warn(dev->net,
1038 "unexpected interrupt: 0x%08x\n", intdata);
1039}
1040
1041static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1042{
1043 return MAX_EEPROM_SIZE;
1044}
1045
1046static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1047 struct ethtool_eeprom *ee, u8 *data)
1048{
1049 struct lan78xx_net *dev = netdev_priv(netdev);
1050
1051 ee->magic = LAN78XX_EEPROM_MAGIC;
1052
1053 return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1054}
1055
1056static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1057 struct ethtool_eeprom *ee, u8 *data)
1058{
1059 struct lan78xx_net *dev = netdev_priv(netdev);
1060
1061 /* Allow entire eeprom update only */
1062 if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1063 (ee->offset == 0) &&
1064 (ee->len == 512) &&
1065 (data[0] == EEPROM_INDICATOR))
1066 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1067 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1068 (ee->offset == 0) &&
1069 (ee->len == 512) &&
1070 (data[0] == OTP_INDICATOR_1))
9fb6066d 1071 return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
55d7de9d
WH
1072
1073 return -EINVAL;
1074}
1075
1076static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1077 u8 *data)
1078{
1079 if (stringset == ETH_SS_STATS)
1080 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1081}
1082
1083static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1084{
1085 if (sset == ETH_SS_STATS)
1086 return ARRAY_SIZE(lan78xx_gstrings);
1087 else
1088 return -EOPNOTSUPP;
1089}
1090
1091static void lan78xx_get_stats(struct net_device *netdev,
1092 struct ethtool_stats *stats, u64 *data)
1093{
1094 struct lan78xx_net *dev = netdev_priv(netdev);
1095 struct lan78xx_statstage lan78xx_stat;
1096 u32 *p;
1097 int i;
1098
1099 if (usb_autopm_get_interface(dev->intf) < 0)
1100 return;
1101
1102 if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
1103 p = (u32 *)&lan78xx_stat;
1104 for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
1105 data[i] = p[i];
1106 }
1107
1108 usb_autopm_put_interface(dev->intf);
1109}
1110
1111static void lan78xx_get_wol(struct net_device *netdev,
1112 struct ethtool_wolinfo *wol)
1113{
1114 struct lan78xx_net *dev = netdev_priv(netdev);
1115 int ret;
1116 u32 buf;
1117 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1118
1119 if (usb_autopm_get_interface(dev->intf) < 0)
1120 return;
1121
1122 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1123 if (unlikely(ret < 0)) {
1124 wol->supported = 0;
1125 wol->wolopts = 0;
1126 } else {
1127 if (buf & USB_CFG_RMT_WKP_) {
1128 wol->supported = WAKE_ALL;
1129 wol->wolopts = pdata->wol;
1130 } else {
1131 wol->supported = 0;
1132 wol->wolopts = 0;
1133 }
1134 }
1135
1136 usb_autopm_put_interface(dev->intf);
1137}
1138
1139static int lan78xx_set_wol(struct net_device *netdev,
1140 struct ethtool_wolinfo *wol)
1141{
1142 struct lan78xx_net *dev = netdev_priv(netdev);
1143 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1144 int ret;
1145
1146 ret = usb_autopm_get_interface(dev->intf);
1147 if (ret < 0)
1148 return ret;
1149
1150 pdata->wol = 0;
1151 if (wol->wolopts & WAKE_UCAST)
1152 pdata->wol |= WAKE_UCAST;
1153 if (wol->wolopts & WAKE_MCAST)
1154 pdata->wol |= WAKE_MCAST;
1155 if (wol->wolopts & WAKE_BCAST)
1156 pdata->wol |= WAKE_BCAST;
1157 if (wol->wolopts & WAKE_MAGIC)
1158 pdata->wol |= WAKE_MAGIC;
1159 if (wol->wolopts & WAKE_PHY)
1160 pdata->wol |= WAKE_PHY;
1161 if (wol->wolopts & WAKE_ARP)
1162 pdata->wol |= WAKE_ARP;
1163
1164 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1165
ce85e13a
WH
1166 phy_ethtool_set_wol(netdev->phydev, wol);
1167
55d7de9d
WH
1168 usb_autopm_put_interface(dev->intf);
1169
1170 return ret;
1171}
1172
1173static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1174{
1175 struct lan78xx_net *dev = netdev_priv(net);
ce85e13a 1176 struct phy_device *phydev = net->phydev;
55d7de9d
WH
1177 int ret;
1178 u32 buf;
55d7de9d
WH
1179
1180 ret = usb_autopm_get_interface(dev->intf);
1181 if (ret < 0)
1182 return ret;
1183
ce85e13a
WH
1184 ret = phy_ethtool_get_eee(phydev, edata);
1185 if (ret < 0)
1186 goto exit;
1187
55d7de9d
WH
1188 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1189 if (buf & MAC_CR_EEE_EN_) {
55d7de9d 1190 edata->eee_enabled = true;
ce85e13a
WH
1191 edata->eee_active = !!(edata->advertised &
1192 edata->lp_advertised);
55d7de9d
WH
1193 edata->tx_lpi_enabled = true;
1194 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1195 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1196 edata->tx_lpi_timer = buf;
1197 } else {
55d7de9d
WH
1198 edata->eee_enabled = false;
1199 edata->eee_active = false;
55d7de9d
WH
1200 edata->tx_lpi_enabled = false;
1201 edata->tx_lpi_timer = 0;
1202 }
1203
ce85e13a
WH
1204 ret = 0;
1205exit:
55d7de9d
WH
1206 usb_autopm_put_interface(dev->intf);
1207
ce85e13a 1208 return ret;
55d7de9d
WH
1209}
1210
1211static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1212{
1213 struct lan78xx_net *dev = netdev_priv(net);
1214 int ret;
1215 u32 buf;
1216
1217 ret = usb_autopm_get_interface(dev->intf);
1218 if (ret < 0)
1219 return ret;
1220
1221 if (edata->eee_enabled) {
1222 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1223 buf |= MAC_CR_EEE_EN_;
1224 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1225
ce85e13a
WH
1226 phy_ethtool_set_eee(net->phydev, edata);
1227
1228 buf = (u32)edata->tx_lpi_timer;
1229 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
55d7de9d
WH
1230 } else {
1231 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1232 buf &= ~MAC_CR_EEE_EN_;
1233 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1234 }
1235
1236 usb_autopm_put_interface(dev->intf);
1237
1238 return 0;
1239}
1240
/* ethtool .get_link: refresh the PHY status registers so that
 * phydev->link reflects the current carrier state, then report it.
 */
static u32 lan78xx_get_link(struct net_device *net)
{
	phy_read_status(net->phydev);

	return net->phydev->link;
}
1247
/* ethtool .nway_reset: restart autonegotiation on the attached PHY. */
int lan78xx_nway_reset(struct net_device *net)
{
	return phy_start_aneg(net->phydev);
}
1252
1253static void lan78xx_get_drvinfo(struct net_device *net,
1254 struct ethtool_drvinfo *info)
1255{
1256 struct lan78xx_net *dev = netdev_priv(net);
1257
1258 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1259 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1260 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1261}
1262
/* ethtool .get_msglevel: return the netif message-enable bitmap. */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1269
/* ethtool .set_msglevel: set the netif message-enable bitmap. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1276
758c5c11
WH
/* Read the raw MDI-X mode-control register from the LAN88xx PHY.
 *
 * The register lives in extended page space 1, so the page is switched
 * for the read and restored to page 0 afterwards.  Returns the raw
 * LAN88XX_EXT_MODE_CTRL value (or a negative phy_read() error code).
 */
static int lan78xx_get_mdix_status(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int buf;

	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
	buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);

	return buf;
}
1288
1289static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1290{
1291 struct lan78xx_net *dev = netdev_priv(net);
1292 struct phy_device *phydev = net->phydev;
1293 int buf;
1294
1295 if (mdix_ctrl == ETH_TP_MDI) {
1296 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1297 LAN88XX_EXT_PAGE_SPACE_1);
1298 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1299 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1300 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1301 buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1302 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1303 LAN88XX_EXT_PAGE_SPACE_0);
1304 } else if (mdix_ctrl == ETH_TP_MDI_X) {
1305 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1306 LAN88XX_EXT_PAGE_SPACE_1);
1307 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1308 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1309 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1310 buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1311 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1312 LAN88XX_EXT_PAGE_SPACE_0);
1313 } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1314 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1315 LAN88XX_EXT_PAGE_SPACE_1);
1316 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1317 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1318 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1319 buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1320 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1321 LAN88XX_EXT_PAGE_SPACE_0);
1322 }
1323 dev->mdix_ctrl = mdix_ctrl;
1324}
1325
55d7de9d
WH
/* ethtool .get_settings: report link parameters from the PHY (via
 * phylib) and translate the LAN88xx MDI-X mode-control field into the
 * ethtool ETH_TP_MDI* values.  An unrecognized field value leaves
 * cmd's MDI-X members unmodified.
 */
static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	int buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_gset(phydev, cmd);

	/* raw extended mode-control register; mask to the MDI-X field */
	buf = lan78xx_get_mdix_status(net);

	buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
	if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
		cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
		cmd->eth_tp_mdix = ETH_TP_MDI;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
		cmd->eth_tp_mdix = ETH_TP_MDI_X;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1357
/* ethtool .set_settings: apply a changed MDI-X control, then set
 * speed/duplex/autoneg through phylib.  When autoneg is disabled the
 * link is bounced — BMCR is briefly forced into loopback and restored
 * — so the partner renegotiates against the newly forced parameters.
 */
static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
		lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
	}

	/* change speed & duplex */
	ret = phy_ethtool_sset(phydev, cmd);

	if (!cmd->autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1388
/* ethtool entry points.  The WoL/EEE/MDI-X handlers above talk to both
 * the MAC registers (over USB) and the attached phylib PHY.
 */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= lan78xx_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_settings	= lan78xx_get_settings,
	.set_settings	= lan78xx_set_settings,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
};
1408
/* ndo_do_ioctl: forward MII ioctls to the PHY.  Only valid while the
 * interface is up, since the PHY is connected in ndo_open and
 * disconnected in ndo_stop.
 */
static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, rq, cmd);
}
1416
1417static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1418{
1419 u32 addr_lo, addr_hi;
1420 int ret;
1421 u8 addr[6];
1422
1423 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1424 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1425
1426 addr[0] = addr_lo & 0xFF;
1427 addr[1] = (addr_lo >> 8) & 0xFF;
1428 addr[2] = (addr_lo >> 16) & 0xFF;
1429 addr[3] = (addr_lo >> 24) & 0xFF;
1430 addr[4] = addr_hi & 0xFF;
1431 addr[5] = (addr_hi >> 8) & 0xFF;
1432
1433 if (!is_valid_ether_addr(addr)) {
1434 /* reading mac address from EEPROM or OTP */
1435 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1436 addr) == 0) ||
1437 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1438 addr) == 0)) {
1439 if (is_valid_ether_addr(addr)) {
1440 /* eeprom values are valid so use them */
1441 netif_dbg(dev, ifup, dev->net,
1442 "MAC address read from EEPROM");
1443 } else {
1444 /* generate random MAC */
1445 random_ether_addr(addr);
1446 netif_dbg(dev, ifup, dev->net,
1447 "MAC address set to random addr");
1448 }
1449
1450 addr_lo = addr[0] | (addr[1] << 8) |
1451 (addr[2] << 16) | (addr[3] << 24);
1452 addr_hi = addr[4] | (addr[5] << 8);
1453
1454 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1455 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1456 } else {
1457 /* generate random MAC */
1458 random_ether_addr(addr);
1459 netif_dbg(dev, ifup, dev->net,
1460 "MAC address set to random addr");
1461 }
1462 }
1463
1464 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1465 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1466
1467 ether_addr_copy(dev->net->dev_addr, addr);
1468}
1469
ce85e13a
WH
1470/* MDIO read and write wrappers for phylib */
/* MDIO read wrapper for phylib: drive an MII read cycle through the
 * LAN78xx MII_ACC/MII_DATA registers.  Serialized by phy_mutex and
 * executed with the USB interface resumed.  Returns the 16-bit value
 * or a negative error code from the busy-wait.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	/* wait for the MII cycle to complete before reading the data */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return ret;
}
1505
1506static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1507 u16 regval)
1508{
1509 struct lan78xx_net *dev = bus->priv;
1510 u32 val, addr;
1511 int ret;
1512
1513 ret = usb_autopm_get_interface(dev->intf);
1514 if (ret < 0)
1515 return ret;
1516
1517 mutex_lock(&dev->phy_mutex);
1518
1519 /* confirm MII not busy */
1520 ret = lan78xx_phy_wait_not_busy(dev);
1521 if (ret < 0)
1522 goto done;
1523
1524 val = (u32)regval;
1525 ret = lan78xx_write_reg(dev, MII_DATA, val);
1526
1527 /* set the address, index & direction (write to PHY) */
1528 addr = mii_access(phy_id, idx, MII_WRITE);
1529 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1530
1531 ret = lan78xx_phy_wait_not_busy(dev);
1532 if (ret < 0)
1533 goto done;
1534
1535done:
1536 mutex_unlock(&dev->phy_mutex);
1537 usb_autopm_put_interface(dev->intf);
1538 return 0;
1539}
1540
/* Allocate and register the MDIO bus that reaches the (internal) PHY.
 * For the LAN7800/7850 the scan mask is restricted to address 1, the
 * internal PHY.  Returns 0 on success or a negative errno; on
 * registration failure the bus allocation is released.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";

	/* bus id unique per USB bus/device position */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	}

	ret = mdiobus_register(dev->mdiobus);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
1579
/* Tear down the MDIO bus created by lan78xx_mdio_init(). */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1585
/* phylib link-change callback.  Intentionally empty: link handling is
 * driven by the driver's own interrupt/EVENT_LINK_RESET machinery.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	/* nothing to do */
}
1590
1591static int lan78xx_phy_init(struct lan78xx_net *dev)
1592{
ce85e13a
WH
1593 int ret;
1594 struct phy_device *phydev = dev->net->phydev;
55d7de9d 1595
ce85e13a
WH
1596 phydev = phy_find_first(dev->mdiobus);
1597 if (!phydev) {
1598 netdev_err(dev->net, "no PHY found\n");
1599 return -EIO;
1600 }
55d7de9d 1601
e4953910
WH
1602 /* Enable PHY interrupts.
1603 * We handle our own interrupt
1604 */
1605 ret = phy_read(phydev, LAN88XX_INT_STS);
1606 ret = phy_write(phydev, LAN88XX_INT_MASK,
1607 LAN88XX_INT_MASK_MDINTPIN_EN_ |
1608 LAN88XX_INT_MASK_LINK_CHANGE_);
1609
1610 phydev->irq = PHY_IGNORE_INTERRUPT;
1611
ce85e13a
WH
1612 ret = phy_connect_direct(dev->net, phydev,
1613 lan78xx_link_status_change,
1614 PHY_INTERFACE_MODE_GMII);
1615 if (ret) {
1616 netdev_err(dev->net, "can't attach PHY to %s\n",
1617 dev->mdiobus->id);
1618 return -EIO;
1619 }
55d7de9d
WH
1620
1621 /* set to AUTOMDIX */
758c5c11 1622 lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
55d7de9d 1623
ce85e13a
WH
1624 /* MAC doesn't support 1000T Half */
1625 phydev->supported &= ~SUPPORTED_1000baseT_Half;
e270b2db 1626
ce85e13a
WH
1627 genphy_config_aneg(phydev);
1628
ce85e13a 1629 phy_start(phydev);
55d7de9d
WH
1630
1631 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1632
1633 return 0;
1634}
1635
1636static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1637{
1638 int ret = 0;
1639 u32 buf;
1640 bool rxenabled;
1641
1642 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1643
1644 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
1645
1646 if (rxenabled) {
1647 buf &= ~MAC_RX_RXEN_;
1648 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1649 }
1650
1651 /* add 4 to size for FCS */
1652 buf &= ~MAC_RX_MAX_SIZE_MASK_;
1653 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1654
1655 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1656
1657 if (rxenabled) {
1658 buf |= MAC_RX_RXEN_;
1659 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1660 }
1661
1662 return 0;
1663}
1664
/* Asynchronously unlink every URB on queue q that is not already being
 * unlinked.  Returns the number of URBs whose unlink was issued.
 *
 * The queue lock is dropped around usb_unlink_urb() because unlinks
 * can complete synchronously and the completion handler takes the same
 * lock; the URB refcount is held across that window to prevent a
 * use-after-free race with the .complete handler (see comment below).
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* find the next entry not yet marked unlink_start */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
1709
/* ndo_change_mtu: validate the new MTU, program the hardware maximum
 * frame length, and grow the RX URB size to match the new hard MTU.
 * If the URBs grew while the interface is running, in-flight RX URBs
 * are unlinked so they get resubmitted at the new size.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	if (new_mtu > MAX_SINGLE_PACKET_SIZE)
		return -EINVAL;

	if (new_mtu <= 0)
		return -EINVAL;
	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
1744
/* ndo_set_mac_address: validate and program a new MAC address into the
 * RX address registers.  Only permitted while the interface is down.
 */
int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 addr_lo, addr_hi;
	int ret;

	if (netif_running(netdev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	/* pack the six address bytes little-endian into two registers */
	addr_lo = netdev->dev_addr[0] |
		  netdev->dev_addr[1] << 8 |
		  netdev->dev_addr[2] << 16 |
		  netdev->dev_addr[3] << 24;
	addr_hi = netdev->dev_addr[4] |
		  netdev->dev_addr[5] << 8;

	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

	return 0;
}
1772
/* Enable or disable Rx checksum offload engine */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int ret;

	/* update the shadow copy under the lock; the (sleepable) USB
	 * register write happens after the spinlock is released
	 */
	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	return 0;
}
1803
/* Workqueue handler: flush the shadow VLAN filter table to the device.
 * Runs in process context because the dataport write sleeps (USB I/O),
 * which the VID add/kill ndo callbacks must not do.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
1813
1814static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
1815 __be16 proto, u16 vid)
1816{
1817 struct lan78xx_net *dev = netdev_priv(netdev);
1818 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1819 u16 vid_bit_index;
1820 u16 vid_dword_index;
1821
1822 vid_dword_index = (vid >> 5) & 0x7F;
1823 vid_bit_index = vid & 0x1F;
1824
1825 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
1826
1827 /* defer register writes to a sleepable context */
1828 schedule_work(&pdata->set_vlan);
1829
1830 return 0;
1831}
1832
1833static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
1834 __be16 proto, u16 vid)
1835{
1836 struct lan78xx_net *dev = netdev_priv(netdev);
1837 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1838 u16 vid_bit_index;
1839 u16 vid_dword_index;
1840
1841 vid_dword_index = (vid >> 5) & 0x7F;
1842 vid_bit_index = vid & 0x1F;
1843
1844 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
1845
1846 /* defer register writes to a sleepable context */
1847 schedule_work(&pdata->set_vlan);
1848
1849 return 0;
1850}
1851
/* Initialise the USB Latency Tolerance Messaging (LTM) registers.
 * When LTM is enabled, a 24-byte parameter block is looked up first in
 * EEPROM, then in OTP (offset 0x3F holds {length, pointer/2}); if
 * neither provides one, all six registers are programmed with zeros.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
1890
/* Perform a LiteReset and bring the chip into its configured running
 * state: MAC address, USB/LTM settings, burst/queue sizing by link
 * speed, FIFO sizes, receive filtering, PHY reset and finally the
 * TX/RX enables.  Returns 0 on success or -EIO if either reset fails
 * to complete within one second.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;

	/* issue a LiteReset and poll for the self-clearing bit */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* burst cap and queue depths depend on the USB link speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable multiple ethernet frames per USB transfer */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY, then wait for it to clear the reset bit AND
	 * report ready
	 */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable PHY interrupts */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	buf |= INT_ENP_PHY_INT;
	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);

	/* enable the TX path: MAC first, then the TX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable the RX path: MAC first, then the RX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2023
/* ndo_open: resume the interface, reset/configure the chip, attach the
 * PHY, submit the interrupt URB used for link-change notification and
 * start the TX queue.  Returns 0 on success or a negative errno.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	ret = lan78xx_reset(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto done;

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* force the first link-status evaluation via the kevent worker */
	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2064
/* Unlink all TX/RX URBs and wait for their completions to drain.
 * dev->wait is published so completion paths can wake us.
 *
 * NOTE(review): the wait loop uses && — it stops as soon as ANY of
 * rxq/txq/done becomes empty, not when all are drained.  This mirrors
 * the historical usbnet code it derives from, but looks suspicious;
 * confirm intent before "fixing".
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2090
/* ndo_stop: disconnect the PHY, stop the queue, drain all URBs and
 * quiesce deferred work, then drop the autopm reference taken in
 * ndo_open.  Always returns 0.
 */
int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	/* PHY was connected in lan78xx_phy_init() during open */
	phy_stop(net->phydev);
	phy_disconnect(net->phydev);
	net->phydev = NULL;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2125
/* Flatten a fragmented skb into a single buffer; thin wrapper kept so
 * TX prep reads at the driver's level of abstraction.
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2130
2131static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2132 struct sk_buff *skb, gfp_t flags)
2133{
2134 u32 tx_cmd_a, tx_cmd_b;
2135
2136 if (skb_headroom(skb) < TX_OVERHEAD) {
2137 struct sk_buff *skb2;
2138
2139 skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
2140 dev_kfree_skb_any(skb);
2141 skb = skb2;
2142 if (!skb)
2143 return NULL;
2144 }
2145
2146 if (lan78xx_linearize(skb) < 0)
2147 return NULL;
2148
2149 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2150
2151 if (skb->ip_summed == CHECKSUM_PARTIAL)
2152 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2153
2154 tx_cmd_b = 0;
2155 if (skb_is_gso(skb)) {
2156 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2157
2158 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2159
2160 tx_cmd_a |= TX_CMD_A_LSO_;
2161 }
2162
2163 if (skb_vlan_tag_present(skb)) {
2164 tx_cmd_a |= TX_CMD_A_IVTG_;
2165 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2166 }
2167
2168 skb_push(skb, 4);
2169 cpu_to_le32s(&tx_cmd_b);
2170 memcpy(skb->data, &tx_cmd_b, 4);
2171
2172 skb_push(skb, 4);
2173 cpu_to_le32s(&tx_cmd_a);
2174 memcpy(skb->data, &tx_cmd_a, 4);
2175
2176 return skb;
2177}
2178
/* Move skb from its current queue to dev->done and kick the bottom
 * half if it was the first entry.  Returns the skb's previous state.
 *
 * Locking: irqs are disabled once (irqsave on list->lock) and stay
 * disabled across the hand-over — list->lock is dropped without
 * restoring irqs, done.lock is taken, and the saved flags are restored
 * on the final unlock.  This keeps the move atomic w.r.t. interrupts.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2201
/* Bulk-out URB completion handler (interrupt context): update TX
 * statistics, translate USB errors into driver events, release the
 * async autopm reference and hand the skb to the bottom half.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* stalled endpoint: clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2240
/* Append newsk to list and record its state in the cb-embedded
 * skb_data.  Uses the unlocked __skb_queue_tail; callers appear to
 * hold the queue lock already — NOTE(review): confirm at call sites.
 */
static void lan78xx_queue_skb(struct sk_buff_head *list,
			      struct sk_buff *newsk, enum skb_state state)
{
	struct skb_data *entry = (struct skb_data *)newsk->cb;

	__skb_queue_tail(list, newsk);
	entry->state = state;
}
2249
/* ndo_start_xmit: prepend the TX command words and park the skb on the
 * pending queue; the bottom-half tasklet batches pending skbs into
 * bulk-out URBs.  Always returns NETDEV_TX_OK (failed preps are
 * counted as drops, not requeued).
 */
netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct sk_buff *skb2 = NULL;

	if (skb) {
		skb_tx_timestamp(skb);
		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
	}

	if (skb2) {
		skb_queue_tail(&dev->txq_pend, skb2);

		/* throttle TX patch at slower than SUPER SPEED USB */
		if ((dev->udev->speed < USB_SPEED_SUPER) &&
		    (skb_queue_len(&dev->txq_pend) > 10))
			netif_stop_queue(net);
	} else {
		netif_dbg(dev, tx_err, dev->net,
			  "lan78xx_tx_prep return NULL\n");
		dev->net->stats.tx_errors++;
		dev->net->stats.tx_dropped++;
	}

	tasklet_schedule(&dev->bh);

	return NETDEV_TX_OK;
}
2278
/* Scan the interface's altsettings for one bulk-in, one bulk-out and
 * (optionally) one interrupt-in endpoint.  On success the bulk pipes
 * are cached in @dev and the interrupt endpoint (possibly NULL) is
 * stored in dev->ep_intr.  Returns 0, or -EINVAL when no altsetting
 * provides both bulk endpoints.
 */
int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only interrupt-IN endpoints qualify */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		/* first altsetting with both bulk endpoints wins */
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2336
2337static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2338{
2339 struct lan78xx_priv *pdata = NULL;
2340 int ret;
2341 int i;
2342
2343 ret = lan78xx_get_endpoints(dev, intf);
2344
2345 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2346
2347 pdata = (struct lan78xx_priv *)(dev->data[0]);
2348 if (!pdata) {
2349 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2350 return -ENOMEM;
2351 }
2352
2353 pdata->dev = dev;
2354
2355 spin_lock_init(&pdata->rfe_ctl_lock);
2356 mutex_init(&pdata->dataport_mutex);
2357
2358 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2359
2360 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2361 pdata->vlan_table[i] = 0;
2362
2363 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2364
2365 dev->net->features = 0;
2366
2367 if (DEFAULT_TX_CSUM_ENABLE)
2368 dev->net->features |= NETIF_F_HW_CSUM;
2369
2370 if (DEFAULT_RX_CSUM_ENABLE)
2371 dev->net->features |= NETIF_F_RXCSUM;
2372
2373 if (DEFAULT_TSO_CSUM_ENABLE)
2374 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2375
2376 dev->net->hw_features = dev->net->features;
2377
2378 /* Init all registers */
2379 ret = lan78xx_reset(dev);
2380
ce85e13a
WH
2381 lan78xx_mdio_init(dev);
2382
55d7de9d
WH
2383 dev->net->flags |= IFF_MULTICAST;
2384
2385 pdata->wol = WAKE_MAGIC;
2386
2387 return 0;
2388}
2389
2390static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2391{
2392 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2393
ce85e13a
WH
2394 lan78xx_remove_mdio(dev);
2395
55d7de9d
WH
2396 if (pdata) {
2397 netif_dbg(dev, ifdown, dev->net, "free pdata");
2398 kfree(pdata);
2399 pdata = NULL;
2400 dev->data[0] = 0;
2401 }
2402}
2403
/* Set checksum state on a received skb from the RX command words.
 * When RX checksum offload is enabled and the hardware did not flag
 * the checksum as unusable (RX_CMD_A_ICSM_), export the raw 16-bit
 * checksum carried in rx_cmd_b as CHECKSUM_COMPLETE; otherwise fall
 * back to CHECKSUM_NONE and let the stack verify in software.
 */
static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	if (!(dev->net->features & NETIF_F_RXCSUM) ||
	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}
2416
/* Hand a fully-parsed frame to the network stack.  While RX is paused
 * (EVENT_RX_PAUSED) frames are parked on rxq_pause instead.  Updates
 * RX stats and clears the skb control block before netif_rx().
 */
void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, dev->net);
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* wipe skb_data state so stale urb/dev pointers can't leak */
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* if a timestamp consumer took the skb, we're done */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
2442
2443static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2444{
2445 if (skb->len < dev->net->hard_header_len)
2446 return 0;
2447
2448 while (skb->len > 0) {
2449 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2450 u16 rx_cmd_c;
2451 struct sk_buff *skb2;
2452 unsigned char *packet;
2453
2454 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2455 le32_to_cpus(&rx_cmd_a);
2456 skb_pull(skb, sizeof(rx_cmd_a));
2457
2458 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2459 le32_to_cpus(&rx_cmd_b);
2460 skb_pull(skb, sizeof(rx_cmd_b));
2461
2462 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2463 le16_to_cpus(&rx_cmd_c);
2464 skb_pull(skb, sizeof(rx_cmd_c));
2465
2466 packet = skb->data;
2467
2468 /* get the packet length */
2469 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2470 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2471
2472 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2473 netif_dbg(dev, rx_err, dev->net,
2474 "Error rx_cmd_a=0x%08x", rx_cmd_a);
2475 } else {
2476 /* last frame in this batch */
2477 if (skb->len == size) {
2478 lan78xx_rx_csum_offload(dev, skb,
2479 rx_cmd_a, rx_cmd_b);
2480
2481 skb_trim(skb, skb->len - 4); /* remove fcs */
2482 skb->truesize = size + sizeof(struct sk_buff);
2483
2484 return 1;
2485 }
2486
2487 skb2 = skb_clone(skb, GFP_ATOMIC);
2488 if (unlikely(!skb2)) {
2489 netdev_warn(dev->net, "Error allocating skb");
2490 return 0;
2491 }
2492
2493 skb2->len = size;
2494 skb2->data = packet;
2495 skb_set_tail_pointer(skb2, size);
2496
2497 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2498
2499 skb_trim(skb2, skb2->len - 4); /* remove fcs */
2500 skb2->truesize = size + sizeof(struct sk_buff);
2501
2502 lan78xx_skb_return(dev, skb2);
2503 }
2504
2505 skb_pull(skb, size);
2506
2507 /* padding bytes before the next frame starts */
2508 if (skb->len)
2509 skb_pull(skb, align_count);
2510 }
2511
55d7de9d
WH
2512 return 1;
2513}
2514
2515static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2516{
2517 if (!lan78xx_rx(dev, skb)) {
2518 dev->net->stats.rx_errors++;
2519 goto done;
2520 }
2521
2522 if (skb->len) {
2523 lan78xx_skb_return(dev, skb);
2524 return;
2525 }
2526
2527 netif_dbg(dev, rx_err, dev->net, "drop\n");
2528 dev->net->stats.rx_errors++;
2529done:
2530 skb_queue_tail(&dev->done, skb);
2531}
2532
2533static void rx_complete(struct urb *urb);
2534
/* Allocate an RX skb for @urb and submit it on the bulk-in pipe.
 * Consumes @urb: on any failure both urb and skb are freed here.
 * Returns 0 on success, -ENOMEM on skb allocation failure, -ENOLINK
 * when the device is gone/stopped, or the usb_submit_urb() error.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* only submit while the interface is up and not halted/asleep */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* let the bh tasklet retry filling the pool */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
2594
/* Completion handler for bulk-in (RX) URBs.  Classifies the transfer
 * result into an skb_state, hands the skb to the bh tasklet through
 * defer_bh(), and resubmits the urb when it is still usable.  For
 * fatal statuses the urb is parked in entry->urb (urb set to NULL)
 * so the bh tasklet frees it together with the skb.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* runt buffers can't even hold an Ethernet header */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* hand the urb to the bh tasklet for freeing */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	/* non-NULL urb means it is still good: try to resubmit it */
	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
2664
/* TX half of the bh tasklet.  Coalesces pending non-GSO skbs (each
 * padded to a 4-byte boundary) from txq_pend into one linear skb, or
 * dequeues a single GSO skb, then submits it as one bulk-out URB.
 * Holds an async autosuspend reference across the in-flight URB;
 * tx_complete() releases it.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	/* pass 1: decide how many pending skbs fit in one transfer */
	skb_totallen = 0;
	pkt_cnt = 0;
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			/* GSO skb goes out alone, uncopied */
			length = skb->len;
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	/* pass 2: dequeue and copy, keeping 4-byte alignment per frame */
	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

	length = skb_totallen;

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		dev->net->trans_start = jiffies;
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
2788
2789static void lan78xx_rx_bh(struct lan78xx_net *dev)
2790{
2791 struct urb *urb;
2792 int i;
2793
2794 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
2795 for (i = 0; i < 10; i++) {
2796 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
2797 break;
2798 urb = usb_alloc_urb(0, GFP_ATOMIC);
2799 if (urb)
2800 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
2801 return;
2802 }
2803
2804 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
2805 tasklet_schedule(&dev->bh);
2806 }
2807 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
2808 netif_wake_queue(dev->net);
2809}
2810
/* The driver's bh tasklet.  Drains dev->done, dispatching each skb by
 * its skb_data state (process RX, free completed TX, free cleanup
 * entries), then refills RX and flushes pending TX while the device
 * is present and running.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			/* pre-mark for cleanup in case processing requeues */
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* unexpected state: bail out of the tasklet */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		/* don't refill RX while throttled or halted */
		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
2847
/* Deferred-event worker (dev->wq).  Handles events flagged via
 * lan78xx_defer_kevent() from interrupt context: clears TX/RX
 * endpoint halts and performs link resets.  Note the unconventional
 * but deliberate gotos that jump into the error-logging branches of
 * the if statements below (usbnet-style error handling).
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt(dev->udev, dev->pipe_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
fail_pipe:
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
fail_halt:
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		int ret = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (lan78xx_link_reset(dev) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}
}
2914
/* Completion handler for the interrupt-in status URB: process the
 * status report on success, give up permanently on shutdown-type
 * errors, and otherwise resubmit the URB while the interface is up.
 */
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_running(dev->net))
		return;

	/* clear the buffer so a stale report can't be re-read */
	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status != 0)
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
}
2950
/* USB disconnect callback: unregister the netdev, cancel deferred
 * work, flush anchored (suspend-deferred) URBs, undo bind, kill the
 * interrupt URB and release the netdev and usb device references.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);

	net = dev->net;
	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	/* drops the reference taken with usb_get_dev() in probe */
	free_netdev(net);
	usb_put_dev(udev);
}
2979
/* netdev watchdog hook: cancel in-flight TX URBs and kick the bh
 * tasklet so transmission can be restarted.
 */
void lan78xx_tx_timeout(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
2987
/* net_device operations implemented by this driver */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
};
3002
3003static int lan78xx_probe(struct usb_interface *intf,
3004 const struct usb_device_id *id)
3005{
3006 struct lan78xx_net *dev;
3007 struct net_device *netdev;
3008 struct usb_device *udev;
3009 int ret;
3010 unsigned maxp;
3011 unsigned period;
3012 u8 *buf = NULL;
3013
3014 udev = interface_to_usbdev(intf);
3015 udev = usb_get_dev(udev);
3016
3017 ret = -ENOMEM;
3018 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3019 if (!netdev) {
3020 dev_err(&intf->dev, "Error: OOM\n");
3021 goto out1;
3022 }
3023
3024 /* netdev_printk() needs this */
3025 SET_NETDEV_DEV(netdev, &intf->dev);
3026
3027 dev = netdev_priv(netdev);
3028 dev->udev = udev;
3029 dev->intf = intf;
3030 dev->net = netdev;
3031 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3032 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3033
3034 skb_queue_head_init(&dev->rxq);
3035 skb_queue_head_init(&dev->txq);
3036 skb_queue_head_init(&dev->done);
3037 skb_queue_head_init(&dev->rxq_pause);
3038 skb_queue_head_init(&dev->txq_pend);
3039 mutex_init(&dev->phy_mutex);
3040
3041 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3042 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3043 init_usb_anchor(&dev->deferred);
3044
3045 netdev->netdev_ops = &lan78xx_netdev_ops;
3046 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3047 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3048
3049 ret = lan78xx_bind(dev, intf);
3050 if (ret < 0)
3051 goto out2;
3052 strcpy(netdev->name, "eth%d");
3053
3054 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3055 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3056
3057 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3058 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3059 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3060
3061 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3062 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3063
3064 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3065 dev->ep_intr->desc.bEndpointAddress &
3066 USB_ENDPOINT_NUMBER_MASK);
3067 period = dev->ep_intr->desc.bInterval;
3068
3069 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3070 buf = kmalloc(maxp, GFP_KERNEL);
3071 if (buf) {
3072 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3073 if (!dev->urb_intr) {
3074 kfree(buf);
3075 goto out3;
3076 } else {
3077 usb_fill_int_urb(dev->urb_intr, dev->udev,
3078 dev->pipe_intr, buf, maxp,
3079 intr_complete, dev, period);
3080 }
3081 }
3082
3083 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3084
3085 /* driver requires remote-wakeup capability during autosuspend. */
3086 intf->needs_remote_wakeup = 1;
3087
3088 ret = register_netdev(netdev);
3089 if (ret != 0) {
3090 netif_err(dev, probe, netdev, "couldn't register the device\n");
3091 goto out2;
3092 }
3093
3094 usb_set_intfdata(intf, dev);
3095
3096 ret = device_set_wakeup_enable(&udev->dev, true);
3097
3098 /* Default delay of 2sec has more overhead than advantage.
3099 * Set to 10sec as default.
3100 */
3101 pm_runtime_set_autosuspend_delay(&udev->dev,
3102 DEFAULT_AUTOSUSPEND_DELAY);
3103
3104 return 0;
3105
55d7de9d
WH
3106out3:
3107 lan78xx_unbind(dev, intf);
3108out2:
3109 free_netdev(netdev);
3110out1:
3111 usb_put_dev(udev);
3112
3113 return ret;
3114}
3115
3116static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3117{
3118 const u16 crc16poly = 0x8005;
3119 int i;
3120 u16 bit, crc, msb;
3121 u8 data;
3122
3123 crc = 0xFFFF;
3124 for (i = 0; i < len; i++) {
3125 data = *buf++;
3126 for (bit = 0; bit < 8; bit++) {
3127 msb = crc >> 15;
3128 crc <<= 1;
3129
3130 if (msb ^ (u16)(data & 1)) {
3131 crc ^= crc16poly;
3132 crc |= (u16)0x0001U;
3133 }
3134 data >>= 1;
3135 }
3136 }
3137
3138 return crc;
3139}
3140
/* Program the chip's Wake-on-LAN machinery for a system suspend.
 * Disables TX/RX, clears wake status, builds WUF_CFG/WUF_MASK filter
 * entries for the requested @wol modes (ethtool WAKE_* bits), selects
 * an appropriate suspend mode in PMT_CTL and re-enables RX so wake
 * frames can be seen.  Always returns 0; register I/O results are
 * not checked (consistent with the rest of this file).
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* quiesce the MAC while reprogramming wake logic */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* start from a clean set of wakeup-frame filters */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x7: match the first 3 bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3: match the first 2 bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask bits 12-13: the EtherType field */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable RX so wake frames can reach the wake logic */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3283
/* USB suspend callback.  On the first (outermost) suspend: refuse
 * autosuspend while TX work is pending, otherwise stop the MAC,
 * detach the netdev and kill all URBs.  Then arm the wake logic:
 * good-frame wake for autosuspend, or the user's WoL settings
 * (lan78xx_set_suspend) for system suspend.
 */
int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* clear any stale wake status */
			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* RX stays on so wake frames are detected */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
3377
/* USB resume callback.  On the last (outermost) resume: restart the
 * interrupt URB, flush TX URBs that were anchored on dev->deferred
 * while asleep, clear the ASLEEP flag and restart the queues.  Then
 * disarm the wake logic, acknowledge any wake events and re-enable
 * the transmitter.  Always returns 0.
 */
int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->urb_intr, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		/* submit TX URBs deferred while the device was asleep */
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				/* release the pm ref taken at submit time */
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* disarm wake sources and clear recorded wake reasons */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3438
/* USB reset-resume callback: the chip lost its state across the
 * reset, so re-init registers and the PHY before running the normal
 * resume path.
 */
int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);

	lan78xx_phy_init(dev);

	return lan78xx_resume(intf);
}
3449
/* USB IDs this driver binds to */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
3462
/* USB driver registration; autosuspend is supported and hub-initiated
 * LPM is disabled for this device.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};
3474
3475module_usb_driver(lan78xx_driver);
3476
3477MODULE_AUTHOR(DRIVER_AUTHOR);
3478MODULE_DESCRIPTION(DRIVER_DESC);
3479MODULE_LICENSE("GPL");