]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/net/usb/lan78xx.c
phy: mdio-octeon: Use devm_mdiobus_alloc_size()
[mirror_ubuntu-artful-kernel.git] / drivers / net / usb / lan78xx.c
CommitLineData
55d7de9d
WH
1/*
2 * Copyright (C) 2015 Microchip Technology
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17#include <linux/version.h>
18#include <linux/module.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/ethtool.h>
55d7de9d
WH
22#include <linux/usb.h>
23#include <linux/crc32.h>
24#include <linux/signal.h>
25#include <linux/slab.h>
26#include <linux/if_vlan.h>
27#include <linux/uaccess.h>
28#include <linux/list.h>
29#include <linux/ip.h>
30#include <linux/ipv6.h>
31#include <linux/mdio.h>
32#include <net/ip6_checksum.h>
bdfba55e 33#include <linux/microchipphy.h>
55d7de9d
WH
34#include "lan78xx.h"
35
36#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
37#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
38#define DRIVER_NAME "lan78xx"
ce85e13a 39#define DRIVER_VERSION "1.0.1"
55d7de9d
WH
40
41#define TX_TIMEOUT_JIFFIES (5 * HZ)
42#define THROTTLE_JIFFIES (HZ / 8)
43#define UNLINK_TIMEOUT_MS 3
44
45#define RX_MAX_QUEUE_MEMORY (60 * 1518)
46
47#define SS_USB_PKT_SIZE (1024)
48#define HS_USB_PKT_SIZE (512)
49#define FS_USB_PKT_SIZE (64)
50
51#define MAX_RX_FIFO_SIZE (12 * 1024)
52#define MAX_TX_FIFO_SIZE (12 * 1024)
53#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
54#define DEFAULT_BULK_IN_DELAY (0x0800)
55#define MAX_SINGLE_PACKET_SIZE (9000)
56#define DEFAULT_TX_CSUM_ENABLE (true)
57#define DEFAULT_RX_CSUM_ENABLE (true)
58#define DEFAULT_TSO_CSUM_ENABLE (true)
59#define DEFAULT_VLAN_FILTER_ENABLE (true)
55d7de9d
WH
60#define TX_OVERHEAD (8)
61#define RXW_PADDING 2
62
63#define LAN78XX_USB_VENDOR_ID (0x0424)
64#define LAN7800_USB_PRODUCT_ID (0x7800)
65#define LAN7850_USB_PRODUCT_ID (0x7850)
66#define LAN78XX_EEPROM_MAGIC (0x78A5)
67#define LAN78XX_OTP_MAGIC (0x78F3)
68
69#define MII_READ 1
70#define MII_WRITE 0
71
72#define EEPROM_INDICATOR (0xA5)
73#define EEPROM_MAC_OFFSET (0x01)
74#define MAX_EEPROM_SIZE 512
75#define OTP_INDICATOR_1 (0xF3)
76#define OTP_INDICATOR_2 (0xF7)
77
78#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
79 WAKE_MCAST | WAKE_BCAST | \
80 WAKE_ARP | WAKE_MAGIC)
81
82/* USB related defines */
83#define BULK_IN_PIPE 1
84#define BULK_OUT_PIPE 2
85
86/* default autosuspend delay (mSec)*/
87#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
88
/* ethtool statistics names; order must match the field order of
 * struct lan78xx_statstage, which is copied out verbatim as u32s.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
138
/* Hardware statistics block as returned little-endian by the
 * USB_VENDOR_REQUEST_GET_STATS vendor command (see lan78xx_read_stats).
 * Field order must match lan78xx_gstrings[].
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
188
189struct lan78xx_net;
190
/* Driver-private filtering/wakeup state, hung off lan78xx_net::data[0]. */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;			/* cached RFE_CTL register value */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;			/* enabled WAKE_* wake sources */
};
203
/* Lifecycle of an skb queued on the driver's rx/tx URB queues. */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
213
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;	/* URB carrying this skb, if any */
	struct lan78xx_net *dev;
	enum skb_state state;	/* where the skb is in its lifecycle */
	size_t length;		/* payload length for accounting */
};
220
/* Context carried by asynchronous USB control requests. */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
225
226#define EVENT_TX_HALT 0
227#define EVENT_RX_HALT 1
228#define EVENT_RX_MEMORY 2
229#define EVENT_STS_SPLIT 3
230#define EVENT_LINK_RESET 4
231#define EVENT_RX_PAUSED 5
232#define EVENT_DEV_WAKING 6
233#define EVENT_DEV_ASLEEP 7
234#define EVENT_DEV_OPEN 8
235
/* Per-device state for one LAN78xx USB network adapter. */
struct lan78xx_net {
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;
	void *driver_priv;		/* points at struct lan78xx_priv */

	int rx_qlen;			/* rx URB queue depth */
	int tx_qlen;			/* tx URB queue depth */
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head done;	/* completed URBs awaiting bh */
	struct sk_buff_head rxq_pause;
	struct sk_buff_head txq_pend;

	struct tasklet_struct bh;	/* rx/tx completion bottom half */
	struct delayed_work wq;		/* keventd work, see lan78xx_defer_kevent */

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int msg_enable;			/* netif_msg_* bitmap */

	struct urb *urb_intr;
	struct usb_anchor deferred;

	struct mutex phy_mutex; /* for phy access */
	unsigned pipe_in, pipe_out, pipe_intr;

	u32 hard_mtu;			/* count any extra framing */
	size_t rx_urb_size;		/* size for rx urbs */

	unsigned long flags;		/* EVENT_* bits */

	wait_queue_head_t *wait;
	unsigned char suspend_count;

	unsigned maxpacket;
	struct timer_list delay;

	unsigned long data[5];		/* data[0] holds lan78xx_priv pointer */

	int link_on;			/* last observed link state */
	u8 mdix_ctrl;			/* cached ETH_TP_MDI* setting */

	u32 devid;			/* chip ID/revision */
	struct mii_bus *mdiobus;
};
284
285/* use ethtool to change the level for any given device */
286static int msg_level = -1;
287module_param(msg_level, int, 0);
288MODULE_PARM_DESC(msg_level, "Override default message level");
289
/* Read a 32-bit device register via a vendor control transfer.
 * Returns the usb_control_msg() result (>= 0 on success, bytes
 * transferred) or -ENOMEM; *data is only valid on success.
 */
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	/* heap buffer: USB control transfers need DMA-able memory;
	 * an on-stack buffer is presumably not safe here — standard
	 * USB-driver practice
	 */
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		/* device speaks little-endian; convert in place */
		le32_to_cpus(buf);
		*data = *buf;
	} else {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}
315
/* Write a 32-bit device register via a vendor control transfer.
 * Returns the usb_control_msg() result (>= 0 on success) or -ENOMEM.
 */
static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	/* heap buffer for the same DMA-safety reason as lan78xx_read_reg */
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);	/* device expects little-endian */

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}
341
/* Fetch the full hardware statistics block and copy it, converted to
 * host endianness, into *data. Returns the usb_control_msg() result
 * (> 0 on success) or -ENOMEM.
 */
static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	/* bounce buffer: caller's struct may be on the stack */
	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT); /* NOTE(review): GET timeout
						      * would seem more apt for an
						      * IN transfer — confirm */
	if (likely(ret >= 0)) {
		/* struct is all-u32, so convert word by word */
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = 0x%x", ret);
	}

	kfree(stats);

	return ret;
}
380
/* Loop until the read is completed with timeout called with phy_mutex held.
 * Polls MII_ACC until the BUSY bit clears; gives up after ~1s (HZ jiffies).
 * Returns 0 when idle, -EIO on register-read failure or timeout.
 */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}
399
400static inline u32 mii_access(int id, int index, int read)
401{
402 u32 ret;
403
404 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
405 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
406 if (read)
407 ret |= MII_ACC_MII_READ_;
408 else
409 ret |= MII_ACC_MII_WRITE_;
410 ret |= MII_ACC_MII_BUSY_;
411
412 return ret;
413}
414
55d7de9d
WH
/* Wait (up to ~1s) for an issued EEPROM command to complete.
 * Returns 0 on completion, -EIO on read failure, controller-reported
 * timeout, or polling timeout.
 */
static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		/* done, or the controller itself flagged a timeout */
		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}
439
/* Confirm the EEPROM controller is idle before issuing a new command.
 * Polls for up to ~1s; returns 0 when idle, -EIO otherwise.
 */
static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}
460
/* Read 'length' bytes starting at 'offset' from the raw EEPROM into
 * 'data', one byte per READ command. Returns 0 on success or a
 * negative errno.
 */
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	int i, ret;

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret)
		return ret;

	for (i = 0; i < length; i++) {
		/* issue a one-byte READ at the current offset */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0))
			return -EIO;

		ret = lan78xx_wait_eeprom(dev);
		if (ret < 0)
			return ret;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0))
			return -EIO;

		data[i] = val & 0xFF;	/* only the low byte is data */
		offset++;
	}

	return 0;
}
492
493static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
494 u32 length, u8 *data)
495{
496 u8 sig;
497 int ret;
498
499 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
500 if ((ret == 0) && (sig == EEPROM_INDICATOR))
501 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
502 else
503 ret = -EINVAL;
504
505 return ret;
506}
507
/* Write 'length' bytes starting at 'offset' to the raw EEPROM.
 * Issues an erase/write-enable first, then one WRITE command per byte.
 * Returns 0 on success or a negative errno.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	int i, ret;

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret)
		return ret;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0))
		return -EIO;

	ret = lan78xx_wait_eeprom(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0)
			return ret;

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		if (ret < 0)
			return ret;

		offset++;
	}

	return 0;
}
551
/* Read 'length' bytes at 'offset' from the raw OTP array.
 * Powers the OTP block up if it is in power-down, then issues one
 * READ+GO command per byte, polling OTP_STATUS with a ~1s timeout.
 * Returns 0 on success, -EIO on timeout.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* address is split across two registers: high bits in
		 * ADDR1, low bits in ADDR2
		 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
605
606static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
607 u32 length, u8 *data)
608{
609 u8 sig;
610 int ret;
611
612 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
613
614 if (ret == 0) {
615 if (sig == OTP_INDICATOR_1)
616 offset = offset;
617 else if (sig == OTP_INDICATOR_2)
618 offset += 0x100;
619 else
620 ret = -EINVAL;
621 ret = lan78xx_read_raw_otp(dev, offset, length, data);
622 }
623
624 return ret;
625}
626
/* Wait for the internal dataport to become ready (DP_SEL_DPRDY_ set).
 * Polls up to 100 times with short sleeps; returns 0 when ready,
 * -EIO on read failure or timeout.
 */
static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");

	return -EIO;
}
648
/* Write 'length' words from 'buf' into internal RAM selected by
 * 'ram_select' starting at 'addr', serialized by dataport_mutex.
 * Returns the last register-access result, or 0 if runtime resume
 * fails (NOTE(review): the autopm failure is silently reported as
 * success — confirm this is intentional).
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* select the target RAM, preserving the other DP_SEL bits */
	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
689
690static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
691 int index, u8 addr[ETH_ALEN])
692{
693 u32 temp;
694
695 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
696 temp = addr[3];
697 temp = addr[2] | (temp << 8);
698 temp = addr[1] | (temp << 8);
699 temp = addr[0] | (temp << 8);
700 pdata->pfilter_table[index][1] = temp;
701 temp = addr[5];
702 temp = addr[4] | (temp << 8);
703 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
704 pdata->pfilter_table[index][0] = temp;
705 }
706}
707
708/* returns hash bit number for given MAC address */
709static inline u32 lan78xx_hash(char addr[ETH_ALEN])
710{
711 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
712}
713
/* Work handler: push the multicast hash table, perfect-filter table
 * and RFE_CTL value cached by lan78xx_set_multicast() to the hardware.
 * Runs in process context because the register writes may sleep.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	/* slot 0 holds the device's own address; start at 1.
	 * MAF_HI is cleared first so the slot is invalid while LO updates.
	 */
	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
738
/* ndo_set_rx_mode handler. Rebuilds the cached RFE_CTL flags,
 * perfect-filter table and multicast hash under rfe_ctl_lock (this
 * runs in atomic context), then defers the actual register writes
 * to the set_multicast work item.
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	/* start from a clean slate: no unicast/multicast filtering modes */
	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] =
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;	/* shadows the outer i intentionally kept as-is */

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		/* first 32 addresses go into perfect filters, the rest
		 * into the hash table
		 */
		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
						(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
801
/* Program MAC flow control from the resolved pause autonegotiation
 * result (lcladv/rmtadv). The 'duplex' parameter is currently unused.
 * FCT_FLOW thresholds are bus-speed dependent magic values.
 * Always returns 0.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;

	u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);

	if (cap & FLOW_CTRL_TX)
		flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);	/* max pause time */

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	/* FIFO thresholds tuned per USB bus speed */
	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
832
/* Handle a PHY link-change event (scheduled via EVENT_LINK_RESET):
 * clear interrupt status, then either reset the MAC on link-down or,
 * on link-up, tune USB3 U1/U2 link power states for the negotiated
 * speed and program flow control from the advertisement registers.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	int ladv, radv, ret;
	u32 buf;

	/* clear PHY interrupt status */
	ret = phy_read(phydev, LAN88XX_INT_STS);
	if (unlikely(ret < 0))
		return -EIO;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		/* link went down */
		dev->link_on = false;
		netif_carrier_off(dev->net);

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;
	} else if (phydev->link && !dev->link_on) {
		/* link came up */
		dev->link_on = true;

		phy_ethtool_gset(phydev, &ecmd);

		ret = phy_read(phydev, LAN88XX_INT_STS);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			/* U2 exit latency is too high at gigabit, so at
			 * 1000 Mbps only U1 is enabled; otherwise both
			 */
			if (ethtool_cmd_speed(&ecmd) == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
		netif_carrier_on(dev->net);
	}

	return ret;
}
908
909/* some work can't be done in tasklets, so we use keventd
910 *
911 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
912 * but tasklet_schedule() doesn't. hope the failure is rare.
913 */
914void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
915{
916 set_bit(work, &dev->flags);
917 if (!schedule_delayed_work(&dev->wq, 0))
918 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
919}
920
/* Interrupt-endpoint completion handler: decode the 4-byte status
 * word and schedule a link reset when the PHY interrupt bit is set.
 */
static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	/* status word arrives little-endian */
	memcpy(&intdata, urb->transfer_buffer, 4);
	le32_to_cpus(&intdata);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
	} else
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
}
941
942static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
943{
944 return MAX_EEPROM_SIZE;
945}
946
947static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
948 struct ethtool_eeprom *ee, u8 *data)
949{
950 struct lan78xx_net *dev = netdev_priv(netdev);
951
952 ee->magic = LAN78XX_EEPROM_MAGIC;
953
954 return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
955}
956
957static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
958 struct ethtool_eeprom *ee, u8 *data)
959{
960 struct lan78xx_net *dev = netdev_priv(netdev);
961
962 /* Allow entire eeprom update only */
963 if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
964 (ee->offset == 0) &&
965 (ee->len == 512) &&
966 (data[0] == EEPROM_INDICATOR))
967 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
968 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
969 (ee->offset == 0) &&
970 (ee->len == 512) &&
971 (data[0] == OTP_INDICATOR_1))
972 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
973
974 return -EINVAL;
975}
976
977static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
978 u8 *data)
979{
980 if (stringset == ETH_SS_STATS)
981 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
982}
983
984static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
985{
986 if (sset == ETH_SS_STATS)
987 return ARRAY_SIZE(lan78xx_gstrings);
988 else
989 return -EOPNOTSUPP;
990}
991
/* ethtool get_ethtool_stats: read the hardware statistics block and
 * widen each u32 counter into the u64 ethtool array. Silently returns
 * with stale data if runtime resume or the USB read fails.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_statstage lan78xx_stat;
	u32 *p;
	int i;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
		p = (u32 *)&lan78xx_stat;
		for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
			data[i] = p[i];
	}

	usb_autopm_put_interface(dev->intf);
}
1011
/* ethtool get_wol: report Wake-on-LAN capability and the currently
 * enabled wake sources. Capability depends on the USB_CFG0 remote
 * wakeup bit; on any failure both fields are reported as 0.
 */
static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}
1039
1040static int lan78xx_set_wol(struct net_device *netdev,
1041 struct ethtool_wolinfo *wol)
1042{
1043 struct lan78xx_net *dev = netdev_priv(netdev);
1044 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1045 int ret;
1046
1047 ret = usb_autopm_get_interface(dev->intf);
1048 if (ret < 0)
1049 return ret;
1050
1051 pdata->wol = 0;
1052 if (wol->wolopts & WAKE_UCAST)
1053 pdata->wol |= WAKE_UCAST;
1054 if (wol->wolopts & WAKE_MCAST)
1055 pdata->wol |= WAKE_MCAST;
1056 if (wol->wolopts & WAKE_BCAST)
1057 pdata->wol |= WAKE_BCAST;
1058 if (wol->wolopts & WAKE_MAGIC)
1059 pdata->wol |= WAKE_MAGIC;
1060 if (wol->wolopts & WAKE_PHY)
1061 pdata->wol |= WAKE_PHY;
1062 if (wol->wolopts & WAKE_ARP)
1063 pdata->wol |= WAKE_ARP;
1064
1065 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1066
ce85e13a
WH
1067 phy_ethtool_set_wol(netdev->phydev, wol);
1068
55d7de9d
WH
1069 usb_autopm_put_interface(dev->intf);
1070
1071 return ret;
1072}
1073
/* ethtool get_eee: combine the PHY's EEE report with the MAC's
 * MAC_CR_EEE_EN_ state; when enabled, the LPI request delay register
 * is reported as the tx_lpi_timer (both are in microseconds).
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* active only if both ends advertised a common EEE mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1111
/* ethtool set_eee: toggle MAC_CR_EEE_EN_ and, when enabling, push the
 * request to the PHY and program the LPI request delay.
 * NOTE(review): intermediate 'ret' values (positive byte counts from
 * lan78xx_write_reg on success) are discarded and 0 is always
 * returned — confirm whether failures should be propagated.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
1141
1142static u32 lan78xx_get_link(struct net_device *net)
1143{
ce85e13a 1144 phy_read_status(net->phydev);
55d7de9d 1145
ce85e13a 1146 return net->phydev->link;
55d7de9d
WH
1147}
1148
1149int lan78xx_nway_reset(struct net_device *net)
1150{
ce85e13a 1151 return phy_start_aneg(net->phydev);
55d7de9d
WH
1152}
1153
1154static void lan78xx_get_drvinfo(struct net_device *net,
1155 struct ethtool_drvinfo *info)
1156{
1157 struct lan78xx_net *dev = netdev_priv(net);
1158
1159 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1160 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1161 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1162}
1163
1164static u32 lan78xx_get_msglevel(struct net_device *net)
1165{
1166 struct lan78xx_net *dev = netdev_priv(net);
1167
1168 return dev->msg_enable;
1169}
1170
1171static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1172{
1173 struct lan78xx_net *dev = netdev_priv(net);
1174
1175 dev->msg_enable = level;
1176}
1177
758c5c11
WH
/* Read the PHY's MDI/MDI-X mode control register, which lives in
 * extended page space 1; the page is restored to 0 afterwards.
 * Returns the raw LAN88XX_EXT_MODE_CTRL value (or a phy_read error).
 */
static int lan78xx_get_mdix_status(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int buf;

	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
	buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);

	return buf;
}
1189
1190static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1191{
1192 struct lan78xx_net *dev = netdev_priv(net);
1193 struct phy_device *phydev = net->phydev;
1194 int buf;
1195
1196 if (mdix_ctrl == ETH_TP_MDI) {
1197 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1198 LAN88XX_EXT_PAGE_SPACE_1);
1199 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1200 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1201 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1202 buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1203 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1204 LAN88XX_EXT_PAGE_SPACE_0);
1205 } else if (mdix_ctrl == ETH_TP_MDI_X) {
1206 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1207 LAN88XX_EXT_PAGE_SPACE_1);
1208 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1209 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1210 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1211 buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1212 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1213 LAN88XX_EXT_PAGE_SPACE_0);
1214 } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1215 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1216 LAN88XX_EXT_PAGE_SPACE_1);
1217 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1218 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1219 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1220 buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1221 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1222 LAN88XX_EXT_PAGE_SPACE_0);
1223 }
1224 dev->mdix_ctrl = mdix_ctrl;
1225}
1226
55d7de9d
WH
1227static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
1228{
1229 struct lan78xx_net *dev = netdev_priv(net);
ce85e13a 1230 struct phy_device *phydev = net->phydev;
55d7de9d
WH
1231 int ret;
1232 int buf;
1233
55d7de9d
WH
1234 ret = usb_autopm_get_interface(dev->intf);
1235 if (ret < 0)
1236 return ret;
1237
ce85e13a 1238 ret = phy_ethtool_gset(phydev, cmd);
55d7de9d 1239
758c5c11 1240 buf = lan78xx_get_mdix_status(net);
55d7de9d 1241
bdfba55e
WH
1242 buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1243 if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
55d7de9d
WH
1244 cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
1245 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
bdfba55e 1246 } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
55d7de9d
WH
1247 cmd->eth_tp_mdix = ETH_TP_MDI;
1248 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
bdfba55e 1249 } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
55d7de9d
WH
1250 cmd->eth_tp_mdix = ETH_TP_MDI_X;
1251 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
1252 }
1253
1254 usb_autopm_put_interface(dev->intf);
1255
1256 return ret;
1257}
1258
1259static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
1260{
1261 struct lan78xx_net *dev = netdev_priv(net);
ce85e13a 1262 struct phy_device *phydev = net->phydev;
55d7de9d
WH
1263 int ret = 0;
1264 int temp;
1265
55d7de9d
WH
1266 ret = usb_autopm_get_interface(dev->intf);
1267 if (ret < 0)
1268 return ret;
1269
1270 if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
758c5c11 1271 lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
55d7de9d
WH
1272 }
1273
1274 /* change speed & duplex */
ce85e13a 1275 ret = phy_ethtool_sset(phydev, cmd);
55d7de9d
WH
1276
1277 if (!cmd->autoneg) {
1278 /* force link down */
ce85e13a
WH
1279 temp = phy_read(phydev, MII_BMCR);
1280 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
55d7de9d 1281 mdelay(1);
ce85e13a 1282 phy_write(phydev, MII_BMCR, temp);
55d7de9d
WH
1283 }
1284
1285 usb_autopm_put_interface(dev->intf);
1286
1287 return ret;
1288}
1289
/* ethtool callbacks exported by LAN78xx network devices */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link = lan78xx_get_link,
	.nway_reset = lan78xx_nway_reset,
	.get_drvinfo = lan78xx_get_drvinfo,
	.get_msglevel = lan78xx_get_msglevel,
	.set_msglevel = lan78xx_set_msglevel,
	.get_settings = lan78xx_get_settings,
	.set_settings = lan78xx_set_settings,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom = lan78xx_ethtool_get_eeprom,
	.set_eeprom = lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings = lan78xx_get_strings,
	.get_wol = lan78xx_get_wol,
	.set_wol = lan78xx_set_wol,
	.get_eee = lan78xx_get_eee,
	.set_eee = lan78xx_set_eee,
};
1309
1310static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1311{
55d7de9d
WH
1312 if (!netif_running(netdev))
1313 return -EINVAL;
1314
ce85e13a 1315 return phy_mii_ioctl(netdev->phydev, rq, cmd);
55d7de9d
WH
1316}
1317
1318static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1319{
1320 u32 addr_lo, addr_hi;
1321 int ret;
1322 u8 addr[6];
1323
1324 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1325 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1326
1327 addr[0] = addr_lo & 0xFF;
1328 addr[1] = (addr_lo >> 8) & 0xFF;
1329 addr[2] = (addr_lo >> 16) & 0xFF;
1330 addr[3] = (addr_lo >> 24) & 0xFF;
1331 addr[4] = addr_hi & 0xFF;
1332 addr[5] = (addr_hi >> 8) & 0xFF;
1333
1334 if (!is_valid_ether_addr(addr)) {
1335 /* reading mac address from EEPROM or OTP */
1336 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1337 addr) == 0) ||
1338 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1339 addr) == 0)) {
1340 if (is_valid_ether_addr(addr)) {
1341 /* eeprom values are valid so use them */
1342 netif_dbg(dev, ifup, dev->net,
1343 "MAC address read from EEPROM");
1344 } else {
1345 /* generate random MAC */
1346 random_ether_addr(addr);
1347 netif_dbg(dev, ifup, dev->net,
1348 "MAC address set to random addr");
1349 }
1350
1351 addr_lo = addr[0] | (addr[1] << 8) |
1352 (addr[2] << 16) | (addr[3] << 24);
1353 addr_hi = addr[4] | (addr[5] << 8);
1354
1355 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1356 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1357 } else {
1358 /* generate random MAC */
1359 random_ether_addr(addr);
1360 netif_dbg(dev, ifup, dev->net,
1361 "MAC address set to random addr");
1362 }
1363 }
1364
1365 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1366 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1367
1368 ether_addr_copy(dev->net->dev_addr, addr);
1369}
1370
ce85e13a
WH
1371/* MDIO read and write wrappers for phylib */
1372static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1373{
1374 struct lan78xx_net *dev = bus->priv;
1375 u32 val, addr;
1376 int ret;
1377
1378 ret = usb_autopm_get_interface(dev->intf);
1379 if (ret < 0)
1380 return ret;
1381
1382 mutex_lock(&dev->phy_mutex);
1383
1384 /* confirm MII not busy */
1385 ret = lan78xx_phy_wait_not_busy(dev);
1386 if (ret < 0)
1387 goto done;
1388
1389 /* set the address, index & direction (read from PHY) */
1390 addr = mii_access(phy_id, idx, MII_READ);
1391 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1392
1393 ret = lan78xx_phy_wait_not_busy(dev);
1394 if (ret < 0)
1395 goto done;
1396
1397 ret = lan78xx_read_reg(dev, MII_DATA, &val);
1398
1399 ret = (int)(val & 0xFFFF);
1400
1401done:
1402 mutex_unlock(&dev->phy_mutex);
1403 usb_autopm_put_interface(dev->intf);
1404 return ret;
1405}
1406
1407static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1408 u16 regval)
1409{
1410 struct lan78xx_net *dev = bus->priv;
1411 u32 val, addr;
1412 int ret;
1413
1414 ret = usb_autopm_get_interface(dev->intf);
1415 if (ret < 0)
1416 return ret;
1417
1418 mutex_lock(&dev->phy_mutex);
1419
1420 /* confirm MII not busy */
1421 ret = lan78xx_phy_wait_not_busy(dev);
1422 if (ret < 0)
1423 goto done;
1424
1425 val = (u32)regval;
1426 ret = lan78xx_write_reg(dev, MII_DATA, val);
1427
1428 /* set the address, index & direction (write to PHY) */
1429 addr = mii_access(phy_id, idx, MII_WRITE);
1430 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1431
1432 ret = lan78xx_phy_wait_not_busy(dev);
1433 if (ret < 0)
1434 goto done;
1435
1436done:
1437 mutex_unlock(&dev->phy_mutex);
1438 usb_autopm_put_interface(dev->intf);
1439 return 0;
1440}
1441
1442static int lan78xx_mdio_init(struct lan78xx_net *dev)
55d7de9d 1443{
ce85e13a
WH
1444 int ret;
1445 int i;
1446
1447 dev->mdiobus = mdiobus_alloc();
1448 if (!dev->mdiobus) {
1449 netdev_err(dev->net, "can't allocate MDIO bus\n");
1450 return -ENOMEM;
1451 }
1452
1453 dev->mdiobus->priv = (void *)dev;
1454 dev->mdiobus->read = lan78xx_mdiobus_read;
1455 dev->mdiobus->write = lan78xx_mdiobus_write;
1456 dev->mdiobus->name = "lan78xx-mdiobus";
1457
1458 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1459 dev->udev->bus->busnum, dev->udev->devnum);
1460
1461 dev->mdiobus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1462 if (!dev->mdiobus->irq) {
1463 ret = -ENOMEM;
1464 goto exit1;
1465 }
1466
1467 /* handle our own interrupt */
1468 for (i = 0; i < PHY_MAX_ADDR; i++)
1469 dev->mdiobus->irq[i] = PHY_IGNORE_INTERRUPT;
1470
1471 switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
1472 case 0x78000000:
1473 case 0x78500000:
1474 /* set to internal PHY id */
1475 dev->mdiobus->phy_mask = ~(1 << 1);
1476 break;
1477 }
1478
1479 ret = mdiobus_register(dev->mdiobus);
1480 if (ret) {
1481 netdev_err(dev->net, "can't register MDIO bus\n");
1482 goto exit2;
1483 }
1484
1485 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1486 return 0;
1487exit2:
1488 kfree(dev->mdiobus->irq);
1489exit1:
1490 mdiobus_free(dev->mdiobus);
1491 return ret;
1492}
1493
1494static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1495{
1496 mdiobus_unregister(dev->mdiobus);
1497 kfree(dev->mdiobus->irq);
1498 mdiobus_free(dev->mdiobus);
1499}
1500
/* phylib link-change callback.  Intentionally empty: link handling
 * appears to be driven by the interrupt URB / EVENT_LINK_RESET path
 * (see lan78xx_open) rather than this callback — confirm before
 * relying on it.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	/* nothing to do */
}
1505
1506static int lan78xx_phy_init(struct lan78xx_net *dev)
1507{
ce85e13a
WH
1508 int ret;
1509 struct phy_device *phydev = dev->net->phydev;
55d7de9d 1510
ce85e13a
WH
1511 phydev = phy_find_first(dev->mdiobus);
1512 if (!phydev) {
1513 netdev_err(dev->net, "no PHY found\n");
1514 return -EIO;
1515 }
55d7de9d 1516
ce85e13a
WH
1517 ret = phy_connect_direct(dev->net, phydev,
1518 lan78xx_link_status_change,
1519 PHY_INTERFACE_MODE_GMII);
1520 if (ret) {
1521 netdev_err(dev->net, "can't attach PHY to %s\n",
1522 dev->mdiobus->id);
1523 return -EIO;
1524 }
55d7de9d
WH
1525
1526 /* set to AUTOMDIX */
758c5c11 1527 lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
55d7de9d 1528
ce85e13a
WH
1529 /* MAC doesn't support 1000T Half */
1530 phydev->supported &= ~SUPPORTED_1000baseT_Half;
1531 phydev->supported |= (SUPPORTED_10baseT_Half |
1532 SUPPORTED_10baseT_Full |
1533 SUPPORTED_100baseT_Half |
1534 SUPPORTED_100baseT_Full |
1535 SUPPORTED_1000baseT_Full |
1536 SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1537 genphy_config_aneg(phydev);
1538
1539 /* Workaround to enable PHY interrupt.
1540 * phy_start_interrupts() is API for requesting and enabling
1541 * PHY interrupt. However, USB-to-Ethernet device can't use
1542 * request_irq() called in phy_start_interrupts().
1543 * Set PHY to PHY_HALTED and call phy_start()
1544 * to make a call to phy_enable_interrupts()
1545 */
1546 phy_stop(phydev);
1547 phy_start(phydev);
55d7de9d
WH
1548
1549 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1550
1551 return 0;
1552}
1553
1554static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1555{
1556 int ret = 0;
1557 u32 buf;
1558 bool rxenabled;
1559
1560 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1561
1562 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
1563
1564 if (rxenabled) {
1565 buf &= ~MAC_RX_RXEN_;
1566 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1567 }
1568
1569 /* add 4 to size for FCS */
1570 buf &= ~MAC_RX_MAX_SIZE_MASK_;
1571 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1572
1573 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1574
1575 if (rxenabled) {
1576 buf |= MAC_RX_RXEN_;
1577 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1578 }
1579
1580 return 0;
1581}
1582
/* Asynchronously unlink every URB on queue @q that is not already in
 * unlink_start state.  Returns the number of URBs for which
 * usb_unlink_urb() was issued.  q->lock is dropped around each unlink
 * because the unlink may complete synchronously and its completion
 * handler takes the same lock (via defer_bh).
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* find the next entry not already being unlinked */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
1627
1628static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
1629{
1630 struct lan78xx_net *dev = netdev_priv(netdev);
1631 int ll_mtu = new_mtu + netdev->hard_header_len;
1632 int old_hard_mtu = dev->hard_mtu;
1633 int old_rx_urb_size = dev->rx_urb_size;
1634 int ret;
1635
1636 if (new_mtu > MAX_SINGLE_PACKET_SIZE)
1637 return -EINVAL;
1638
1639 if (new_mtu <= 0)
1640 return -EINVAL;
1641 /* no second zero-length packet read wanted after mtu-sized packets */
1642 if ((ll_mtu % dev->maxpacket) == 0)
1643 return -EDOM;
1644
1645 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
1646
1647 netdev->mtu = new_mtu;
1648
1649 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
1650 if (dev->rx_urb_size == old_hard_mtu) {
1651 dev->rx_urb_size = dev->hard_mtu;
1652 if (dev->rx_urb_size > old_rx_urb_size) {
1653 if (netif_running(dev->net)) {
1654 unlink_urbs(dev, &dev->rxq);
1655 tasklet_schedule(&dev->bh);
1656 }
1657 }
1658 }
1659
1660 return 0;
1661}
1662
1663int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
1664{
1665 struct lan78xx_net *dev = netdev_priv(netdev);
1666 struct sockaddr *addr = p;
1667 u32 addr_lo, addr_hi;
1668 int ret;
1669
1670 if (netif_running(netdev))
1671 return -EBUSY;
1672
1673 if (!is_valid_ether_addr(addr->sa_data))
1674 return -EADDRNOTAVAIL;
1675
1676 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1677
1678 addr_lo = netdev->dev_addr[0] |
1679 netdev->dev_addr[1] << 8 |
1680 netdev->dev_addr[2] << 16 |
1681 netdev->dev_addr[3] << 24;
1682 addr_hi = netdev->dev_addr[4] |
1683 netdev->dev_addr[5] << 8;
1684
1685 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1686 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1687
1688 return 0;
1689}
1690
1691/* Enable or disable Rx checksum offload engine */
1692static int lan78xx_set_features(struct net_device *netdev,
1693 netdev_features_t features)
1694{
1695 struct lan78xx_net *dev = netdev_priv(netdev);
1696 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1697 unsigned long flags;
1698 int ret;
1699
1700 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1701
1702 if (features & NETIF_F_RXCSUM) {
1703 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
1704 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
1705 } else {
1706 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
1707 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
1708 }
1709
1710 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1711 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
1712 else
1713 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
1714
1715 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1716
1717 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1718
1719 return 0;
1720}
1721
1722static void lan78xx_deferred_vlan_write(struct work_struct *param)
1723{
1724 struct lan78xx_priv *pdata =
1725 container_of(param, struct lan78xx_priv, set_vlan);
1726 struct lan78xx_net *dev = pdata->dev;
1727
1728 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
1729 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
1730}
1731
1732static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
1733 __be16 proto, u16 vid)
1734{
1735 struct lan78xx_net *dev = netdev_priv(netdev);
1736 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1737 u16 vid_bit_index;
1738 u16 vid_dword_index;
1739
1740 vid_dword_index = (vid >> 5) & 0x7F;
1741 vid_bit_index = vid & 0x1F;
1742
1743 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
1744
1745 /* defer register writes to a sleepable context */
1746 schedule_work(&pdata->set_vlan);
1747
1748 return 0;
1749}
1750
1751static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
1752 __be16 proto, u16 vid)
1753{
1754 struct lan78xx_net *dev = netdev_priv(netdev);
1755 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1756 u16 vid_bit_index;
1757 u16 vid_dword_index;
1758
1759 vid_dword_index = (vid >> 5) & 0x7F;
1760 vid_bit_index = vid & 0x1F;
1761
1762 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
1763
1764 /* defer register writes to a sleepable context */
1765 schedule_work(&pdata->set_vlan);
1766
1767 return 0;
1768}
1769
1770static void lan78xx_init_ltm(struct lan78xx_net *dev)
1771{
1772 int ret;
1773 u32 buf;
1774 u32 regs[6] = { 0 };
1775
1776 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1777 if (buf & USB_CFG1_LTM_ENABLE_) {
1778 u8 temp[2];
1779 /* Get values from EEPROM first */
1780 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
1781 if (temp[0] == 24) {
1782 ret = lan78xx_read_raw_eeprom(dev,
1783 temp[1] * 2,
1784 24,
1785 (u8 *)regs);
1786 if (ret < 0)
1787 return;
1788 }
1789 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
1790 if (temp[0] == 24) {
1791 ret = lan78xx_read_raw_otp(dev,
1792 temp[1] * 2,
1793 24,
1794 (u8 *)regs);
1795 if (ret < 0)
1796 return;
1797 }
1798 }
1799 }
1800
1801 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
1802 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
1803 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
1804 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
1805 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
1806 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
1807}
1808
/* Full device initialisation: lite-reset the chip, then program MAC,
 * USB, FIFO, flow-control and receive-filter registers, reset the PHY
 * and finally enable the TX/RX paths.
 * Returns 0 on success, -EIO when the lite-reset or the PHY reset does
 * not complete within one second.
 * NOTE(review): intermediate lan78xx_read_reg()/lan78xx_write_reg()
 * results are assigned to ret but never checked; only the two reset
 * timeouts are treated as fatal.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;

	/* trigger a "lite" reset and poll (up to 1s) for completion */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->devid = buf;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* burst cap and queue lengths depend on the negotiated USB speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* set HW_CFG_MEF_ and USB_CFG_BCE_ in the hardware config */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes (register unit: 512-byte blocks) */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear pending interrupts, disable flow control */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY and poll (up to 1s) until it reports ready */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	/* set automatic duplex and speed bits in MAC_CR */
	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable PHY interrupts */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	buf |= INT_ENP_PHY_INT;
	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);

	/* enable the MAC and FIFO-controller transmit path */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable the MAC and FIFO-controller receive path */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
1940
/* ndo_open: reset the chip, attach the PHY, submit the interrupt URB
 * for link monitoring and open the TX queue.
 * NOTE(review): the success path also runs usb_autopm_put_interface()
 * via the done: label, while lan78xx_stop() performs another put —
 * verify the autopm get/put pairing against the suspend/resume paths.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	ret = lan78xx_reset(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto done;

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	/* kick the deferred worker to evaluate the link */
	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
1981
/* Unlink all TX/RX URBs and wait (uninterruptibly, in UNLINK_TIMEOUT_MS
 * steps) for their completions to drain.
 * NOTE(review): the wait condition uses &&, so the loop exits as soon
 * as ANY one of rxq/txq/done is empty; if the intent is to wait until
 * all three have drained, the condition should arguably be || —
 * compare with usbnet_terminate_urbs().
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2007
/* ndo_stop: detach the PHY, stop the queue, cancel all in-flight URBs
 * and quiesce deferred work before releasing the autopm reference.
 */
int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	/* release the PHY attached in lan78xx_phy_init() */
	phy_stop(net->phydev);
	phy_disconnect(net->phydev);
	net->phydev = NULL;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2042
/* Collapse a fragmented skb into a single linear buffer; thin wrapper
 * around skb_linearize() used by the TX preparation path.
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2047
2048static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2049 struct sk_buff *skb, gfp_t flags)
2050{
2051 u32 tx_cmd_a, tx_cmd_b;
2052
2053 if (skb_headroom(skb) < TX_OVERHEAD) {
2054 struct sk_buff *skb2;
2055
2056 skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
2057 dev_kfree_skb_any(skb);
2058 skb = skb2;
2059 if (!skb)
2060 return NULL;
2061 }
2062
2063 if (lan78xx_linearize(skb) < 0)
2064 return NULL;
2065
2066 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2067
2068 if (skb->ip_summed == CHECKSUM_PARTIAL)
2069 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2070
2071 tx_cmd_b = 0;
2072 if (skb_is_gso(skb)) {
2073 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2074
2075 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2076
2077 tx_cmd_a |= TX_CMD_A_LSO_;
2078 }
2079
2080 if (skb_vlan_tag_present(skb)) {
2081 tx_cmd_a |= TX_CMD_A_IVTG_;
2082 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2083 }
2084
2085 skb_push(skb, 4);
2086 cpu_to_le32s(&tx_cmd_b);
2087 memcpy(skb->data, &tx_cmd_b, 4);
2088
2089 skb_push(skb, 4);
2090 cpu_to_le32s(&tx_cmd_a);
2091 memcpy(skb->data, &tx_cmd_a, 4);
2092
2093 return skb;
2094}
2095
/* Move @skb from @list to dev->done, recording its new state, and
 * schedule the bottom-half tasklet when the done queue becomes
 * non-empty.  Returns the state the skb had before the move.
 * Locking: list->lock is taken irqsave, then swapped for
 * dev->done.lock; the saved flags are deliberately carried across the
 * unlock/lock pair and restored at the final unlock.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* only the transition from empty needs to kick the tasklet */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2118
/* Completion handler for bulk-out (TX) URBs: update TX statistics,
 * react to fatal USB errors and hand the skb to the bottom half via
 * defer_bh().  Uses the async autopm put, so it is safe in the atomic
 * completion context.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* stalled endpoint: defer recovery to process ctx */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		/* transient bus errors: throttle transmission */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2157
2158static void lan78xx_queue_skb(struct sk_buff_head *list,
2159 struct sk_buff *newsk, enum skb_state state)
2160{
2161 struct skb_data *entry = (struct skb_data *)newsk->cb;
2162
2163 __skb_queue_tail(list, newsk);
2164 entry->state = state;
2165}
2166
2167netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2168{
2169 struct lan78xx_net *dev = netdev_priv(net);
81c38e81 2170 struct sk_buff *skb2 = NULL;
55d7de9d 2171
81c38e81 2172 if (skb) {
55d7de9d 2173 skb_tx_timestamp(skb);
81c38e81
WH
2174 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2175 }
55d7de9d 2176
81c38e81
WH
2177 if (skb2) {
2178 skb_queue_tail(&dev->txq_pend, skb2);
55d7de9d
WH
2179
2180 if (skb_queue_len(&dev->txq_pend) > 10)
2181 netif_stop_queue(net);
2182 } else {
2183 netif_dbg(dev, tx_err, dev->net,
2184 "lan78xx_tx_prep return NULL\n");
2185 dev->net->stats.tx_errors++;
2186 dev->net->stats.tx_dropped++;
2187 }
2188
2189 tasklet_schedule(&dev->bh);
2190
2191 return NETDEV_TX_OK;
2192}
2193
/* Scan the interface altsettings for one bulk-IN, one bulk-OUT and
 * (optionally) one interrupt-IN endpoint; record the resulting bulk
 * pipes and the status endpoint on @dev.
 * Returns 0 on success, -EINVAL when no usable altsetting is found.
 */
int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			/* only IN-interrupt and bulk endpoints qualify */
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			/* remember only the first endpoint of each kind */
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2251
2252static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2253{
2254 struct lan78xx_priv *pdata = NULL;
2255 int ret;
2256 int i;
2257
2258 ret = lan78xx_get_endpoints(dev, intf);
2259
2260 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2261
2262 pdata = (struct lan78xx_priv *)(dev->data[0]);
2263 if (!pdata) {
2264 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2265 return -ENOMEM;
2266 }
2267
2268 pdata->dev = dev;
2269
2270 spin_lock_init(&pdata->rfe_ctl_lock);
2271 mutex_init(&pdata->dataport_mutex);
2272
2273 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2274
2275 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2276 pdata->vlan_table[i] = 0;
2277
2278 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2279
2280 dev->net->features = 0;
2281
2282 if (DEFAULT_TX_CSUM_ENABLE)
2283 dev->net->features |= NETIF_F_HW_CSUM;
2284
2285 if (DEFAULT_RX_CSUM_ENABLE)
2286 dev->net->features |= NETIF_F_RXCSUM;
2287
2288 if (DEFAULT_TSO_CSUM_ENABLE)
2289 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2290
2291 dev->net->hw_features = dev->net->features;
2292
2293 /* Init all registers */
2294 ret = lan78xx_reset(dev);
2295
ce85e13a
WH
2296 lan78xx_mdio_init(dev);
2297
55d7de9d
WH
2298 dev->net->flags |= IFF_MULTICAST;
2299
2300 pdata->wol = WAKE_MAGIC;
2301
2302 return 0;
2303}
2304
2305static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2306{
2307 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2308
ce85e13a
WH
2309 lan78xx_remove_mdio(dev);
2310
55d7de9d
WH
2311 if (pdata) {
2312 netif_dbg(dev, ifdown, dev->net, "free pdata");
2313 kfree(pdata);
2314 pdata = NULL;
2315 dev->data[0] = 0;
2316 }
2317}
2318
2319static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2320 struct sk_buff *skb,
2321 u32 rx_cmd_a, u32 rx_cmd_b)
2322{
2323 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2324 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2325 skb->ip_summed = CHECKSUM_NONE;
2326 } else {
2327 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2328 skb->ip_summed = CHECKSUM_COMPLETE;
2329 }
2330}
2331
/* Deliver one received frame to the network stack, updating RX stats.
 * If the RX path is paused, the frame is parked on rxq_pause instead.
 */
void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	/* eth_type_trans() pulls the Ethernet header, so rx_bytes below
	 * counts payload only; the debug print adds it back.
	 */
	skb->protocol = eth_type_trans(skb, dev->net);
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* clear the skb_data bookkeeping before handing the skb away */
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* skb_defer_rx_timestamp() consumes the skb if it takes it */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
2357
/* Split one bulk-in URB buffer into individual Ethernet frames.  Each
 * frame is preceded by a 10-byte command header (two LE32 words plus
 * one LE16 word) and padded to a 4-byte boundary.  Good frames are
 * handed to lan78xx_skb_return(); the last frame reuses the URB skb
 * itself.  Returns 0 on framing/allocation error, 1 otherwise.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* peel the three little-endian command words */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* hardware flagged a receive error: skip this frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* clone shares the data; point len/data/tail at
			 * just this frame
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
2429
2430static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2431{
2432 if (!lan78xx_rx(dev, skb)) {
2433 dev->net->stats.rx_errors++;
2434 goto done;
2435 }
2436
2437 if (skb->len) {
2438 lan78xx_skb_return(dev, skb);
2439 return;
2440 }
2441
2442 netif_dbg(dev, rx_err, dev->net, "drop\n");
2443 dev->net->stats.rx_errors++;
2444done:
2445 skb_queue_tail(&dev->done, skb);
2446}
2447
static void rx_complete(struct urb *urb);

/* Allocate an skb for one RX URB and submit it on the bulk-in pipe.
 * Consumes @urb (it is freed on every failure path).  Returns 0 on
 * success or a negative errno; -ENOLINK tells the caller the RX path
 * is down and refilling should stop.
 * NOTE(review): the @flags argument is unused - the submit below
 * always uses GFP_ATOMIC; confirm whether that is intentional.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* completion bookkeeping lives in skb->cb */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* submit only while the device is present, up, not halted and
	 * not autosuspended
	 */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* endpoint stalled: clear the halt from process
			 * context via the deferred work
			 */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
2509
/* Completion handler for bulk-in URBs: classify the URB status, defer
 * the skb to the bottom half with the appropriate state, and resubmit
 * the URB when the link is still usable.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* runt buffers are counted as length errors, not parsed */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		/* stalled endpoint: clear halt from process context */
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		/* keep the urb on the entry so defer_bh frees it; do
		 * not resubmit
		 */
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		/* low-level USB errors: count and clean up, no resubmit */
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	/* urb is still ours only on the resubmittable paths above */
	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
2579
/* TX bottom half: coalesce pending skbs into one bulk-out URB.
 * Non-GSO packets are copied back-to-back (each padded to a 4-byte
 * boundary) into a single buffer up to MAX_SINGLE_PACKET_SIZE; a GSO
 * skb is always sent on its own.
 * NOTE(review): the txq_pend list is walked without taking the queue
 * lock - confirm all producers/consumers run serialized with this
 * tasklet.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	/* size the batch: stop at a GSO skb or when the buffer is full */
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			length = skb->len;
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

	length = skb_totallen;

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		dev->net->trans_start = jiffies;
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		/* throttle when the in-flight queue is full */
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
2703
2704static void lan78xx_rx_bh(struct lan78xx_net *dev)
2705{
2706 struct urb *urb;
2707 int i;
2708
2709 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
2710 for (i = 0; i < 10; i++) {
2711 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
2712 break;
2713 urb = usb_alloc_urb(0, GFP_ATOMIC);
2714 if (urb)
2715 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
2716 return;
2717 }
2718
2719 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
2720 tasklet_schedule(&dev->bh);
2721 }
2722 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
2723 netif_wake_queue(dev->net);
2724}
2725
/* Tasklet bottom half: drain the done queue - dispatching finished RX
 * buffers and freeing completed TX/cleanup entries - then restart TX
 * batching and RX refill while the interface is up.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			/* mark for cleanup first: rx_process() requeues
			 * the skb on the done list on error/drop
			 */
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* unexpected state: abort processing */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
2762
/* Deferred (process-context) work: clears TX/RX endpoint stalls and
 * handles link-reset events.  Note the unusual gotos: the fail_*
 * labels sit inside "if (netif_msg_*_err(dev))", so an autopm failure
 * is always logged while a clear-halt failure is logged only when the
 * corresponding message class is enabled.
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt(dev->udev, dev->pipe_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
fail_pipe:
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
fail_halt:
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		int ret = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (lan78xx_link_reset(dev) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			/* NOTE(review): ret is always 0 here; the real
			 * lan78xx_link_reset() error code is not shown.
			 */
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}
}
2829
/* Completion handler for the interrupt-in URB carrying event/link
 * status; decodes good data and re-arms itself while the interface
 * is running.
 */
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE: not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_running(dev->net))
		return;

	/* clear the buffer and resubmit so we keep receiving events */
	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status != 0)
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
}
2865
/* USB disconnect: unwind everything probe set up.  Order matters -
 * unregister the netdev first so no new I/O starts, cancel the
 * deferred work, drop anchored (suspended) TX URBs, unbind private
 * state, then kill/free the interrupt URB before the netdev memory
 * is released.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);

	net = dev->net;
	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
2894
/* ndo_tx_timeout handler: unlink all in-flight TX URBs and kick the
 * bottom half so transmission can restart.
 */
void lan78xx_tx_timeout(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
2902
/* net_device callbacks for the lan78xx interface */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
};
2917
2918static int lan78xx_probe(struct usb_interface *intf,
2919 const struct usb_device_id *id)
2920{
2921 struct lan78xx_net *dev;
2922 struct net_device *netdev;
2923 struct usb_device *udev;
2924 int ret;
2925 unsigned maxp;
2926 unsigned period;
2927 u8 *buf = NULL;
2928
2929 udev = interface_to_usbdev(intf);
2930 udev = usb_get_dev(udev);
2931
2932 ret = -ENOMEM;
2933 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
2934 if (!netdev) {
2935 dev_err(&intf->dev, "Error: OOM\n");
2936 goto out1;
2937 }
2938
2939 /* netdev_printk() needs this */
2940 SET_NETDEV_DEV(netdev, &intf->dev);
2941
2942 dev = netdev_priv(netdev);
2943 dev->udev = udev;
2944 dev->intf = intf;
2945 dev->net = netdev;
2946 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
2947 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
2948
2949 skb_queue_head_init(&dev->rxq);
2950 skb_queue_head_init(&dev->txq);
2951 skb_queue_head_init(&dev->done);
2952 skb_queue_head_init(&dev->rxq_pause);
2953 skb_queue_head_init(&dev->txq_pend);
2954 mutex_init(&dev->phy_mutex);
2955
2956 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
2957 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
2958 init_usb_anchor(&dev->deferred);
2959
2960 netdev->netdev_ops = &lan78xx_netdev_ops;
2961 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
2962 netdev->ethtool_ops = &lan78xx_ethtool_ops;
2963
2964 ret = lan78xx_bind(dev, intf);
2965 if (ret < 0)
2966 goto out2;
2967 strcpy(netdev->name, "eth%d");
2968
2969 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
2970 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
2971
2972 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
2973 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
2974 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
2975
2976 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
2977 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
2978
2979 dev->pipe_intr = usb_rcvintpipe(dev->udev,
2980 dev->ep_intr->desc.bEndpointAddress &
2981 USB_ENDPOINT_NUMBER_MASK);
2982 period = dev->ep_intr->desc.bInterval;
2983
2984 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
2985 buf = kmalloc(maxp, GFP_KERNEL);
2986 if (buf) {
2987 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
2988 if (!dev->urb_intr) {
2989 kfree(buf);
2990 goto out3;
2991 } else {
2992 usb_fill_int_urb(dev->urb_intr, dev->udev,
2993 dev->pipe_intr, buf, maxp,
2994 intr_complete, dev, period);
2995 }
2996 }
2997
2998 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
2999
3000 /* driver requires remote-wakeup capability during autosuspend. */
3001 intf->needs_remote_wakeup = 1;
3002
3003 ret = register_netdev(netdev);
3004 if (ret != 0) {
3005 netif_err(dev, probe, netdev, "couldn't register the device\n");
3006 goto out2;
3007 }
3008
3009 usb_set_intfdata(intf, dev);
3010
3011 ret = device_set_wakeup_enable(&udev->dev, true);
3012
3013 /* Default delay of 2sec has more overhead than advantage.
3014 * Set to 10sec as default.
3015 */
3016 pm_runtime_set_autosuspend_delay(&udev->dev,
3017 DEFAULT_AUTOSUSPEND_DELAY);
3018
3019 return 0;
3020
55d7de9d
WH
3021out3:
3022 lan78xx_unbind(dev, intf);
3023out2:
3024 free_netdev(netdev);
3025out1:
3026 usb_put_dev(udev);
3027
3028 return ret;
3029}
3030
3031static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3032{
3033 const u16 crc16poly = 0x8005;
3034 int i;
3035 u16 bit, crc, msb;
3036 u8 data;
3037
3038 crc = 0xFFFF;
3039 for (i = 0; i < len; i++) {
3040 data = *buf++;
3041 for (bit = 0; bit < 8; bit++) {
3042 msb = crc >> 15;
3043 crc <<= 1;
3044
3045 if (msb ^ (u16)(data & 1)) {
3046 crc ^= crc16poly;
3047 crc |= (u16)0x0001U;
3048 }
3049 data >>= 1;
3050 }
3051 }
3052
3053 return crc;
3054}
3055
/* Program the wakeup-frame filters and the PMT_CTL suspend mode for
 * the Wake-on-LAN modes selected in @wol (WAKE_PHY / MAGIC / BCAST /
 * MCAST / UCAST / ARP), then re-enable the receiver so wake packets
 * can be detected while suspended.
 * NOTE(review): register access return codes are collected in ret but
 * never checked; the function always returns 0.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* match patterns for the frame-filter CRCs below */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* quiesce MAC TX/RX while reprogramming the wake logic */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear stale wake control/status */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable all wakeup-frame filters, then fill them as needed */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable the receiver so wake packets are seen */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3198
/* USB suspend callback.  On the first (outermost) suspend, stop
 * TX/RX and tear down all URBs; then arm either selective-suspend
 * wakeup (autosuspend) or the user-configured Wake-on-LAN settings.
 * Returns -EBUSY to veto autosuspend while transmission is pending.
 * NOTE(review): 'event' is assigned but never used, and register
 * access return codes are not checked.
 */
int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			/* clear any pending wakeup status */
			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* receiver stays on so wake frames are seen */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system suspend: use the configured WoL modes */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
3292
/* USB resume callback: restart the interrupt URB, flush TX URBs that
 * were anchored while asleep, clear the wakeup status registers and
 * re-enable the transmitter.
 * NOTE(review): register writes and the interrupt-URB resubmit status
 * are collected in ret but not checked; always returns 0.
 */
int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->urb_intr, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		/* submit the TX URBs deferred by lan78xx_tx_bh() */
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* clear wake control, then acknowledge any recorded wake sources */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3353
/* reset_resume callback: the device may have lost its register state,
 * so fully re-initialize the MAC and PHY before running the normal
 * resume path.
 */
int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);

	lan78xx_phy_init(dev);

	return lan78xx_resume(intf);
}
3364
/* USB vendor/product IDs of the devices this driver supports */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
3377
/* USB driver glue: autosuspend is supported (with remote wakeup, see
 * probe), and hub-initiated LPM is disabled.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");