]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/net/usb/lan78xx.c
net: eth: altera: Remove sgdmadesclen member from altera_tse_private
[mirror_ubuntu-artful-kernel.git] / drivers / net / usb / lan78xx.c
CommitLineData
55d7de9d
WH
1/*
2 * Copyright (C) 2015 Microchip Technology
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17#include <linux/version.h>
18#include <linux/module.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/ethtool.h>
22#include <linux/mii.h>
23#include <linux/usb.h>
24#include <linux/crc32.h>
25#include <linux/signal.h>
26#include <linux/slab.h>
27#include <linux/if_vlan.h>
28#include <linux/uaccess.h>
29#include <linux/list.h>
30#include <linux/ip.h>
31#include <linux/ipv6.h>
32#include <linux/mdio.h>
33#include <net/ip6_checksum.h>
34#include "lan78xx.h"
35
/* Module identification */
#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"
#define DRIVER_VERSION	"1.0.0"

/* Timeouts for TX watchdog, RX throttling, and URB unlink */
#define TX_TIMEOUT_JIFFIES	(5 * HZ)
#define THROTTLE_JIFFIES	(HZ / 8)
#define UNLINK_TIMEOUT_MS	3

/* Upper bound on queued RX memory (60 maximum-size ethernet frames) */
#define RX_MAX_QUEUE_MEMORY	(60 * 1518)

/* USB bulk packet sizes per bus speed (SuperSpeed / High / Full) */
#define SS_USB_PKT_SIZE		(1024)
#define HS_USB_PKT_SIZE		(512)
#define FS_USB_PKT_SIZE		(64)

/* Device FIFO sizes and default transfer tuning */
#define MAX_RX_FIFO_SIZE	(12 * 1024)
#define MAX_TX_FIFO_SIZE	(12 * 1024)
#define DEFAULT_BURST_CAP_SIZE	(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY	(0x0800)
#define MAX_SINGLE_PACKET_SIZE	(9000)	/* jumbo frame limit */
#define DEFAULT_TX_CSUM_ENABLE	(true)
#define DEFAULT_RX_CSUM_ENABLE	(true)
#define DEFAULT_TSO_CSUM_ENABLE	(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define INTERNAL_PHY_ID		(2)	/* 2: GMII */
#define TX_OVERHEAD		(8)	/* per-frame TX command header bytes */
#define RXW_PADDING		2

/* USB IDs and magic values used by the ethtool EEPROM/OTP interface */
#define LAN78XX_USB_VENDOR_ID	(0x0424)
#define LAN7800_USB_PRODUCT_ID	(0x7800)
#define LAN7850_USB_PRODUCT_ID	(0x7850)
#define LAN78XX_EEPROM_MAGIC	(0x78A5)
#define LAN78XX_OTP_MAGIC	(0x78F3)

/* Direction selectors for mii_access() */
#define MII_READ	1
#define MII_WRITE	0

/* EEPROM/OTP layout markers */
#define EEPROM_INDICATOR	(0xA5)
#define EEPROM_MAC_OFFSET	(0x01)
#define MAX_EEPROM_SIZE		512
#define OTP_INDICATOR_1		(0xF3)
#define OTP_INDICATOR_2		(0xF7)

/* Every Wake-on-LAN trigger the device can report as supported */
#define WAKE_ALL	(WAKE_PHY | WAKE_UCAST | \
			 WAKE_MCAST | WAKE_BCAST | \
			 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE	1
#define BULK_OUT_PIPE	2

/* default autosuspend delay (mSec)*/
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
89
/* Names reported to "ethtool -S"; order must match the u32 fields of
 * struct lan78xx_statstage, which lan78xx_get_stats() copies out
 * word-for-word.
 * NOTE(review): "Rx Fragment Errors" breaks the "RX" capitalization used
 * by every other entry — likely a typo, but the string is user-visible
 * so it is left untouched here.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
139
/* Raw statistics block as returned by the device in response to
 * USB_VENDOR_REQUEST_GET_STATS (little-endian on the wire; byte-swapped
 * in lan78xx_read_stats()). Field order mirrors lan78xx_gstrings and the
 * total size must be exactly 0xBC bytes — lan78xx_read_stats() BUG_ONs
 * otherwise.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
189
struct lan78xx_net;

/* Driver-private filtering state, reached via dev->data[0].
 * Shadow copies of the receive-filter hardware tables are kept here and
 * pushed to the device from the set_multicast/set_vlan work items, since
 * register access needs a sleepable context.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;				/* shadow of the RFE_CTL register */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
	u32 pfilter_table[NUM_OF_MAF][2];	/* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;	/* defers multicast table writes */
	struct work_struct set_vlan;		/* defers VLAN table writes */
	u32 wol;				/* enabled WAKE_* options */
};

/* Lifecycle of an skb as it moves through the URB queues */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {	/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
};

/* Context carried by deferred/asynchronous control requests */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
226
/* Bit numbers in lan78xx_net.flags, set via lan78xx_defer_kevent() and
 * serviced by the keventd work item.
 */
#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
236
/* Per-device state; lives in netdev_priv() of the net_device. */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;	/* points at struct lan78xx_priv */

	int			rx_qlen;	/* target depth of the RX URB queue */
	int			tx_qlen;	/* target depth of the TX URB queue */
	struct sk_buff_head	rxq;		/* RX URBs in flight */
	struct sk_buff_head	txq;		/* TX URBs in flight */
	struct sk_buff_head	done;		/* completed URBs awaiting the tasklet */
	struct sk_buff_head	rxq_pause;	/* RX skbs held while paused */
	struct sk_buff_head	txq_pend;	/* TX skbs waiting for a free URB */

	struct tasklet_struct	bh;		/* bottom half: drains the done queue */
	struct delayed_work	wq;		/* keventd work for EVENT_* flags */

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int			msg_enable;	/* netif message level bitmap */

	struct urb		*urb_intr;	/* interrupt/status URB */
	struct usb_anchor	deferred;	/* URBs deferred across suspend */

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;		/* EVENT_* bits */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;	/* bulk-out max packet size */
	struct timer_list	delay;

	unsigned long		data[5];	/* data[0] holds struct lan78xx_priv * */
	struct mii_if_info	mii;

	int			link_on;	/* last link state seen by link_reset */
	u8			mdix_ctrl;
};
283
/* use ethtool to change the level for any given device */
static int msg_level = -1;	/* -1: use the netif default message level */
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
288
/* Read 32-bit register @index over the USB control pipe into @*data.
 *
 * The 4-byte transfer buffer is kmalloc'd per call rather than taken from
 * the stack because USB transfer buffers may be DMA-mapped and must not
 * live in stack memory. The wire value is little-endian and is converted
 * to CPU order before being stored.
 *
 * Returns usb_control_msg()'s result: >= 0 (bytes transferred) on
 * success, negative errno on failure; -ENOMEM if the buffer allocation
 * fails. @*data is only written on success.
 */
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	BUG_ON(!dev);

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}
316
/* Write 32-bit value @data to register @index over the USB control pipe.
 *
 * As in lan78xx_read_reg(), a kmalloc'd bounce buffer is used because
 * USB transfer buffers must not be on the stack. The value is converted
 * to little-endian before transmission.
 *
 * Returns usb_control_msg()'s result: >= 0 on success, negative errno on
 * failure; -ENOMEM if the buffer allocation fails.
 */
static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	BUG_ON(!dev);

	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}
344
/* Fetch the hardware statistics block into @data.
 *
 * The device returns the whole lan78xx_statstage structure in one
 * control-IN transfer; each little-endian word is byte-swapped into the
 * caller's buffer. The BUG_ON pins the structure at 0xBC bytes, the size
 * the device produces (NOTE(review): this is a compile-time invariant
 * and could be a BUILD_BUG_ON instead).
 *
 * NOTE(review): USB_CTRL_SET_TIMEOUT is used for an IN (get) transfer —
 * presumably intentional, but USB_CTRL_GET_TIMEOUT would be the matching
 * constant; confirm before changing.
 *
 * Returns the usb_control_msg() result (bytes transferred on success,
 * negative errno on failure) or -ENOMEM. @data is only filled on success.
 */
static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	BUG_ON(!dev);
	BUG_ON(!data);
	BUG_ON(sizeof(struct lan78xx_statstage) != 0xBC);

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = 0x%x", ret);
	}

	kfree(stats);

	return ret;
}
387
388/* Loop until the read is completed with timeout called with phy_mutex held */
389static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
390{
391 unsigned long start_time = jiffies;
392 u32 val;
393 int ret;
394
395 do {
396 ret = lan78xx_read_reg(dev, MII_ACC, &val);
397 if (unlikely(ret < 0))
398 return -EIO;
399
400 if (!(val & MII_ACC_MII_BUSY_))
401 return 0;
402 } while (!time_after(jiffies, start_time + HZ));
403
404 return -EIO;
405}
406
407static inline u32 mii_access(int id, int index, int read)
408{
409 u32 ret;
410
411 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
412 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
413 if (read)
414 ret |= MII_ACC_MII_READ_;
415 else
416 ret |= MII_ACC_MII_WRITE_;
417 ret |= MII_ACC_MII_BUSY_;
418
419 return ret;
420}
421
422static int lan78xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
423{
424 struct lan78xx_net *dev = netdev_priv(netdev);
425 u32 val, addr;
426 int ret;
427
428 ret = usb_autopm_get_interface(dev->intf);
429 if (ret < 0)
430 return ret;
431
432 mutex_lock(&dev->phy_mutex);
433
434 /* confirm MII not busy */
435 ret = lan78xx_phy_wait_not_busy(dev);
436 if (ret < 0)
437 goto done;
438
439 /* set the address, index & direction (read from PHY) */
440 phy_id &= dev->mii.phy_id_mask;
441 idx &= dev->mii.reg_num_mask;
442 addr = mii_access(phy_id, idx, MII_READ);
443 ret = lan78xx_write_reg(dev, MII_ACC, addr);
444
445 ret = lan78xx_phy_wait_not_busy(dev);
446 if (ret < 0)
447 goto done;
448
449 ret = lan78xx_read_reg(dev, MII_DATA, &val);
450
451 ret = (int)(val & 0xFFFF);
452
453done:
454 mutex_unlock(&dev->phy_mutex);
455 usb_autopm_put_interface(dev->intf);
456 return ret;
457}
458
459static void lan78xx_mdio_write(struct net_device *netdev, int phy_id,
460 int idx, int regval)
461{
462 struct lan78xx_net *dev = netdev_priv(netdev);
463 u32 val, addr;
464 int ret;
465
466 if (usb_autopm_get_interface(dev->intf) < 0)
467 return;
468
469 mutex_lock(&dev->phy_mutex);
470
471 /* confirm MII not busy */
472 ret = lan78xx_phy_wait_not_busy(dev);
473 if (ret < 0)
474 goto done;
475
476 val = regval;
477 ret = lan78xx_write_reg(dev, MII_DATA, val);
478
479 /* set the address, index & direction (write to PHY) */
480 phy_id &= dev->mii.phy_id_mask;
481 idx &= dev->mii.reg_num_mask;
482 addr = mii_access(phy_id, idx, MII_WRITE);
483 ret = lan78xx_write_reg(dev, MII_ACC, addr);
484
485 ret = lan78xx_phy_wait_not_busy(dev);
486 if (ret < 0)
487 goto done;
488
489done:
490 mutex_unlock(&dev->phy_mutex);
491 usb_autopm_put_interface(dev->intf);
492}
493
/* Write @regval to MMD register @mmdidx of MMD device @mmddev behind PHY
 * @phy_id, using the indirect Clause-45-over-Clause-22 access sequence:
 * (1) program the MMD device address, (2) program the register index,
 * (3) switch the control register to data mode, (4) write the value.
 * Each step is committed by writing a command to MII_ACC and waiting for
 * the busy bit to clear.
 *
 * NOTE(review): the return codes of the lan78xx_write_reg() calls are
 * assigned to ret but never checked; only a failed busy-wait aborts the
 * sequence, and errors cannot be reported (void return).
 */
static void lan78xx_mmd_write(struct net_device *netdev, int phy_id,
			      int mmddev, int mmdidx, int regval)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	u32 val, addr;
	int ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	mmddev &= 0x1F;	/* MMD device addresses are 5 bits wide */

	/* set up device address for MMD */
	ret = lan78xx_write_reg(dev, MII_DATA, mmddev);

	phy_id &= dev->mii.phy_id_mask;
	addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* select register of MMD */
	val = mmdidx;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	phy_id &= dev->mii.phy_id_mask;	/* already masked; harmless repeat */
	addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* select register data for MMD */
	val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	phy_id &= dev->mii.phy_id_mask;
	addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* write to MMD */
	val = regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	phy_id &= dev->mii.phy_id_mask;
	addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
}
564
565static int lan78xx_mmd_read(struct net_device *netdev, int phy_id,
566 int mmddev, int mmdidx)
567{
568 struct lan78xx_net *dev = netdev_priv(netdev);
569 u32 val, addr;
570 int ret;
571
572 ret = usb_autopm_get_interface(dev->intf);
573 if (ret < 0)
574 return ret;
575
576 mutex_lock(&dev->phy_mutex);
577
578 /* confirm MII not busy */
579 ret = lan78xx_phy_wait_not_busy(dev);
580 if (ret < 0)
581 goto done;
582
583 /* set up device address for MMD */
584 ret = lan78xx_write_reg(dev, MII_DATA, mmddev);
585
586 phy_id &= dev->mii.phy_id_mask;
587 addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
588 ret = lan78xx_write_reg(dev, MII_ACC, addr);
589
590 ret = lan78xx_phy_wait_not_busy(dev);
591 if (ret < 0)
592 goto done;
593
594 /* select register of MMD */
595 val = mmdidx;
596 ret = lan78xx_write_reg(dev, MII_DATA, val);
597
598 phy_id &= dev->mii.phy_id_mask;
599 addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
600 ret = lan78xx_write_reg(dev, MII_ACC, addr);
601
602 ret = lan78xx_phy_wait_not_busy(dev);
603 if (ret < 0)
604 goto done;
605
606 /* select register data for MMD */
607 val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
608 ret = lan78xx_write_reg(dev, MII_DATA, val);
609
610 phy_id &= dev->mii.phy_id_mask;
611 addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
612 ret = lan78xx_write_reg(dev, MII_ACC, addr);
613
614 ret = lan78xx_phy_wait_not_busy(dev);
615 if (ret < 0)
616 goto done;
617
618 /* set the address, index & direction (read from PHY) */
619 phy_id &= dev->mii.phy_id_mask;
620 addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_READ);
621 ret = lan78xx_write_reg(dev, MII_ACC, addr);
622
623 ret = lan78xx_phy_wait_not_busy(dev);
624 if (ret < 0)
625 goto done;
626
627 /* read from MMD */
628 ret = lan78xx_read_reg(dev, MII_DATA, &val);
629
630 ret = (int)(val & 0xFFFF);
631
632done:
633 mutex_unlock(&dev->phy_mutex);
634 usb_autopm_put_interface(dev->intf);
635 return ret;
636}
637
/* Wait for a previously issued EEPROM command to complete.
 *
 * Polls E2P_CMD for up to ~1s: success when the busy bit clears without
 * the timeout bit set; bails out of the loop early if the device itself
 * reports E2P_CMD_EPC_TIMEOUT_.
 *
 * Returns 0 on completion, -EIO on register-read failure, device-side
 * timeout, or host-side poll timeout.
 *
 * NOTE(review): the warning says "read operation" but this helper is
 * also used on the EEPROM write path (lan78xx_write_raw_eeprom).
 */
static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}
662
663static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
664{
665 unsigned long start_time = jiffies;
666 u32 val;
667 int ret;
668
669 do {
670 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
671 if (unlikely(ret < 0))
672 return -EIO;
673
674 if (!(val & E2P_CMD_EPC_BUSY_))
675 return 0;
676
677 usleep_range(40, 100);
678 } while (!time_after(jiffies, start_time + HZ));
679
680 netdev_warn(dev->net, "EEPROM is busy");
681 return -EIO;
682}
683
/* Read @length bytes from the EEPROM starting at @offset into @data,
 * one byte per E2P read command.
 *
 * Returns 0 on success, -EIO on a register access failure, or the error
 * from the busy-wait helpers.
 */
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	int i, ret;

	BUG_ON(!dev);
	BUG_ON(!data);

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret)
		return ret;

	for (i = 0; i < length; i++) {
		/* issue a single-byte read at the current offset */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0))
			return -EIO;

		ret = lan78xx_wait_eeprom(dev);
		if (ret < 0)
			return ret;

		/* low byte of E2P_DATA holds the byte just read */
		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0))
			return -EIO;

		data[i] = val & 0xFF;
		offset++;
	}

	return 0;
}
718
719static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
720 u32 length, u8 *data)
721{
722 u8 sig;
723 int ret;
724
725 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
726 if ((ret == 0) && (sig == EEPROM_INDICATOR))
727 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
728 else
729 ret = -EINVAL;
730
731 return ret;
732}
733
/* Write @length bytes from @data to the EEPROM starting at @offset.
 *
 * Issues one write/erase-enable command (EWEN), then one byte-write
 * command per byte, waiting for completion after each.
 *
 * Returns 0 on success, -EIO or a negative errno on register access or
 * busy-wait failure. NOTE(review): no write-disable command is issued
 * afterwards — presumably the device doesn't require one; confirm
 * against the datasheet.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	int i, ret;

	BUG_ON(!dev);
	BUG_ON(!data);

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret)
		return ret;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0))
		return -EIO;

	ret = lan78xx_wait_eeprom(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0)
			return ret;

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		if (ret < 0)
			return ret;

		offset++;
	}

	return 0;
}
780
/* Read @length bytes from the OTP array starting at @offset into @data.
 *
 * If the OTP block is powered down (OTP_PWR_DN_PWRDN_N_ set), it is
 * powered up first and the function waits up to ~1s for the bit to
 * clear. Each byte is then read by programming the split address
 * registers, issuing a READ command via OTP_CMD_GO, and polling
 * OTP_STATUS until not busy.
 *
 * Returns 0 on success, -EIO on a power-up or status-poll timeout.
 *
 * NOTE(review): the return values of the lan78xx_read_reg()/
 * lan78xx_write_reg() calls are assigned to ret but never checked, so a
 * failed register read leaves the previous `buf` contents driving the
 * loop conditions.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* address is split: high bits in OTP_ADDR1, low in OTP_ADDR2 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
834
835static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
836 u32 length, u8 *data)
837{
838 u8 sig;
839 int ret;
840
841 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
842
843 if (ret == 0) {
844 if (sig == OTP_INDICATOR_1)
845 offset = offset;
846 else if (sig == OTP_INDICATOR_2)
847 offset += 0x100;
848 else
849 ret = -EINVAL;
850 ret = lan78xx_read_raw_otp(dev, offset, length, data);
851 }
852
853 return ret;
854}
855
856static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
857{
858 int i, ret;
859
860 for (i = 0; i < 100; i++) {
861 u32 dp_sel;
862
863 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
864 if (unlikely(ret < 0))
865 return -EIO;
866
867 if (dp_sel & DP_SEL_DPRDY_)
868 return 0;
869
870 usleep_range(40, 100);
871 }
872
873 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
874
875 return -EIO;
876}
877
/* Write @length words from @buf into internal device RAM selected by
 * @ram_select, starting at word address @addr, via the DP_* dataport
 * registers. Serialized by pdata->dataport_mutex.
 *
 * Returns the last register/busy-wait status (>= 0 on success, negative
 * errno on failure).
 *
 * NOTE(review): returns 0 ("success") when usb_autopm_get_interface()
 * fails, so callers cannot distinguish that case; the intermediate
 * read/write results inside the loop are also unchecked except for the
 * busy-waits.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* select the target RAM bank, preserving the other DP_SEL bits */
	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		/* commit the word and wait for the dataport to go idle */
		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
918
919static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
920 int index, u8 addr[ETH_ALEN])
921{
922 u32 temp;
923
924 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
925 temp = addr[3];
926 temp = addr[2] | (temp << 8);
927 temp = addr[1] | (temp << 8);
928 temp = addr[0] | (temp << 8);
929 pdata->pfilter_table[index][1] = temp;
930 temp = addr[5];
931 temp = addr[4] | (temp << 8);
932 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
933 pdata->pfilter_table[index][0] = temp;
934 }
935}
936
937/* returns hash bit number for given MAC address */
938static inline u32 lan78xx_hash(char addr[ETH_ALEN])
939{
940 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
941}
942
/* Work handler for pdata->set_multicast: push the shadow filter state
 * built by lan78xx_set_multicast() to the hardware. Runs in process
 * context because the register writes sleep.
 *
 * The hash table is written into the VHF RAM at word offset
 * DP_SEL_VHF_VLAN_LEN — presumably the hash table sits directly after
 * the VLAN table in that RAM; confirm against the datasheet. Perfect
 * filter slot 0 (the device's own address) is skipped; each remaining
 * slot's MAF_HI is cleared before MAF_LO/MAF_HI are reloaded so a
 * half-updated entry is never marked valid. RFE_CTL is written last to
 * activate the new configuration.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
967
/* ndo_set_rx_mode hook: rebuild the shadow receive-filter state from the
 * netdev flags and multicast list, then schedule the deferred work to
 * write it to hardware (this hook may be called in atomic context, so no
 * register access happens here). The shadow state is rebuilt from
 * scratch under rfe_ctl_lock.
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	/* start from empty hash and perfect-filter tables */
	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] =
			pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;	/* NOTE: shadows the outer i; harmless here */

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			/* (slots 1..32; slot 0 is the device's own MAC) */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				/* overflow addresses fall back to the hash */
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
						(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
1030
/* Program MAC pause-frame settings from the resolved autonegotiation
 * result (@lcladv/@rmtadv advertisement words).
 *
 * Always returns 0; the individual register-write results are ignored.
 * NOTE(review): the @duplex parameter is unused — flow control only
 * applies at full duplex, which mii_resolve_flowctrl_fdx() presumably
 * already accounts for; confirm before removing.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;

	u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);

	if (cap & FLOW_CTRL_TX)
		flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);	/* max pause time */

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	/* FIFO thresholds for generating pause frames; values are
	 * device-specific per bus speed (no threshold set for full speed)
	 */
	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
1061
/* Handle a PHY link-change interrupt (EVENT_LINK_RESET work).
 *
 * Clears the PHY and LAN78xx interrupt status, then reconciles carrier
 * state with the PHY: on link loss the MAC is reset and carrier turned
 * off; on link up the negotiated speed/duplex is read back, USB LPM
 * (U1/U2) states are tuned for SuperSpeed hosts, flow control is
 * reprogrammed, and carrier is turned on.
 *
 * Returns 0 / last operation status on success, -EIO on register or MDIO
 * failure. NOTE(review): if neither branch is taken (no state change)
 * the function returns the status of the INT_STS write.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct mii_if_info *mii = &dev->mii;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	u16 ladv, radv;
	int ret;
	u32 buf;

	/* clear PHY interrupt status */
	/* VTSE PHY */
	ret = lan78xx_mdio_read(dev->net, mii->phy_id, PHY_VTSE_INT_STS);
	if (unlikely(ret < 0))
		return -EIO;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	if (!mii_link_ok(mii) && dev->link_on) {
		/* link went down */
		dev->link_on = false;
		netif_carrier_off(dev->net);

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;
	} else if (mii_link_ok(mii) && !dev->link_on) {
		/* link came up */
		dev->link_on = true;

		mii_check_media(mii, 1, 1);
		mii_ethtool_gset(&dev->mii, &ecmd);

		/* re-clear PHY interrupt status raised by the media check */
		mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ethtool_cmd_speed(&ecmd) == 1000) {
				/* disable U2 */
				/* (U2 exit latency hurts gigabit throughput) */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		/* fetch both advertisement words for flow-control resolution */
		ladv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
		if (unlikely(ladv < 0))
			return -EIO;

		radv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
		if (unlikely(radv < 0))
			return -EIO;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
		netif_carrier_on(dev->net);
	}

	return ret;
}
1138
1139/* some work can't be done in tasklets, so we use keventd
1140 *
1141 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1142 * but tasklet_schedule() doesn't. hope the failure is rare.
1143 */
1144void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1145{
1146 set_bit(work, &dev->flags);
1147 if (!schedule_delayed_work(&dev->wq, 0))
1148 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1149}
1150
1151static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1152{
1153 u32 intdata;
1154
1155 if (urb->actual_length != 4) {
1156 netdev_warn(dev->net,
1157 "unexpected urb length %d", urb->actual_length);
1158 return;
1159 }
1160
1161 memcpy(&intdata, urb->transfer_buffer, 4);
1162 le32_to_cpus(&intdata);
1163
1164 if (intdata & INT_ENP_PHY_INT) {
1165 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1166 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1167 } else
1168 netdev_warn(dev->net,
1169 "unexpected interrupt: 0x%08x\n", intdata);
1170}
1171
1172static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1173{
1174 return MAX_EEPROM_SIZE;
1175}
1176
1177static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1178 struct ethtool_eeprom *ee, u8 *data)
1179{
1180 struct lan78xx_net *dev = netdev_priv(netdev);
1181
1182 ee->magic = LAN78XX_EEPROM_MAGIC;
1183
1184 return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1185}
1186
1187static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1188 struct ethtool_eeprom *ee, u8 *data)
1189{
1190 struct lan78xx_net *dev = netdev_priv(netdev);
1191
1192 /* Allow entire eeprom update only */
1193 if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1194 (ee->offset == 0) &&
1195 (ee->len == 512) &&
1196 (data[0] == EEPROM_INDICATOR))
1197 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1198 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1199 (ee->offset == 0) &&
1200 (ee->len == 512) &&
1201 (data[0] == OTP_INDICATOR_1))
1202 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1203
1204 return -EINVAL;
1205}
1206
1207static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1208 u8 *data)
1209{
1210 if (stringset == ETH_SS_STATS)
1211 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1212}
1213
1214static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1215{
1216 if (sset == ETH_SS_STATS)
1217 return ARRAY_SIZE(lan78xx_gstrings);
1218 else
1219 return -EOPNOTSUPP;
1220}
1221
1222static void lan78xx_get_stats(struct net_device *netdev,
1223 struct ethtool_stats *stats, u64 *data)
1224{
1225 struct lan78xx_net *dev = netdev_priv(netdev);
1226 struct lan78xx_statstage lan78xx_stat;
1227 u32 *p;
1228 int i;
1229
1230 if (usb_autopm_get_interface(dev->intf) < 0)
1231 return;
1232
1233 if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
1234 p = (u32 *)&lan78xx_stat;
1235 for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
1236 data[i] = p[i];
1237 }
1238
1239 usb_autopm_put_interface(dev->intf);
1240}
1241
1242static void lan78xx_get_wol(struct net_device *netdev,
1243 struct ethtool_wolinfo *wol)
1244{
1245 struct lan78xx_net *dev = netdev_priv(netdev);
1246 int ret;
1247 u32 buf;
1248 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1249
1250 if (usb_autopm_get_interface(dev->intf) < 0)
1251 return;
1252
1253 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1254 if (unlikely(ret < 0)) {
1255 wol->supported = 0;
1256 wol->wolopts = 0;
1257 } else {
1258 if (buf & USB_CFG_RMT_WKP_) {
1259 wol->supported = WAKE_ALL;
1260 wol->wolopts = pdata->wol;
1261 } else {
1262 wol->supported = 0;
1263 wol->wolopts = 0;
1264 }
1265 }
1266
1267 usb_autopm_put_interface(dev->intf);
1268}
1269
1270static int lan78xx_set_wol(struct net_device *netdev,
1271 struct ethtool_wolinfo *wol)
1272{
1273 struct lan78xx_net *dev = netdev_priv(netdev);
1274 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1275 int ret;
1276
1277 ret = usb_autopm_get_interface(dev->intf);
1278 if (ret < 0)
1279 return ret;
1280
1281 pdata->wol = 0;
1282 if (wol->wolopts & WAKE_UCAST)
1283 pdata->wol |= WAKE_UCAST;
1284 if (wol->wolopts & WAKE_MCAST)
1285 pdata->wol |= WAKE_MCAST;
1286 if (wol->wolopts & WAKE_BCAST)
1287 pdata->wol |= WAKE_BCAST;
1288 if (wol->wolopts & WAKE_MAGIC)
1289 pdata->wol |= WAKE_MAGIC;
1290 if (wol->wolopts & WAKE_PHY)
1291 pdata->wol |= WAKE_PHY;
1292 if (wol->wolopts & WAKE_ARP)
1293 pdata->wol |= WAKE_ARP;
1294
1295 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1296
1297 usb_autopm_put_interface(dev->intf);
1298
1299 return ret;
1300}
1301
1302static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1303{
1304 struct lan78xx_net *dev = netdev_priv(net);
1305 int ret;
1306 u32 buf;
1307 u32 adv, lpadv;
1308
1309 ret = usb_autopm_get_interface(dev->intf);
1310 if (ret < 0)
1311 return ret;
1312
1313 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1314 if (buf & MAC_CR_EEE_EN_) {
1315 buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
1316 PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT);
1317 adv = mmd_eee_adv_to_ethtool_adv_t(buf);
1318 buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
1319 PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
1320 lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
1321
1322 edata->eee_enabled = true;
1323 edata->supported = true;
1324 edata->eee_active = !!(adv & lpadv);
1325 edata->advertised = adv;
1326 edata->lp_advertised = lpadv;
1327 edata->tx_lpi_enabled = true;
1328 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1329 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1330 edata->tx_lpi_timer = buf;
1331 } else {
1332 buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
1333 PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
1334 lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
1335
1336 edata->eee_enabled = false;
1337 edata->eee_active = false;
1338 edata->supported = false;
1339 edata->advertised = 0;
1340 edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(lpadv);
1341 edata->tx_lpi_enabled = false;
1342 edata->tx_lpi_timer = 0;
1343 }
1344
1345 usb_autopm_put_interface(dev->intf);
1346
1347 return 0;
1348}
1349
1350static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1351{
1352 struct lan78xx_net *dev = netdev_priv(net);
1353 int ret;
1354 u32 buf;
1355
1356 ret = usb_autopm_get_interface(dev->intf);
1357 if (ret < 0)
1358 return ret;
1359
1360 if (edata->eee_enabled) {
1361 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1362 buf |= MAC_CR_EEE_EN_;
1363 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1364
1365 buf = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
1366 lan78xx_mmd_write(dev->net, dev->mii.phy_id,
1367 PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT, buf);
1368 } else {
1369 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1370 buf &= ~MAC_CR_EEE_EN_;
1371 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1372 }
1373
1374 usb_autopm_put_interface(dev->intf);
1375
1376 return 0;
1377}
1378
1379static u32 lan78xx_get_link(struct net_device *net)
1380{
1381 struct lan78xx_net *dev = netdev_priv(net);
1382
1383 return mii_link_ok(&dev->mii);
1384}
1385
1386int lan78xx_nway_reset(struct net_device *net)
1387{
1388 struct lan78xx_net *dev = netdev_priv(net);
1389
1390 if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
1391 return -EOPNOTSUPP;
1392
1393 return mii_nway_restart(&dev->mii);
1394}
1395
1396static void lan78xx_get_drvinfo(struct net_device *net,
1397 struct ethtool_drvinfo *info)
1398{
1399 struct lan78xx_net *dev = netdev_priv(net);
1400
1401 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1402 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1403 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1404}
1405
1406static u32 lan78xx_get_msglevel(struct net_device *net)
1407{
1408 struct lan78xx_net *dev = netdev_priv(net);
1409
1410 return dev->msg_enable;
1411}
1412
1413static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1414{
1415 struct lan78xx_net *dev = netdev_priv(net);
1416
1417 dev->msg_enable = level;
1418}
1419
/* ethtool get_settings: report link parameters via the generic MII helper
 * and translate the PHY's MDI/MDI-X configuration into ethtool encoding.
 */
static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct mii_if_info *mii = &dev->mii;
	int ret;
	int buf;

	if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
		return -EOPNOTSUPP;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = mii_ethtool_gset(&dev->mii, cmd);

	/* PHY_EXT_MODE_CTRL lives in extended page space 1: select the
	 * page, read the register, then restore page 0.
	 */
	mii->mdio_write(mii->dev, mii->phy_id,
			PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
	buf = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
	mii->mdio_write(mii->dev, mii->phy_id,
			PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);

	/* Map the hardware MDIX field onto the ethtool constants; any other
	 * value leaves cmd->eth_tp_mdix* untouched.
	 */
	buf &= PHY_EXT_MODE_CTRL_MDIX_MASK_;
	if (buf == PHY_EXT_MODE_CTRL_AUTO_MDIX_) {
		cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
	} else if (buf == PHY_EXT_MODE_CTRL_MDI_) {
		cmd->eth_tp_mdix = ETH_TP_MDI;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
	} else if (buf == PHY_EXT_MODE_CTRL_MDI_X_) {
		cmd->eth_tp_mdix = ETH_TP_MDI_X;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1458
1459static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
1460{
1461 struct lan78xx_net *dev = netdev_priv(net);
1462 struct mii_if_info *mii = &dev->mii;
1463 int ret = 0;
1464 int temp;
1465
1466 if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
1467 return -EOPNOTSUPP;
1468
1469 ret = usb_autopm_get_interface(dev->intf);
1470 if (ret < 0)
1471 return ret;
1472
1473 if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
1474 if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI) {
1475 mii->mdio_write(mii->dev, mii->phy_id,
1476 PHY_EXT_GPIO_PAGE,
1477 PHY_EXT_GPIO_PAGE_SPACE_1);
1478 temp = mii->mdio_read(mii->dev, mii->phy_id,
1479 PHY_EXT_MODE_CTRL);
1480 temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
1481 mii->mdio_write(mii->dev, mii->phy_id,
1482 PHY_EXT_MODE_CTRL,
1483 temp | PHY_EXT_MODE_CTRL_MDI_);
1484 mii->mdio_write(mii->dev, mii->phy_id,
1485 PHY_EXT_GPIO_PAGE,
1486 PHY_EXT_GPIO_PAGE_SPACE_0);
1487 } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_X) {
1488 mii->mdio_write(mii->dev, mii->phy_id,
1489 PHY_EXT_GPIO_PAGE,
1490 PHY_EXT_GPIO_PAGE_SPACE_1);
1491 temp = mii->mdio_read(mii->dev, mii->phy_id,
1492 PHY_EXT_MODE_CTRL);
1493 temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
1494 mii->mdio_write(mii->dev, mii->phy_id,
1495 PHY_EXT_MODE_CTRL,
1496 temp | PHY_EXT_MODE_CTRL_MDI_X_);
1497 mii->mdio_write(mii->dev, mii->phy_id,
1498 PHY_EXT_GPIO_PAGE,
1499 PHY_EXT_GPIO_PAGE_SPACE_0);
1500 } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) {
1501 mii->mdio_write(mii->dev, mii->phy_id,
1502 PHY_EXT_GPIO_PAGE,
1503 PHY_EXT_GPIO_PAGE_SPACE_1);
1504 temp = mii->mdio_read(mii->dev, mii->phy_id,
1505 PHY_EXT_MODE_CTRL);
1506 temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
1507 mii->mdio_write(mii->dev, mii->phy_id,
1508 PHY_EXT_MODE_CTRL,
1509 temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
1510 mii->mdio_write(mii->dev, mii->phy_id,
1511 PHY_EXT_GPIO_PAGE,
1512 PHY_EXT_GPIO_PAGE_SPACE_0);
1513 }
1514 }
1515
1516 /* change speed & duplex */
1517 ret = mii_ethtool_sset(&dev->mii, cmd);
1518
1519 if (!cmd->autoneg) {
1520 /* force link down */
1521 temp = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
1522 mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR,
1523 temp | BMCR_LOOPBACK);
1524 mdelay(1);
1525 mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, temp);
1526 }
1527
1528 usb_autopm_put_interface(dev->intf);
1529
1530 return ret;
1531}
1532
/* ethtool entry points: link/autoneg control, driver info, message level,
 * EEPROM/OTP access, hardware statistics, Wake-on-LAN and EEE.
 */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= lan78xx_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_settings	= lan78xx_get_settings,
	.set_settings	= lan78xx_set_settings,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
};
1552
1553static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1554{
1555 struct lan78xx_net *dev = netdev_priv(netdev);
1556
1557 if (!netif_running(netdev))
1558 return -EINVAL;
1559
1560 return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
1561}
1562
1563static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1564{
1565 u32 addr_lo, addr_hi;
1566 int ret;
1567 u8 addr[6];
1568
1569 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1570 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1571
1572 addr[0] = addr_lo & 0xFF;
1573 addr[1] = (addr_lo >> 8) & 0xFF;
1574 addr[2] = (addr_lo >> 16) & 0xFF;
1575 addr[3] = (addr_lo >> 24) & 0xFF;
1576 addr[4] = addr_hi & 0xFF;
1577 addr[5] = (addr_hi >> 8) & 0xFF;
1578
1579 if (!is_valid_ether_addr(addr)) {
1580 /* reading mac address from EEPROM or OTP */
1581 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1582 addr) == 0) ||
1583 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1584 addr) == 0)) {
1585 if (is_valid_ether_addr(addr)) {
1586 /* eeprom values are valid so use them */
1587 netif_dbg(dev, ifup, dev->net,
1588 "MAC address read from EEPROM");
1589 } else {
1590 /* generate random MAC */
1591 random_ether_addr(addr);
1592 netif_dbg(dev, ifup, dev->net,
1593 "MAC address set to random addr");
1594 }
1595
1596 addr_lo = addr[0] | (addr[1] << 8) |
1597 (addr[2] << 16) | (addr[3] << 24);
1598 addr_hi = addr[4] | (addr[5] << 8);
1599
1600 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1601 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1602 } else {
1603 /* generate random MAC */
1604 random_ether_addr(addr);
1605 netif_dbg(dev, ifup, dev->net,
1606 "MAC address set to random addr");
1607 }
1608 }
1609
1610 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1611 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1612
1613 ether_addr_copy(dev->net->dev_addr, addr);
1614}
1615
1616static void lan78xx_mii_init(struct lan78xx_net *dev)
1617{
1618 /* Initialize MII structure */
1619 dev->mii.dev = dev->net;
1620 dev->mii.mdio_read = lan78xx_mdio_read;
1621 dev->mii.mdio_write = lan78xx_mdio_write;
1622 dev->mii.phy_id_mask = 0x1f;
1623 dev->mii.reg_num_mask = 0x1f;
1624 dev->mii.phy_id = INTERNAL_PHY_ID;
1625 dev->mii.supports_gmii = true;
1626}
1627
/* One-time PHY setup: advertise all speeds plus pause, select auto-MDIX,
 * strip 1000HD from the advertisement, and unmask link-change interrupts.
 * The MDIO accesses below are order-dependent (page selects bracket the
 * extended-register access).
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	int temp;
	struct mii_if_info *mii = &dev->mii;

	if ((!mii->mdio_write) || (!mii->mdio_read))
		return -EOPNOTSUPP;

	/* Advertise every 10/100 mode plus symmetric/asymmetric pause. */
	temp = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
	temp |= ADVERTISE_ALL;
	mii->mdio_write(mii->dev, mii->phy_id, MII_ADVERTISE,
			temp | ADVERTISE_CSMA |
			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	/* set to AUTOMDIX */
	mii->mdio_write(mii->dev, mii->phy_id,
			PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
	temp = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
	temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
	mii->mdio_write(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL,
			temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
	mii->mdio_write(mii->dev, mii->phy_id,
			PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
	/* Keep the software MDIX tracking field in sync. */
	dev->mdix_ctrl = ETH_TP_MDI_AUTO;

	/* MAC doesn't support 1000HD */
	temp = mii->mdio_read(mii->dev, mii->phy_id, MII_CTRL1000);
	mii->mdio_write(mii->dev, mii->phy_id, MII_CTRL1000,
			temp & ~ADVERTISE_1000HALF);

	/* clear interrupt (read-to-clear) and enable MDINT pin + link-change
	 * interrupt sources
	 */
	mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
	mii->mdio_write(mii->dev, mii->phy_id, PHY_VTSE_INT_MASK,
			PHY_VTSE_INT_MASK_MDINTPIN_EN_ |
			PHY_VTSE_INT_MASK_LINK_CHANGE_);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	return 0;
}
1668
1669static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1670{
1671 int ret = 0;
1672 u32 buf;
1673 bool rxenabled;
1674
1675 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1676
1677 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
1678
1679 if (rxenabled) {
1680 buf &= ~MAC_RX_RXEN_;
1681 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1682 }
1683
1684 /* add 4 to size for FCS */
1685 buf &= ~MAC_RX_MAX_SIZE_MASK_;
1686 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1687
1688 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1689
1690 if (rxenabled) {
1691 buf |= MAC_RX_RXEN_;
1692 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1693 }
1694
1695 return 0;
1696}
1697
/* Asynchronously unlink every not-yet-unlinked urb on queue q.  Returns
 * the number of urbs for which an unlink was successfully issued.  The
 * queue lock is dropped around usb_unlink_urb() (which can complete
 * synchronously and re-enter queue handling), so the scan restarts from
 * the head each iteration, skipping entries already in unlink_start.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* Find the first entry not yet marked unlink_start. */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
1742
1743static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
1744{
1745 struct lan78xx_net *dev = netdev_priv(netdev);
1746 int ll_mtu = new_mtu + netdev->hard_header_len;
1747 int old_hard_mtu = dev->hard_mtu;
1748 int old_rx_urb_size = dev->rx_urb_size;
1749 int ret;
1750
1751 if (new_mtu > MAX_SINGLE_PACKET_SIZE)
1752 return -EINVAL;
1753
1754 if (new_mtu <= 0)
1755 return -EINVAL;
1756 /* no second zero-length packet read wanted after mtu-sized packets */
1757 if ((ll_mtu % dev->maxpacket) == 0)
1758 return -EDOM;
1759
1760 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
1761
1762 netdev->mtu = new_mtu;
1763
1764 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
1765 if (dev->rx_urb_size == old_hard_mtu) {
1766 dev->rx_urb_size = dev->hard_mtu;
1767 if (dev->rx_urb_size > old_rx_urb_size) {
1768 if (netif_running(dev->net)) {
1769 unlink_urbs(dev, &dev->rxq);
1770 tasklet_schedule(&dev->bh);
1771 }
1772 }
1773 }
1774
1775 return 0;
1776}
1777
1778int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
1779{
1780 struct lan78xx_net *dev = netdev_priv(netdev);
1781 struct sockaddr *addr = p;
1782 u32 addr_lo, addr_hi;
1783 int ret;
1784
1785 if (netif_running(netdev))
1786 return -EBUSY;
1787
1788 if (!is_valid_ether_addr(addr->sa_data))
1789 return -EADDRNOTAVAIL;
1790
1791 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1792
1793 addr_lo = netdev->dev_addr[0] |
1794 netdev->dev_addr[1] << 8 |
1795 netdev->dev_addr[2] << 16 |
1796 netdev->dev_addr[3] << 24;
1797 addr_hi = netdev->dev_addr[4] |
1798 netdev->dev_addr[5] << 8;
1799
1800 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1801 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1802
1803 return 0;
1804}
1805
1806/* Enable or disable Rx checksum offload engine */
1807static int lan78xx_set_features(struct net_device *netdev,
1808 netdev_features_t features)
1809{
1810 struct lan78xx_net *dev = netdev_priv(netdev);
1811 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1812 unsigned long flags;
1813 int ret;
1814
1815 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1816
1817 if (features & NETIF_F_RXCSUM) {
1818 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
1819 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
1820 } else {
1821 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
1822 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
1823 }
1824
1825 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1826 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
1827 else
1828 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
1829
1830 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1831
1832 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1833
1834 return 0;
1835}
1836
1837static void lan78xx_deferred_vlan_write(struct work_struct *param)
1838{
1839 struct lan78xx_priv *pdata =
1840 container_of(param, struct lan78xx_priv, set_vlan);
1841 struct lan78xx_net *dev = pdata->dev;
1842
1843 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
1844 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
1845}
1846
1847static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
1848 __be16 proto, u16 vid)
1849{
1850 struct lan78xx_net *dev = netdev_priv(netdev);
1851 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1852 u16 vid_bit_index;
1853 u16 vid_dword_index;
1854
1855 vid_dword_index = (vid >> 5) & 0x7F;
1856 vid_bit_index = vid & 0x1F;
1857
1858 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
1859
1860 /* defer register writes to a sleepable context */
1861 schedule_work(&pdata->set_vlan);
1862
1863 return 0;
1864}
1865
1866static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
1867 __be16 proto, u16 vid)
1868{
1869 struct lan78xx_net *dev = netdev_priv(netdev);
1870 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1871 u16 vid_bit_index;
1872 u16 vid_dword_index;
1873
1874 vid_dword_index = (vid >> 5) & 0x7F;
1875 vid_bit_index = vid & 0x1F;
1876
1877 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
1878
1879 /* defer register writes to a sleepable context */
1880 schedule_work(&pdata->set_vlan);
1881
1882 return 0;
1883}
1884
/* Initialise USB Latency Tolerance Messaging.  If LTM is enabled, try to
 * load the six LTM register values from EEPROM (then OTP); on any failure
 * or when LTM is disabled, the registers are programmed to zero.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first; temp[0] is the blob length
		 * (24 = the six 32-bit LTM registers) and temp[1] a
		 * word-offset pointer to the data.
		 */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
1923
/* Full device bring-up: lite-reset the chip, establish the MAC address,
 * size the USB burst/queue parameters for the negotiated bus speed, set
 * FIFO thresholds, reset and initialise the PHY, then enable TX/RX paths
 * and kick autonegotiation.  The register sequence below is order-
 * dependent.  Returns 0 or -EIO on a reset timeout.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;

	/* Issue a Lite Reset and poll (up to ~1s) for it to self-clear. */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* Account for the TX command words prepended to every frame. */
	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* Burst cap and queue depths depend on the negotiated USB speed.
	 * NOTE(review): tx_qlen is not set in the full-speed branch —
	 * confirm whether that is intentional.
	 */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* Enable multiple-ethernet-frames per USB transfer. */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* Enable burst cap. */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* Clear pending interrupts and disable flow control. */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY: set the bit and poll (up to ~1s) for self-clear */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while (buf & PMT_CTL_PHY_RST_);

	lan78xx_mii_init(dev);

	ret = lan78xx_phy_init(dev);

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);

	buf |= MAC_CR_GMII_EN_;
	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;

	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable on PHY
	 * (MMD 7 register 0x3C, value 0x06 — presumably the PHY-side EEE
	 * advertisement; magic values, confirm against the datasheet)
	 */
	if (buf & MAC_CR_EEE_EN_)
		lan78xx_mmd_write(dev->net, dev->mii.phy_id, 0x07, 0x3C, 0x06);

	/* enable PHY interrupts */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	buf |= INT_ENP_PHY_INT;
	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);

	/* Enable the MAC transmitter and TX FIFO controller. */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* Enable the MAC receiver and RX FIFO controller. */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	if (!mii_nway_restart(&dev->mii))
		netif_dbg(dev, link, dev->net, "autoneg initiated");

	return 0;
}
2065
/* ndo_open: reset/initialise the hardware, start the interrupt urb used
 * for link-change notification, then open the queue and defer an initial
 * link check to the kevent worker.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	ret = lan78xx_reset(dev);
	if (ret < 0)
		goto done;

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* Assume no link until the deferred link-reset work says otherwise. */
	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	/* Drop the autopm reference on both the success and failure paths;
	 * lan78xx_stop() drops the reference taken for a running interface.
	 */
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2102
2103static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2104{
2105 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2106 DECLARE_WAITQUEUE(wait, current);
2107 int temp;
2108
2109 /* ensure there are no more active urbs */
2110 add_wait_queue(&unlink_wakeup, &wait);
2111 set_current_state(TASK_UNINTERRUPTIBLE);
2112 dev->wait = &unlink_wakeup;
2113 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2114
2115 /* maybe wait for deletions to finish. */
2116 while (!skb_queue_empty(&dev->rxq) &&
2117 !skb_queue_empty(&dev->txq) &&
2118 !skb_queue_empty(&dev->done)) {
2119 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2120 set_current_state(TASK_UNINTERRUPTIBLE);
2121 netif_dbg(dev, ifdown, dev->net,
2122 "waited for %d urb completions\n", temp);
2123 }
2124 set_current_state(TASK_RUNNING);
2125 dev->wait = NULL;
2126 remove_wait_queue(&unlink_wakeup, &wait);
2127}
2128
/* ndo_stop: quiesce the device — stop the queue, drain all urbs, kill the
 * interrupt urb, and shut down deferred work.  Releases the autopm
 * reference held while the interface was open.
 */
int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2159
/* Flatten a possibly-fragmented skb into one linear buffer so the TX
 * command words can be pushed in front of contiguous packet data.
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2164
2165static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2166 struct sk_buff *skb, gfp_t flags)
2167{
2168 u32 tx_cmd_a, tx_cmd_b;
2169
2170 if (skb_headroom(skb) < TX_OVERHEAD) {
2171 struct sk_buff *skb2;
2172
2173 skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
2174 dev_kfree_skb_any(skb);
2175 skb = skb2;
2176 if (!skb)
2177 return NULL;
2178 }
2179
2180 if (lan78xx_linearize(skb) < 0)
2181 return NULL;
2182
2183 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2184
2185 if (skb->ip_summed == CHECKSUM_PARTIAL)
2186 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2187
2188 tx_cmd_b = 0;
2189 if (skb_is_gso(skb)) {
2190 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2191
2192 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2193
2194 tx_cmd_a |= TX_CMD_A_LSO_;
2195 }
2196
2197 if (skb_vlan_tag_present(skb)) {
2198 tx_cmd_a |= TX_CMD_A_IVTG_;
2199 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2200 }
2201
2202 skb_push(skb, 4);
2203 cpu_to_le32s(&tx_cmd_b);
2204 memcpy(skb->data, &tx_cmd_b, 4);
2205
2206 skb_push(skb, 4);
2207 cpu_to_le32s(&tx_cmd_a);
2208 memcpy(skb->data, &tx_cmd_a, 4);
2209
2210 return skb;
2211}
2212
2213static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2214 struct sk_buff_head *list, enum skb_state state)
2215{
2216 unsigned long flags;
2217 enum skb_state old_state;
2218 struct skb_data *entry = (struct skb_data *)skb->cb;
2219
2220 spin_lock_irqsave(&list->lock, flags);
2221 old_state = entry->state;
2222 entry->state = state;
2223 if (!list->prev)
2224 BUG_ON(!list->prev);
2225 if (!list->next)
2226 BUG_ON(!list->next);
2227 if (!skb->prev || !skb->next)
2228 BUG_ON(true);
2229
2230 __skb_unlink(skb, list);
2231 spin_unlock(&list->lock);
2232 spin_lock(&dev->done.lock);
2233 if (!dev->done.prev)
2234 BUG_ON(!dev->done.prev);
2235 if (!dev->done.next)
2236 BUG_ON(!dev->done.next);
2237
2238 __skb_queue_tail(&dev->done, skb);
2239 if (skb_queue_len(&dev->done) == 1)
2240 tasklet_schedule(&dev->bh);
2241 spin_unlock_irqrestore(&dev->done.lock, flags);
2242
2243 return old_state;
2244}
2245
/* TX urb completion handler: account the result, react to error classes,
 * then hand the skb to the bottom half via the done queue.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled: clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		/* bus-level problems: stop feeding the device */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	if (skb)
		defer_bh(dev, skb, &dev->txq, tx_done);
}
2285
2286static void lan78xx_queue_skb(struct sk_buff_head *list,
2287 struct sk_buff *newsk, enum skb_state state)
2288{
2289 struct skb_data *entry = (struct skb_data *)newsk->cb;
2290
2291 __skb_queue_tail(list, newsk);
2292 entry->state = state;
2293}
2294
2295netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2296{
2297 struct lan78xx_net *dev = netdev_priv(net);
2298
2299 if (skb)
2300 skb_tx_timestamp(skb);
2301
2302 skb = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2303 if (skb) {
2304 skb_queue_tail(&dev->txq_pend, skb);
2305
2306 if (skb_queue_len(&dev->txq_pend) > 10)
2307 netif_stop_queue(net);
2308 } else {
2309 netif_dbg(dev, tx_err, dev->net,
2310 "lan78xx_tx_prep return NULL\n");
2311 dev->net->stats.tx_errors++;
2312 dev->net->stats.tx_dropped++;
2313 }
2314
2315 tasklet_schedule(&dev->bh);
2316
2317 return NETDEV_TX_OK;
2318}
2319
/* Scan the interface's altsettings for the bulk-in, bulk-out and
 * interrupt-in endpoints the driver needs, and derive dev->pipe_in /
 * dev->pipe_out from the first altsetting that provides both bulk
 * directions.  dev->ep_intr may legitimately end up NULL if no
 * interrupt-in endpoint was found (probe later overwrites it anyway).
 *
 * Returns 0 on success, -EINVAL when no suitable altsetting exists.
 */
int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only interrupt-IN is interesting */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				/* skip control/iso endpoints */
				continue;
			}
			/* remember the first endpoint of each kind */
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		/* first altsetting with both bulk directions wins */
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2377
2378static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2379{
2380 struct lan78xx_priv *pdata = NULL;
2381 int ret;
2382 int i;
2383
2384 ret = lan78xx_get_endpoints(dev, intf);
2385
2386 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2387
2388 pdata = (struct lan78xx_priv *)(dev->data[0]);
2389 if (!pdata) {
2390 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2391 return -ENOMEM;
2392 }
2393
2394 pdata->dev = dev;
2395
2396 spin_lock_init(&pdata->rfe_ctl_lock);
2397 mutex_init(&pdata->dataport_mutex);
2398
2399 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2400
2401 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2402 pdata->vlan_table[i] = 0;
2403
2404 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2405
2406 dev->net->features = 0;
2407
2408 if (DEFAULT_TX_CSUM_ENABLE)
2409 dev->net->features |= NETIF_F_HW_CSUM;
2410
2411 if (DEFAULT_RX_CSUM_ENABLE)
2412 dev->net->features |= NETIF_F_RXCSUM;
2413
2414 if (DEFAULT_TSO_CSUM_ENABLE)
2415 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2416
2417 dev->net->hw_features = dev->net->features;
2418
2419 /* Init all registers */
2420 ret = lan78xx_reset(dev);
2421
2422 dev->net->flags |= IFF_MULTICAST;
2423
2424 pdata->wol = WAKE_MAGIC;
2425
2426 return 0;
2427}
2428
2429static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2430{
2431 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2432
2433 if (pdata) {
2434 netif_dbg(dev, ifdown, dev->net, "free pdata");
2435 kfree(pdata);
2436 pdata = NULL;
2437 dev->data[0] = 0;
2438 }
2439}
2440
2441static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2442 struct sk_buff *skb,
2443 u32 rx_cmd_a, u32 rx_cmd_b)
2444{
2445 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2446 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2447 skb->ip_summed = CHECKSUM_NONE;
2448 } else {
2449 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2450 skb->ip_summed = CHECKSUM_COMPLETE;
2451 }
2452}
2453
2454void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2455{
2456 int status;
2457
2458 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2459 skb_queue_tail(&dev->rxq_pause, skb);
2460 return;
2461 }
2462
2463 skb->protocol = eth_type_trans(skb, dev->net);
2464 dev->net->stats.rx_packets++;
2465 dev->net->stats.rx_bytes += skb->len;
2466
2467 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2468 skb->len + sizeof(struct ethhdr), skb->protocol);
2469 memset(skb->cb, 0, sizeof(struct skb_data));
2470
2471 if (skb_defer_rx_timestamp(skb))
2472 return;
2473
2474 status = netif_rx(skb);
2475 if (status != NET_RX_SUCCESS)
2476 netif_dbg(dev, rx_err, dev->net,
2477 "netif_rx status %d\n", status);
2478}
2479
2480static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2481{
2482 if (skb->len < dev->net->hard_header_len)
2483 return 0;
2484
2485 while (skb->len > 0) {
2486 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2487 u16 rx_cmd_c;
2488 struct sk_buff *skb2;
2489 unsigned char *packet;
2490
2491 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2492 le32_to_cpus(&rx_cmd_a);
2493 skb_pull(skb, sizeof(rx_cmd_a));
2494
2495 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2496 le32_to_cpus(&rx_cmd_b);
2497 skb_pull(skb, sizeof(rx_cmd_b));
2498
2499 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2500 le16_to_cpus(&rx_cmd_c);
2501 skb_pull(skb, sizeof(rx_cmd_c));
2502
2503 packet = skb->data;
2504
2505 /* get the packet length */
2506 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2507 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2508
2509 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2510 netif_dbg(dev, rx_err, dev->net,
2511 "Error rx_cmd_a=0x%08x", rx_cmd_a);
2512 } else {
2513 /* last frame in this batch */
2514 if (skb->len == size) {
2515 lan78xx_rx_csum_offload(dev, skb,
2516 rx_cmd_a, rx_cmd_b);
2517
2518 skb_trim(skb, skb->len - 4); /* remove fcs */
2519 skb->truesize = size + sizeof(struct sk_buff);
2520
2521 return 1;
2522 }
2523
2524 skb2 = skb_clone(skb, GFP_ATOMIC);
2525 if (unlikely(!skb2)) {
2526 netdev_warn(dev->net, "Error allocating skb");
2527 return 0;
2528 }
2529
2530 skb2->len = size;
2531 skb2->data = packet;
2532 skb_set_tail_pointer(skb2, size);
2533
2534 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2535
2536 skb_trim(skb2, skb2->len - 4); /* remove fcs */
2537 skb2->truesize = size + sizeof(struct sk_buff);
2538
2539 lan78xx_skb_return(dev, skb2);
2540 }
2541
2542 skb_pull(skb, size);
2543
2544 /* padding bytes before the next frame starts */
2545 if (skb->len)
2546 skb_pull(skb, align_count);
2547 }
2548
2549 if (unlikely(skb->len < 0)) {
2550 netdev_warn(dev->net, "invalid rx length<0 %d", skb->len);
2551 return 0;
2552 }
2553
2554 return 1;
2555}
2556
2557static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2558{
2559 if (!lan78xx_rx(dev, skb)) {
2560 dev->net->stats.rx_errors++;
2561 goto done;
2562 }
2563
2564 if (skb->len) {
2565 lan78xx_skb_return(dev, skb);
2566 return;
2567 }
2568
2569 netif_dbg(dev, rx_err, dev->net, "drop\n");
2570 dev->net->stats.rx_errors++;
2571done:
2572 skb_queue_tail(&dev->done, skb);
2573}
2574
2575static void rx_complete(struct urb *urb);
2576
/* Allocate an rx skb, bind it to @urb and submit the URB on the bulk-in
 * pipe.  On success the skb is queued on dev->rxq in rx_start state and
 * ownership passes to the completion handler; on any failure both the
 * skb and the URB are freed here.
 *
 * Returns 0 on success or a negative errno.  -ENOLINK means the device
 * is not in a state that accepts rx URBs; the caller stops refilling.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* stash urb/dev in skb->cb so rx_complete() can find them */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq.lock serializes the state checks against halt/sleep/unlink */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* endpoint stalled; clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* transient failure: retry from the tasklet */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
2636
/* Completion handler for bulk-in (rx) URBs; runs in interrupt context.
 *
 * Classifies the URB status into an skb_state (rx_done for delivery via
 * the tasklet, rx_cleanup for discard) and hands the skb to defer_bh().
 * For error paths that should NOT resubmit, the URB pointer is moved
 * back into entry->urb (and the local set to NULL) so the tasklet frees
 * it together with the skb; otherwise the same URB is reused immediately
 * for the next rx_submit().
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* success, but discard runt buffers */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET: /* async unlink */
	case -ESHUTDOWN: /* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* let the tasklet free the urb; do not resubmit */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	/* reuse the URB for the next receive unless we are unlinking */
	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
2706
/* Transmit bottom half: batch pending frames from txq_pend into a single
 * URB and submit it on the bulk-out pipe.
 *
 * Non-GSO frames are copied back-to-back (each padded to a 4-byte
 * boundary) into one freshly allocated skb.  A GSO frame is only taken
 * when it is first in the queue and is then submitted on its own.
 *
 * NOTE(review): in the GSO path the frame is dequeued into skb2 but the
 * code keeps using the loop variable skb; they alias only because
 * pkt_cnt == 0 guarantees skb points at the queue head — worth
 * confirming if this loop is ever changed.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	/* pass 1: decide how many frames fit into one aggregate buffer */
	skb_totallen = 0;
	pkt_cnt = 0;
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			length = skb->len;
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	/* pass 2: dequeue exactly the frames counted above and pack them */
	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		} else {
			/* queue shrank underneath us — must not happen */
			BUG_ON(true);
		}
	}

	length = skb_totallen;

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;

	/* txq.lock guards submit vs. suspend/unlink; the autopm ref is
	 * released in tx_complete() (or on the error paths below)
	 */
	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		dev->net->trans_start = jiffies;
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		/* note: also the target of early failures above, where
		 * skb and/or urb may be NULL
		 */
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
2832
2833static void lan78xx_rx_bh(struct lan78xx_net *dev)
2834{
2835 struct urb *urb;
2836 int i;
2837
2838 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
2839 for (i = 0; i < 10; i++) {
2840 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
2841 break;
2842 urb = usb_alloc_urb(0, GFP_ATOMIC);
2843 if (urb)
2844 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
2845 return;
2846 }
2847
2848 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
2849 tasklet_schedule(&dev->bh);
2850 }
2851 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
2852 netif_wake_queue(dev->net);
2853}
2854
2855static void lan78xx_bh(unsigned long param)
2856{
2857 struct lan78xx_net *dev = (struct lan78xx_net *)param;
2858 struct sk_buff *skb;
2859 struct skb_data *entry;
2860
2861 if (!dev->done.prev)
2862 BUG_ON(!dev->done.prev);
2863 if (!dev->done.next)
2864 BUG_ON(!dev->done.next);
2865
2866 while ((skb = skb_dequeue(&dev->done))) {
2867 entry = (struct skb_data *)(skb->cb);
2868 switch (entry->state) {
2869 case rx_done:
2870 entry->state = rx_cleanup;
2871 rx_process(dev, skb);
2872 continue;
2873 case tx_done:
2874 usb_free_urb(entry->urb);
2875 dev_kfree_skb(skb);
2876 continue;
2877 case rx_cleanup:
2878 usb_free_urb(entry->urb);
2879 dev_kfree_skb(skb);
2880 continue;
2881 default:
2882 netdev_dbg(dev->net, "skb state %d\n", entry->state);
2883 return;
2884 }
2885 if (!dev->done.prev)
2886 BUG_ON(!dev->done.prev);
2887 if (!dev->done.next)
2888 BUG_ON(!dev->done.next);
2889 }
2890
2891 if (netif_device_present(dev->net) && netif_running(dev->net)) {
2892 if (!skb_queue_empty(&dev->txq_pend))
2893 lan78xx_tx_bh(dev);
2894
2895 if (!timer_pending(&dev->delay) &&
2896 !test_bit(EVENT_RX_HALT, &dev->flags))
2897 lan78xx_rx_bh(dev);
2898 }
2899}
2900
/* Deferred (process-context) event handler scheduled via
 * lan78xx_defer_kevent(): clears tx/rx endpoint stalls and performs
 * link resets that cannot be done from interrupt context.
 *
 * NOTE(review): the error labels (fail_pipe, fail_halt, skip_reset)
 * deliberately jump INTO the body of an if statement so the failure is
 * logged with the same netdev_err/netdev_info call as the post-clear
 * error path.  Unusual but valid C; be careful when restructuring.
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt(dev->udev, dev->pipe_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
fail_pipe:
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			/* stall cleared; resume transmitting */
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
fail_halt:
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			/* stall cleared; let the tasklet refill rx URBs */
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		int ret = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (lan78xx_link_reset(dev) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}
}
2967
2968static void intr_complete(struct urb *urb)
2969{
2970 struct lan78xx_net *dev = urb->context;
2971 int status = urb->status;
2972
2973 switch (status) {
2974 /* success */
2975 case 0:
2976 lan78xx_status(dev, urb);
2977 break;
2978
2979 /* software-driven interface shutdown */
2980 case -ENOENT: /* urb killed */
2981 case -ESHUTDOWN: /* hardware gone */
2982 netif_dbg(dev, ifdown, dev->net,
2983 "intr shutdown, code %d\n", status);
2984 return;
2985
2986 /* NOTE: not throttling like RX/TX, since this endpoint
2987 * already polls infrequently
2988 */
2989 default:
2990 netdev_dbg(dev->net, "intr status %d\n", status);
2991 break;
2992 }
2993
2994 if (!netif_running(dev->net))
2995 return;
2996
2997 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
2998 status = usb_submit_urb(urb, GFP_ATOMIC);
2999 if (status != 0)
3000 netif_err(dev, timer, dev->net,
3001 "intr resubmit --> %d\n", status);
3002}
3003
/* USB disconnect callback: tear down in the reverse order of probe.
 * Ordering matters: unregister the netdev first (stops new tx paths),
 * flush deferred work and anchored URBs, release driver-private data,
 * kill the interrupt URB, then free the netdev and drop the udev
 * reference taken in probe.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);

	net = dev->net;
	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* drop URBs parked in dev->deferred while suspended */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
3032
3033void lan78xx_tx_timeout(struct net_device *net)
3034{
3035 struct lan78xx_net *dev = netdev_priv(net);
3036
3037 unlink_urbs(dev, &dev->txq);
3038 tasklet_schedule(&dev->bh);
3039}
3040
/* net_device callbacks for the lan78xx interface */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
};
3055
3056static int lan78xx_probe(struct usb_interface *intf,
3057 const struct usb_device_id *id)
3058{
3059 struct lan78xx_net *dev;
3060 struct net_device *netdev;
3061 struct usb_device *udev;
3062 int ret;
3063 unsigned maxp;
3064 unsigned period;
3065 u8 *buf = NULL;
3066
3067 udev = interface_to_usbdev(intf);
3068 udev = usb_get_dev(udev);
3069
3070 ret = -ENOMEM;
3071 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3072 if (!netdev) {
3073 dev_err(&intf->dev, "Error: OOM\n");
3074 goto out1;
3075 }
3076
3077 /* netdev_printk() needs this */
3078 SET_NETDEV_DEV(netdev, &intf->dev);
3079
3080 dev = netdev_priv(netdev);
3081 dev->udev = udev;
3082 dev->intf = intf;
3083 dev->net = netdev;
3084 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3085 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3086
3087 skb_queue_head_init(&dev->rxq);
3088 skb_queue_head_init(&dev->txq);
3089 skb_queue_head_init(&dev->done);
3090 skb_queue_head_init(&dev->rxq_pause);
3091 skb_queue_head_init(&dev->txq_pend);
3092 mutex_init(&dev->phy_mutex);
3093
3094 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3095 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3096 init_usb_anchor(&dev->deferred);
3097
3098 netdev->netdev_ops = &lan78xx_netdev_ops;
3099 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3100 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3101
3102 ret = lan78xx_bind(dev, intf);
3103 if (ret < 0)
3104 goto out2;
3105 strcpy(netdev->name, "eth%d");
3106
3107 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3108 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3109
3110 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3111 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3112 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3113
3114 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3115 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3116
3117 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3118 dev->ep_intr->desc.bEndpointAddress &
3119 USB_ENDPOINT_NUMBER_MASK);
3120 period = dev->ep_intr->desc.bInterval;
3121
3122 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3123 buf = kmalloc(maxp, GFP_KERNEL);
3124 if (buf) {
3125 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3126 if (!dev->urb_intr) {
3127 kfree(buf);
3128 goto out3;
3129 } else {
3130 usb_fill_int_urb(dev->urb_intr, dev->udev,
3131 dev->pipe_intr, buf, maxp,
3132 intr_complete, dev, period);
3133 }
3134 }
3135
3136 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3137
3138 /* driver requires remote-wakeup capability during autosuspend. */
3139 intf->needs_remote_wakeup = 1;
3140
3141 ret = register_netdev(netdev);
3142 if (ret != 0) {
3143 netif_err(dev, probe, netdev, "couldn't register the device\n");
3144 goto out2;
3145 }
3146
3147 usb_set_intfdata(intf, dev);
3148
3149 ret = device_set_wakeup_enable(&udev->dev, true);
3150
3151 /* Default delay of 2sec has more overhead than advantage.
3152 * Set to 10sec as default.
3153 */
3154 pm_runtime_set_autosuspend_delay(&udev->dev,
3155 DEFAULT_AUTOSUSPEND_DELAY);
3156
3157 return 0;
3158
3159 usb_set_intfdata(intf, NULL);
3160out3:
3161 lan78xx_unbind(dev, intf);
3162out2:
3163 free_netdev(netdev);
3164out1:
3165 usb_put_dev(udev);
3166
3167 return ret;
3168}
3169
3170static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3171{
3172 const u16 crc16poly = 0x8005;
3173 int i;
3174 u16 bit, crc, msb;
3175 u8 data;
3176
3177 crc = 0xFFFF;
3178 for (i = 0; i < len; i++) {
3179 data = *buf++;
3180 for (bit = 0; bit < 8; bit++) {
3181 msb = crc >> 15;
3182 crc <<= 1;
3183
3184 if (msb ^ (u16)(data & 1)) {
3185 crc ^= crc16poly;
3186 crc |= (u16)0x0001U;
3187 }
3188 data >>= 1;
3189 }
3190 }
3191
3192 return crc;
3193}
3194
/* Program the chip's wake-on-LAN machinery for system suspend according
 * to the @wol bitmask (WAKE_PHY/MAGIC/BCAST/MCAST/UCAST/ARP), then
 * re-enable the receiver so wake frames can be seen.
 *
 * temp_wucsr accumulates wake-source enables; temp_pmt_ctl accumulates
 * the power-mode bits.  Each WUF_CFG/WUF_MASK slot consumed increments
 * mask_index.  Always returns 0; individual register I/O results are
 * not checked (matches the style of the rest of this file).
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* first bytes of an IPv4 multicast MAC (01:00:5e) */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	/* first bytes of an IPv6 multicast MAC (33:33); third byte unused */
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	/* ethertype 0x0806 = ARP */
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* quiesce the MAC while reprogramming wake filters */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear previous wake configuration and latched wake sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* wipe all wake-up frame filter slots before reprogramming */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x7 = match the first three bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3 = match the first two bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask selects the two ethertype bytes at offset 12/13 */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable rx so wake frames can be received while suspended */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3337
/* USB suspend callback (both autosuspend and system sleep).
 *
 * On the first suspend (suspend_count 0 -> 1): refuses autosuspend while
 * tx work is pending (-EBUSY), otherwise marks the device asleep, stops
 * the MAC, detaches the netdev while killing all in-flight URBs, then
 * re-attaches it (tx submitted afterwards is parked on dev->deferred by
 * lan78xx_tx_bh() until resume).
 *
 * Then, for autosuspend, arms "wake on any good frame"; for system
 * sleep, programs the user's WoL settings via lan78xx_set_suspend().
 * Returns 0 or -EBUSY.  Register I/O results are not checked.
 */
int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	ret = 0;
	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			/* reset wake config and latched wake sources */
			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* clear any latched wake-up status */
			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* rx stays enabled so wake frames are seen */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

out:
	return ret;
}
3431
/* USB resume callback: on the last nested resume (suspend_count back to
 * 0) restart the interrupt URB, flush tx URBs that were parked on
 * dev->deferred during suspend, clear EVENT_DEV_ASLEEP and restart the
 * queues.  Finally clears the wake-up machinery, acknowledges latched
 * wake events and re-enables the transmitter.
 *
 * Always returns 0; register I/O results are not checked.
 */
int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->urb_intr, GFP_NOIO);

		/* submit the tx URBs deferred while asleep; on failure the
		 * autopm reference taken at queue time is dropped here
		 */
		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* disable wake sources and clear latched wake status */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3492
/* USB reset_resume callback: the device lost its state across the
 * suspend, so re-run the full hardware init before the normal resume
 * path restarts traffic.
 */
int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *priv = usb_get_intfdata(intf);

	lan78xx_reset(priv);

	return lan78xx_resume(intf);
}
3500
/* USB IDs this driver binds to */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
3513
/* USB driver registration; supports runtime PM (autosuspend) and
 * disables hub-initiated LPM as required by the hardware.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");