]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/usb/lan78xx.c
net: lan78xx: fix transfer-buffer memory leak
[mirror_ubuntu-jammy-kernel.git] / drivers / net / usb / lan78xx.c
CommitLineData
6be665a5 1// SPDX-License-Identifier: GPL-2.0+
55d7de9d
WH
2/*
3 * Copyright (C) 2015 Microchip Technology
55d7de9d
WH
4 */
5#include <linux/version.h>
6#include <linux/module.h>
7#include <linux/netdevice.h>
8#include <linux/etherdevice.h>
9#include <linux/ethtool.h>
55d7de9d
WH
10#include <linux/usb.h>
11#include <linux/crc32.h>
12#include <linux/signal.h>
13#include <linux/slab.h>
14#include <linux/if_vlan.h>
15#include <linux/uaccess.h>
3c1bcc86 16#include <linux/linkmode.h>
55d7de9d
WH
17#include <linux/list.h>
18#include <linux/ip.h>
19#include <linux/ipv6.h>
20#include <linux/mdio.h>
c6e970a0 21#include <linux/phy.h>
55d7de9d 22#include <net/ip6_checksum.h>
ce896476 23#include <net/vxlan.h>
cc89c323
WH
24#include <linux/interrupt.h>
25#include <linux/irqdomain.h>
26#include <linux/irq.h>
27#include <linux/irqchip/chained_irq.h>
bdfba55e 28#include <linux/microchipphy.h>
89b36fb5 29#include <linux/phy_fixed.h>
1827b067 30#include <linux/of_mdio.h>
760db29b 31#include <linux/of_net.h>
55d7de9d
WH
32#include "lan78xx.h"
33
34#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
35#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
36#define DRIVER_NAME "lan78xx"
55d7de9d
WH
37
38#define TX_TIMEOUT_JIFFIES (5 * HZ)
39#define THROTTLE_JIFFIES (HZ / 8)
40#define UNLINK_TIMEOUT_MS 3
41
42#define RX_MAX_QUEUE_MEMORY (60 * 1518)
43
44#define SS_USB_PKT_SIZE (1024)
45#define HS_USB_PKT_SIZE (512)
46#define FS_USB_PKT_SIZE (64)
47
48#define MAX_RX_FIFO_SIZE (12 * 1024)
49#define MAX_TX_FIFO_SIZE (12 * 1024)
50#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
51#define DEFAULT_BULK_IN_DELAY (0x0800)
52#define MAX_SINGLE_PACKET_SIZE (9000)
53#define DEFAULT_TX_CSUM_ENABLE (true)
54#define DEFAULT_RX_CSUM_ENABLE (true)
55#define DEFAULT_TSO_CSUM_ENABLE (true)
56#define DEFAULT_VLAN_FILTER_ENABLE (true)
ec21ecf0 57#define DEFAULT_VLAN_RX_OFFLOAD (true)
55d7de9d
WH
58#define TX_OVERHEAD (8)
59#define RXW_PADDING 2
60
61#define LAN78XX_USB_VENDOR_ID (0x0424)
62#define LAN7800_USB_PRODUCT_ID (0x7800)
63#define LAN7850_USB_PRODUCT_ID (0x7850)
02dc1f3d 64#define LAN7801_USB_PRODUCT_ID (0x7801)
55d7de9d
WH
65#define LAN78XX_EEPROM_MAGIC (0x78A5)
66#define LAN78XX_OTP_MAGIC (0x78F3)
67
68#define MII_READ 1
69#define MII_WRITE 0
70
71#define EEPROM_INDICATOR (0xA5)
72#define EEPROM_MAC_OFFSET (0x01)
73#define MAX_EEPROM_SIZE 512
74#define OTP_INDICATOR_1 (0xF3)
75#define OTP_INDICATOR_2 (0xF7)
76
77#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
78 WAKE_MCAST | WAKE_BCAST | \
79 WAKE_ARP | WAKE_MAGIC)
80
81/* USB related defines */
82#define BULK_IN_PIPE 1
83#define BULK_OUT_PIPE 2
84
85/* default autosuspend delay (mSec)*/
86#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
87
20ff5565
WH
88/* statistic update interval (mSec) */
89#define STAT_UPDATE_TIMER (1 * 1000)
90
cc89c323
WH
91/* defines interrupts from interrupt EP */
92#define MAX_INT_EP (32)
93#define INT_EP_INTEP (31)
94#define INT_EP_OTP_WR_DONE (28)
95#define INT_EP_EEE_TX_LPI_START (26)
96#define INT_EP_EEE_TX_LPI_STOP (25)
97#define INT_EP_EEE_RX_LPI (24)
98#define INT_EP_MAC_RESET_TIMEOUT (23)
99#define INT_EP_RDFO (22)
100#define INT_EP_TXE (21)
101#define INT_EP_USB_STATUS (20)
102#define INT_EP_TX_DIS (19)
103#define INT_EP_RX_DIS (18)
104#define INT_EP_PHY (17)
105#define INT_EP_DP (16)
106#define INT_EP_MAC_ERR (15)
107#define INT_EP_TDFU (14)
108#define INT_EP_TDFO (13)
109#define INT_EP_UTX (12)
110#define INT_EP_GPIO_11 (11)
111#define INT_EP_GPIO_10 (10)
112#define INT_EP_GPIO_9 (9)
113#define INT_EP_GPIO_8 (8)
114#define INT_EP_GPIO_7 (7)
115#define INT_EP_GPIO_6 (6)
116#define INT_EP_GPIO_5 (5)
117#define INT_EP_GPIO_4 (4)
118#define INT_EP_GPIO_3 (3)
119#define INT_EP_GPIO_2 (2)
120#define INT_EP_GPIO_1 (1)
121#define INT_EP_GPIO_0 (0)
122
55d7de9d
WH
123static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
124 "RX FCS Errors",
125 "RX Alignment Errors",
126 "Rx Fragment Errors",
127 "RX Jabber Errors",
128 "RX Undersize Frame Errors",
129 "RX Oversize Frame Errors",
130 "RX Dropped Frames",
131 "RX Unicast Byte Count",
132 "RX Broadcast Byte Count",
133 "RX Multicast Byte Count",
134 "RX Unicast Frames",
135 "RX Broadcast Frames",
136 "RX Multicast Frames",
137 "RX Pause Frames",
138 "RX 64 Byte Frames",
139 "RX 65 - 127 Byte Frames",
140 "RX 128 - 255 Byte Frames",
141 "RX 256 - 511 Bytes Frames",
142 "RX 512 - 1023 Byte Frames",
143 "RX 1024 - 1518 Byte Frames",
144 "RX Greater 1518 Byte Frames",
145 "EEE RX LPI Transitions",
146 "EEE RX LPI Time",
147 "TX FCS Errors",
148 "TX Excess Deferral Errors",
149 "TX Carrier Errors",
150 "TX Bad Byte Count",
151 "TX Single Collisions",
152 "TX Multiple Collisions",
153 "TX Excessive Collision",
154 "TX Late Collisions",
155 "TX Unicast Byte Count",
156 "TX Broadcast Byte Count",
157 "TX Multicast Byte Count",
158 "TX Unicast Frames",
159 "TX Broadcast Frames",
160 "TX Multicast Frames",
161 "TX Pause Frames",
162 "TX 64 Byte Frames",
163 "TX 65 - 127 Byte Frames",
164 "TX 128 - 255 Byte Frames",
165 "TX 256 - 511 Bytes Frames",
166 "TX 512 - 1023 Byte Frames",
167 "TX 1024 - 1518 Byte Frames",
168 "TX Greater 1518 Byte Frames",
169 "EEE TX LPI Transitions",
170 "EEE TX LPI Time",
171};
172
173struct lan78xx_statstage {
174 u32 rx_fcs_errors;
175 u32 rx_alignment_errors;
176 u32 rx_fragment_errors;
177 u32 rx_jabber_errors;
178 u32 rx_undersize_frame_errors;
179 u32 rx_oversize_frame_errors;
180 u32 rx_dropped_frames;
181 u32 rx_unicast_byte_count;
182 u32 rx_broadcast_byte_count;
183 u32 rx_multicast_byte_count;
184 u32 rx_unicast_frames;
185 u32 rx_broadcast_frames;
186 u32 rx_multicast_frames;
187 u32 rx_pause_frames;
188 u32 rx_64_byte_frames;
189 u32 rx_65_127_byte_frames;
190 u32 rx_128_255_byte_frames;
191 u32 rx_256_511_bytes_frames;
192 u32 rx_512_1023_byte_frames;
193 u32 rx_1024_1518_byte_frames;
194 u32 rx_greater_1518_byte_frames;
195 u32 eee_rx_lpi_transitions;
196 u32 eee_rx_lpi_time;
197 u32 tx_fcs_errors;
198 u32 tx_excess_deferral_errors;
199 u32 tx_carrier_errors;
200 u32 tx_bad_byte_count;
201 u32 tx_single_collisions;
202 u32 tx_multiple_collisions;
203 u32 tx_excessive_collision;
204 u32 tx_late_collisions;
205 u32 tx_unicast_byte_count;
206 u32 tx_broadcast_byte_count;
207 u32 tx_multicast_byte_count;
208 u32 tx_unicast_frames;
209 u32 tx_broadcast_frames;
210 u32 tx_multicast_frames;
211 u32 tx_pause_frames;
212 u32 tx_64_byte_frames;
213 u32 tx_65_127_byte_frames;
214 u32 tx_128_255_byte_frames;
215 u32 tx_256_511_bytes_frames;
216 u32 tx_512_1023_byte_frames;
217 u32 tx_1024_1518_byte_frames;
218 u32 tx_greater_1518_byte_frames;
219 u32 eee_tx_lpi_transitions;
220 u32 eee_tx_lpi_time;
221};
222
20ff5565
WH
223struct lan78xx_statstage64 {
224 u64 rx_fcs_errors;
225 u64 rx_alignment_errors;
226 u64 rx_fragment_errors;
227 u64 rx_jabber_errors;
228 u64 rx_undersize_frame_errors;
229 u64 rx_oversize_frame_errors;
230 u64 rx_dropped_frames;
231 u64 rx_unicast_byte_count;
232 u64 rx_broadcast_byte_count;
233 u64 rx_multicast_byte_count;
234 u64 rx_unicast_frames;
235 u64 rx_broadcast_frames;
236 u64 rx_multicast_frames;
237 u64 rx_pause_frames;
238 u64 rx_64_byte_frames;
239 u64 rx_65_127_byte_frames;
240 u64 rx_128_255_byte_frames;
241 u64 rx_256_511_bytes_frames;
242 u64 rx_512_1023_byte_frames;
243 u64 rx_1024_1518_byte_frames;
244 u64 rx_greater_1518_byte_frames;
245 u64 eee_rx_lpi_transitions;
246 u64 eee_rx_lpi_time;
247 u64 tx_fcs_errors;
248 u64 tx_excess_deferral_errors;
249 u64 tx_carrier_errors;
250 u64 tx_bad_byte_count;
251 u64 tx_single_collisions;
252 u64 tx_multiple_collisions;
253 u64 tx_excessive_collision;
254 u64 tx_late_collisions;
255 u64 tx_unicast_byte_count;
256 u64 tx_broadcast_byte_count;
257 u64 tx_multicast_byte_count;
258 u64 tx_unicast_frames;
259 u64 tx_broadcast_frames;
260 u64 tx_multicast_frames;
261 u64 tx_pause_frames;
262 u64 tx_64_byte_frames;
263 u64 tx_65_127_byte_frames;
264 u64 tx_128_255_byte_frames;
265 u64 tx_256_511_bytes_frames;
266 u64 tx_512_1023_byte_frames;
267 u64 tx_1024_1518_byte_frames;
268 u64 tx_greater_1518_byte_frames;
269 u64 eee_tx_lpi_transitions;
270 u64 eee_tx_lpi_time;
271};
272
49621865
RC
273static u32 lan78xx_regs[] = {
274 ID_REV,
275 INT_STS,
276 HW_CFG,
277 PMT_CTL,
278 E2P_CMD,
279 E2P_DATA,
280 USB_STATUS,
281 VLAN_TYPE,
282 MAC_CR,
283 MAC_RX,
284 MAC_TX,
285 FLOW,
286 ERR_STS,
287 MII_ACC,
288 MII_DATA,
289 EEE_TX_LPI_REQ_DLY,
290 EEE_TW_TX_SYS,
291 EEE_TX_LPI_REM_DLY,
292 WUCSR
293};
294
295#define PHY_REG_SIZE (32 * sizeof(u32))
296
55d7de9d
WH
297struct lan78xx_net;
298
299struct lan78xx_priv {
300 struct lan78xx_net *dev;
301 u32 rfe_ctl;
302 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
303 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
304 u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
305 struct mutex dataport_mutex; /* for dataport access */
306 spinlock_t rfe_ctl_lock; /* for rfe register access */
307 struct work_struct set_multicast;
308 struct work_struct set_vlan;
309 u32 wol;
310};
311
/* lifecycle state of the skb attached to each URB (kept in skb->cb) */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
321
322struct skb_data { /* skb->cb is one of these */
323 struct urb *urb;
324 struct lan78xx_net *dev;
325 enum skb_state state;
326 size_t length;
74d79a2e 327 int num_of_packet;
55d7de9d
WH
328};
329
330struct usb_context {
331 struct usb_ctrlrequest req;
332 struct lan78xx_net *dev;
333};
334
335#define EVENT_TX_HALT 0
336#define EVENT_RX_HALT 1
337#define EVENT_RX_MEMORY 2
338#define EVENT_STS_SPLIT 3
339#define EVENT_LINK_RESET 4
340#define EVENT_RX_PAUSED 5
341#define EVENT_DEV_WAKING 6
342#define EVENT_DEV_ASLEEP 7
343#define EVENT_DEV_OPEN 8
20ff5565
WH
344#define EVENT_STAT_UPDATE 9
345
346struct statstage {
347 struct mutex access_lock; /* for stats access */
348 struct lan78xx_statstage saved;
349 struct lan78xx_statstage rollover_count;
350 struct lan78xx_statstage rollover_max;
351 struct lan78xx_statstage64 curr_stat;
352};
55d7de9d 353
cc89c323
WH
354struct irq_domain_data {
355 struct irq_domain *irqdomain;
356 unsigned int phyirq;
357 struct irq_chip *irqchip;
358 irq_flow_handler_t irq_handler;
359 u32 irqenable;
360 struct mutex irq_lock; /* for irq bus access */
361};
362
55d7de9d
WH
363struct lan78xx_net {
364 struct net_device *net;
365 struct usb_device *udev;
366 struct usb_interface *intf;
367 void *driver_priv;
368
369 int rx_qlen;
370 int tx_qlen;
371 struct sk_buff_head rxq;
372 struct sk_buff_head txq;
373 struct sk_buff_head done;
374 struct sk_buff_head rxq_pause;
375 struct sk_buff_head txq_pend;
376
377 struct tasklet_struct bh;
378 struct delayed_work wq;
379
380 struct usb_host_endpoint *ep_blkin;
381 struct usb_host_endpoint *ep_blkout;
382 struct usb_host_endpoint *ep_intr;
383
384 int msg_enable;
385
386 struct urb *urb_intr;
387 struct usb_anchor deferred;
388
389 struct mutex phy_mutex; /* for phy access */
390 unsigned pipe_in, pipe_out, pipe_intr;
391
392 u32 hard_mtu; /* count any extra framing */
393 size_t rx_urb_size; /* size for rx urbs */
394
395 unsigned long flags;
396
397 wait_queue_head_t *wait;
398 unsigned char suspend_count;
399
400 unsigned maxpacket;
401 struct timer_list delay;
20ff5565 402 struct timer_list stat_monitor;
55d7de9d
WH
403
404 unsigned long data[5];
55d7de9d
WH
405
406 int link_on;
407 u8 mdix_ctrl;
ce85e13a 408
87177ba6
WH
409 u32 chipid;
410 u32 chiprev;
ce85e13a 411 struct mii_bus *mdiobus;
02dc1f3d 412 phy_interface_t interface;
349e0c5e
WH
413
414 int fc_autoneg;
415 u8 fc_request_control;
20ff5565
WH
416
417 int delta;
418 struct statstage stats;
cc89c323
WH
419
420 struct irq_domain_data domain_data;
55d7de9d
WH
421};
422
02dc1f3d
WH
423/* define external phy id */
424#define PHY_LAN8835 (0x0007C130)
425#define PHY_KSZ9031RNX (0x00221620)
426
55d7de9d
WH
427/* use ethtool to change the level for any given device */
428static int msg_level = -1;
429module_param(msg_level, int, 0);
430MODULE_PARM_DESC(msg_level, "Override default message level");
431
432static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
433{
434 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
435 int ret;
436
55d7de9d
WH
437 if (!buf)
438 return -ENOMEM;
439
440 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
441 USB_VENDOR_REQUEST_READ_REGISTER,
442 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
443 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
444 if (likely(ret >= 0)) {
445 le32_to_cpus(buf);
446 *data = *buf;
447 } else {
448 netdev_warn(dev->net,
449 "Failed to read register index 0x%08x. ret = %d",
450 index, ret);
451 }
452
453 kfree(buf);
454
455 return ret;
456}
457
458static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
459{
460 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
461 int ret;
462
55d7de9d
WH
463 if (!buf)
464 return -ENOMEM;
465
466 *buf = data;
467 cpu_to_le32s(buf);
468
469 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
470 USB_VENDOR_REQUEST_WRITE_REGISTER,
471 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
472 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
473 if (unlikely(ret < 0)) {
474 netdev_warn(dev->net,
475 "Failed to write register index 0x%08x. ret = %d",
476 index, ret);
477 }
478
479 kfree(buf);
480
481 return ret;
482}
483
484static int lan78xx_read_stats(struct lan78xx_net *dev,
485 struct lan78xx_statstage *data)
486{
487 int ret = 0;
488 int i;
489 struct lan78xx_statstage *stats;
490 u32 *src;
491 u32 *dst;
492
55d7de9d
WH
493 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
494 if (!stats)
495 return -ENOMEM;
496
497 ret = usb_control_msg(dev->udev,
498 usb_rcvctrlpipe(dev->udev, 0),
499 USB_VENDOR_REQUEST_GET_STATS,
500 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
501 0,
502 0,
503 (void *)stats,
504 sizeof(*stats),
505 USB_CTRL_SET_TIMEOUT);
506 if (likely(ret >= 0)) {
507 src = (u32 *)stats;
508 dst = (u32 *)data;
509 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
510 le32_to_cpus(&src[i]);
511 dst[i] = src[i];
512 }
513 } else {
514 netdev_warn(dev->net,
858ce8ca 515 "Failed to read stat ret = %d", ret);
55d7de9d
WH
516 }
517
518 kfree(stats);
519
520 return ret;
521}
522
20ff5565
WH
/* Bump the per-counter rollover count when a 32-bit hardware counter has
 * wrapped since the previous snapshot (new value below the saved one).
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
527
528static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
529 struct lan78xx_statstage *stats)
530{
531 check_counter_rollover(stats, dev->stats, rx_fcs_errors);
532 check_counter_rollover(stats, dev->stats, rx_alignment_errors);
533 check_counter_rollover(stats, dev->stats, rx_fragment_errors);
534 check_counter_rollover(stats, dev->stats, rx_jabber_errors);
535 check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
536 check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
537 check_counter_rollover(stats, dev->stats, rx_dropped_frames);
538 check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
539 check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
540 check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
541 check_counter_rollover(stats, dev->stats, rx_unicast_frames);
542 check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
543 check_counter_rollover(stats, dev->stats, rx_multicast_frames);
544 check_counter_rollover(stats, dev->stats, rx_pause_frames);
545 check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
546 check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
547 check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
548 check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
549 check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
550 check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
551 check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
552 check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
553 check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
554 check_counter_rollover(stats, dev->stats, tx_fcs_errors);
555 check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
556 check_counter_rollover(stats, dev->stats, tx_carrier_errors);
557 check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
558 check_counter_rollover(stats, dev->stats, tx_single_collisions);
559 check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
560 check_counter_rollover(stats, dev->stats, tx_excessive_collision);
561 check_counter_rollover(stats, dev->stats, tx_late_collisions);
562 check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
563 check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
564 check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
565 check_counter_rollover(stats, dev->stats, tx_unicast_frames);
566 check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
567 check_counter_rollover(stats, dev->stats, tx_multicast_frames);
568 check_counter_rollover(stats, dev->stats, tx_pause_frames);
569 check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
570 check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
571 check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
572 check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
573 check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
574 check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
575 check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
576 check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
577 check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
578
579 memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
580}
581
582static void lan78xx_update_stats(struct lan78xx_net *dev)
583{
584 u32 *p, *count, *max;
585 u64 *data;
586 int i;
587 struct lan78xx_statstage lan78xx_stats;
588
589 if (usb_autopm_get_interface(dev->intf) < 0)
590 return;
591
592 p = (u32 *)&lan78xx_stats;
593 count = (u32 *)&dev->stats.rollover_count;
594 max = (u32 *)&dev->stats.rollover_max;
595 data = (u64 *)&dev->stats.curr_stat;
596
597 mutex_lock(&dev->stats.access_lock);
598
599 if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
600 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
601
602 for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
603 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
604
605 mutex_unlock(&dev->stats.access_lock);
606
607 usb_autopm_put_interface(dev->intf);
608}
609
55d7de9d
WH
610/* Loop until the read is completed with timeout called with phy_mutex held */
611static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
612{
613 unsigned long start_time = jiffies;
614 u32 val;
615 int ret;
616
617 do {
618 ret = lan78xx_read_reg(dev, MII_ACC, &val);
619 if (unlikely(ret < 0))
620 return -EIO;
621
622 if (!(val & MII_ACC_MII_BUSY_))
623 return 0;
624 } while (!time_after(jiffies, start_time + HZ));
625
626 return -EIO;
627}
628
629static inline u32 mii_access(int id, int index, int read)
630{
631 u32 ret;
632
633 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
634 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
635 if (read)
636 ret |= MII_ACC_MII_READ_;
637 else
638 ret |= MII_ACC_MII_WRITE_;
639 ret |= MII_ACC_MII_BUSY_;
640
641 return ret;
642}
643
55d7de9d
WH
644static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
645{
646 unsigned long start_time = jiffies;
647 u32 val;
648 int ret;
649
650 do {
651 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
652 if (unlikely(ret < 0))
653 return -EIO;
654
655 if (!(val & E2P_CMD_EPC_BUSY_) ||
656 (val & E2P_CMD_EPC_TIMEOUT_))
657 break;
658 usleep_range(40, 100);
659 } while (!time_after(jiffies, start_time + HZ));
660
661 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
662 netdev_warn(dev->net, "EEPROM read operation timeout");
663 return -EIO;
664 }
665
666 return 0;
667}
668
669static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
670{
671 unsigned long start_time = jiffies;
672 u32 val;
673 int ret;
674
675 do {
676 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
677 if (unlikely(ret < 0))
678 return -EIO;
679
680 if (!(val & E2P_CMD_EPC_BUSY_))
681 return 0;
682
683 usleep_range(40, 100);
684 } while (!time_after(jiffies, start_time + HZ));
685
686 netdev_warn(dev->net, "EEPROM is busy");
687 return -EIO;
688}
689
690static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
691 u32 length, u8 *data)
692{
693 u32 val;
a0db7d10 694 u32 saved;
55d7de9d 695 int i, ret;
a0db7d10
WH
696 int retval;
697
698 /* depends on chip, some EEPROM pins are muxed with LED function.
699 * disable & restore LED function to access EEPROM.
700 */
701 ret = lan78xx_read_reg(dev, HW_CFG, &val);
702 saved = val;
87177ba6 703 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
a0db7d10
WH
704 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
705 ret = lan78xx_write_reg(dev, HW_CFG, val);
706 }
55d7de9d 707
a0db7d10
WH
708 retval = lan78xx_eeprom_confirm_not_busy(dev);
709 if (retval)
710 return retval;
55d7de9d
WH
711
712 for (i = 0; i < length; i++) {
713 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
714 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
715 ret = lan78xx_write_reg(dev, E2P_CMD, val);
a0db7d10
WH
716 if (unlikely(ret < 0)) {
717 retval = -EIO;
718 goto exit;
719 }
55d7de9d 720
a0db7d10
WH
721 retval = lan78xx_wait_eeprom(dev);
722 if (retval < 0)
723 goto exit;
55d7de9d
WH
724
725 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
a0db7d10
WH
726 if (unlikely(ret < 0)) {
727 retval = -EIO;
728 goto exit;
729 }
55d7de9d
WH
730
731 data[i] = val & 0xFF;
732 offset++;
733 }
734
a0db7d10
WH
735 retval = 0;
736exit:
87177ba6 737 if (dev->chipid == ID_REV_CHIP_ID_7800_)
a0db7d10
WH
738 ret = lan78xx_write_reg(dev, HW_CFG, saved);
739
740 return retval;
55d7de9d
WH
741}
742
743static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
744 u32 length, u8 *data)
745{
746 u8 sig;
747 int ret;
748
749 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
750 if ((ret == 0) && (sig == EEPROM_INDICATOR))
751 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
752 else
753 ret = -EINVAL;
754
755 return ret;
756}
757
758static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
759 u32 length, u8 *data)
760{
761 u32 val;
a0db7d10 762 u32 saved;
55d7de9d 763 int i, ret;
a0db7d10
WH
764 int retval;
765
766 /* depends on chip, some EEPROM pins are muxed with LED function.
767 * disable & restore LED function to access EEPROM.
768 */
769 ret = lan78xx_read_reg(dev, HW_CFG, &val);
770 saved = val;
87177ba6 771 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
a0db7d10
WH
772 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
773 ret = lan78xx_write_reg(dev, HW_CFG, val);
774 }
55d7de9d 775
a0db7d10
WH
776 retval = lan78xx_eeprom_confirm_not_busy(dev);
777 if (retval)
778 goto exit;
55d7de9d
WH
779
780 /* Issue write/erase enable command */
781 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
782 ret = lan78xx_write_reg(dev, E2P_CMD, val);
a0db7d10
WH
783 if (unlikely(ret < 0)) {
784 retval = -EIO;
785 goto exit;
786 }
55d7de9d 787
a0db7d10
WH
788 retval = lan78xx_wait_eeprom(dev);
789 if (retval < 0)
790 goto exit;
55d7de9d
WH
791
792 for (i = 0; i < length; i++) {
793 /* Fill data register */
794 val = data[i];
795 ret = lan78xx_write_reg(dev, E2P_DATA, val);
a0db7d10
WH
796 if (ret < 0) {
797 retval = -EIO;
798 goto exit;
799 }
55d7de9d
WH
800
801 /* Send "write" command */
802 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
803 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
804 ret = lan78xx_write_reg(dev, E2P_CMD, val);
a0db7d10
WH
805 if (ret < 0) {
806 retval = -EIO;
807 goto exit;
808 }
55d7de9d 809
a0db7d10
WH
810 retval = lan78xx_wait_eeprom(dev);
811 if (retval < 0)
812 goto exit;
55d7de9d
WH
813
814 offset++;
815 }
816
a0db7d10
WH
817 retval = 0;
818exit:
87177ba6 819 if (dev->chipid == ID_REV_CHIP_ID_7800_)
a0db7d10
WH
820 ret = lan78xx_write_reg(dev, HW_CFG, saved);
821
822 return retval;
55d7de9d
WH
823}
824
825static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
826 u32 length, u8 *data)
827{
828 int i;
829 int ret;
830 u32 buf;
831 unsigned long timeout;
832
833 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
834
835 if (buf & OTP_PWR_DN_PWRDN_N_) {
836 /* clear it and wait to be cleared */
837 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
838
839 timeout = jiffies + HZ;
840 do {
841 usleep_range(1, 10);
842 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
843 if (time_after(jiffies, timeout)) {
844 netdev_warn(dev->net,
845 "timeout on OTP_PWR_DN");
846 return -EIO;
847 }
848 } while (buf & OTP_PWR_DN_PWRDN_N_);
849 }
850
851 for (i = 0; i < length; i++) {
852 ret = lan78xx_write_reg(dev, OTP_ADDR1,
853 ((offset + i) >> 8) & OTP_ADDR1_15_11);
854 ret = lan78xx_write_reg(dev, OTP_ADDR2,
855 ((offset + i) & OTP_ADDR2_10_3));
856
857 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
858 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
859
860 timeout = jiffies + HZ;
861 do {
862 udelay(1);
863 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
864 if (time_after(jiffies, timeout)) {
865 netdev_warn(dev->net,
866 "timeout on OTP_STATUS");
867 return -EIO;
868 }
869 } while (buf & OTP_STATUS_BUSY_);
870
871 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
872
873 data[i] = (u8)(buf & 0xFF);
874 }
875
876 return 0;
877}
878
9fb6066d
WH
879static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
880 u32 length, u8 *data)
881{
882 int i;
883 int ret;
884 u32 buf;
885 unsigned long timeout;
886
887 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
888
889 if (buf & OTP_PWR_DN_PWRDN_N_) {
890 /* clear it and wait to be cleared */
891 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
892
893 timeout = jiffies + HZ;
894 do {
895 udelay(1);
896 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
897 if (time_after(jiffies, timeout)) {
898 netdev_warn(dev->net,
899 "timeout on OTP_PWR_DN completion");
900 return -EIO;
901 }
902 } while (buf & OTP_PWR_DN_PWRDN_N_);
903 }
904
905 /* set to BYTE program mode */
906 ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
907
908 for (i = 0; i < length; i++) {
909 ret = lan78xx_write_reg(dev, OTP_ADDR1,
910 ((offset + i) >> 8) & OTP_ADDR1_15_11);
911 ret = lan78xx_write_reg(dev, OTP_ADDR2,
912 ((offset + i) & OTP_ADDR2_10_3));
913 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
914 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
915 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
916
917 timeout = jiffies + HZ;
918 do {
919 udelay(1);
920 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
921 if (time_after(jiffies, timeout)) {
922 netdev_warn(dev->net,
923 "Timeout on OTP_STATUS completion");
924 return -EIO;
925 }
926 } while (buf & OTP_STATUS_BUSY_);
927 }
928
929 return 0;
930}
931
55d7de9d
WH
932static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
933 u32 length, u8 *data)
934{
935 u8 sig;
936 int ret;
937
938 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
939
940 if (ret == 0) {
94e7c844 941 if (sig == OTP_INDICATOR_2)
55d7de9d 942 offset += 0x100;
94e7c844 943 else if (sig != OTP_INDICATOR_1)
55d7de9d 944 ret = -EINVAL;
4bfc3380
PE
945 if (!ret)
946 ret = lan78xx_read_raw_otp(dev, offset, length, data);
55d7de9d
WH
947 }
948
949 return ret;
950}
951
952static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
953{
954 int i, ret;
955
956 for (i = 0; i < 100; i++) {
957 u32 dp_sel;
958
959 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
960 if (unlikely(ret < 0))
961 return -EIO;
962
963 if (dp_sel & DP_SEL_DPRDY_)
964 return 0;
965
966 usleep_range(40, 100);
967 }
968
969 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
970
971 return -EIO;
972}
973
974static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
975 u32 addr, u32 length, u32 *buf)
976{
977 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
978 u32 dp_sel;
979 int i, ret;
980
981 if (usb_autopm_get_interface(dev->intf) < 0)
982 return 0;
983
984 mutex_lock(&pdata->dataport_mutex);
985
986 ret = lan78xx_dataport_wait_not_busy(dev);
987 if (ret < 0)
988 goto done;
989
990 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
991
992 dp_sel &= ~DP_SEL_RSEL_MASK_;
993 dp_sel |= ram_select;
994 ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
995
996 for (i = 0; i < length; i++) {
997 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
998
999 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
1000
1001 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
1002
1003 ret = lan78xx_dataport_wait_not_busy(dev);
1004 if (ret < 0)
1005 goto done;
1006 }
1007
1008done:
1009 mutex_unlock(&pdata->dataport_mutex);
1010 usb_autopm_put_interface(dev->intf);
1011
1012 return ret;
1013}
1014
1015static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1016 int index, u8 addr[ETH_ALEN])
1017{
51ceac9f 1018 u32 temp;
55d7de9d
WH
1019
1020 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1021 temp = addr[3];
1022 temp = addr[2] | (temp << 8);
1023 temp = addr[1] | (temp << 8);
1024 temp = addr[0] | (temp << 8);
1025 pdata->pfilter_table[index][1] = temp;
1026 temp = addr[5];
1027 temp = addr[4] | (temp << 8);
1028 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1029 pdata->pfilter_table[index][0] = temp;
1030 }
1031}
1032
1033/* returns hash bit number for given MAC address */
1034static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1035{
1036 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1037}
1038
1039static void lan78xx_deferred_multicast_write(struct work_struct *param)
1040{
1041 struct lan78xx_priv *pdata =
1042 container_of(param, struct lan78xx_priv, set_multicast);
1043 struct lan78xx_net *dev = pdata->dev;
1044 int i;
1045 int ret;
1046
1047 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1048 pdata->rfe_ctl);
1049
1050 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1051 DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1052
1053 for (i = 1; i < NUM_OF_MAF; i++) {
1054 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1055 ret = lan78xx_write_reg(dev, MAF_LO(i),
1056 pdata->pfilter_table[i][1]);
1057 ret = lan78xx_write_reg(dev, MAF_HI(i),
1058 pdata->pfilter_table[i][0]);
1059 }
1060
1061 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1062}
1063
1064static void lan78xx_set_multicast(struct net_device *netdev)
1065{
1066 struct lan78xx_net *dev = netdev_priv(netdev);
1067 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1068 unsigned long flags;
1069 int i;
1070
1071 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1072
1073 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1074 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1075
1076 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1077 pdata->mchash_table[i] = 0;
1078 /* pfilter_table[0] has own HW address */
1079 for (i = 1; i < NUM_OF_MAF; i++) {
1080 pdata->pfilter_table[i][0] =
1081 pdata->pfilter_table[i][1] = 0;
1082 }
1083
1084 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1085
1086 if (dev->net->flags & IFF_PROMISC) {
1087 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1088 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1089 } else {
1090 if (dev->net->flags & IFF_ALLMULTI) {
1091 netif_dbg(dev, drv, dev->net,
1092 "receive all multicast enabled");
1093 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1094 }
1095 }
1096
1097 if (netdev_mc_count(dev->net)) {
1098 struct netdev_hw_addr *ha;
1099 int i;
1100
1101 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1102
1103 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1104
1105 i = 1;
1106 netdev_for_each_mc_addr(ha, netdev) {
1107 /* set first 32 into Perfect Filter */
1108 if (i < 33) {
1109 lan78xx_set_addr_filter(pdata, i, ha->addr);
1110 } else {
1111 u32 bitnum = lan78xx_hash(ha->addr);
1112
1113 pdata->mchash_table[bitnum / 32] |=
1114 (1 << (bitnum % 32));
1115 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1116 }
1117 i++;
1118 }
1119 }
1120
1121 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1122
1123 /* defer register writes to a sleepable context */
1124 schedule_work(&pdata->set_multicast);
1125}
1126
1127static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1128 u16 lcladv, u16 rmtadv)
1129{
1130 u32 flow = 0, fct_flow = 0;
1131 int ret;
349e0c5e 1132 u8 cap;
55d7de9d 1133
349e0c5e
WH
1134 if (dev->fc_autoneg)
1135 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1136 else
1137 cap = dev->fc_request_control;
55d7de9d
WH
1138
1139 if (cap & FLOW_CTRL_TX)
349e0c5e 1140 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
55d7de9d
WH
1141
1142 if (cap & FLOW_CTRL_RX)
1143 flow |= FLOW_CR_RX_FCEN_;
1144
1145 if (dev->udev->speed == USB_SPEED_SUPER)
1146 fct_flow = 0x817;
1147 else if (dev->udev->speed == USB_SPEED_HIGH)
1148 fct_flow = 0x211;
1149
1150 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1151 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1152 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1153
1154 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1155
1156 /* threshold value should be set before enabling flow */
1157 ret = lan78xx_write_reg(dev, FLOW, flow);
1158
1159 return 0;
1160}
1161
1162static int lan78xx_link_reset(struct lan78xx_net *dev)
1163{
ce85e13a 1164 struct phy_device *phydev = dev->net->phydev;
6e76510e 1165 struct ethtool_link_ksettings ecmd;
99c79ece 1166 int ladv, radv, ret;
55d7de9d
WH
1167 u32 buf;
1168
55d7de9d
WH
1169 /* clear LAN78xx interrupt status */
1170 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1171 if (unlikely(ret < 0))
1172 return -EIO;
1173
ce85e13a
WH
1174 phy_read_status(phydev);
1175
1176 if (!phydev->link && dev->link_on) {
55d7de9d 1177 dev->link_on = false;
55d7de9d
WH
1178
1179 /* reset MAC */
1180 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1181 if (unlikely(ret < 0))
1182 return -EIO;
1183 buf |= MAC_CR_RST_;
1184 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1185 if (unlikely(ret < 0))
1186 return -EIO;
e4953910 1187
20ff5565 1188 del_timer(&dev->stat_monitor);
ce85e13a 1189 } else if (phydev->link && !dev->link_on) {
55d7de9d
WH
1190 dev->link_on = true;
1191
6e76510e 1192 phy_ethtool_ksettings_get(phydev, &ecmd);
55d7de9d 1193
55d7de9d 1194 if (dev->udev->speed == USB_SPEED_SUPER) {
6e76510e 1195 if (ecmd.base.speed == 1000) {
55d7de9d
WH
1196 /* disable U2 */
1197 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1198 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1199 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1200 /* enable U1 */
1201 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1202 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1203 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1204 } else {
1205 /* enable U1 & U2 */
1206 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1207 buf |= USB_CFG1_DEV_U2_INIT_EN_;
1208 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1209 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1210 }
1211 }
1212
ce85e13a 1213 ladv = phy_read(phydev, MII_ADVERTISE);
99c79ece
GU
1214 if (ladv < 0)
1215 return ladv;
55d7de9d 1216
ce85e13a 1217 radv = phy_read(phydev, MII_LPA);
99c79ece
GU
1218 if (radv < 0)
1219 return radv;
55d7de9d
WH
1220
1221 netif_dbg(dev, link, dev->net,
1222 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
6e76510e 1223 ecmd.base.speed, ecmd.base.duplex, ladv, radv);
55d7de9d 1224
6e76510e
PR
1225 ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1226 radv);
20ff5565
WH
1227
1228 if (!timer_pending(&dev->stat_monitor)) {
1229 dev->delta = 1;
1230 mod_timer(&dev->stat_monitor,
1231 jiffies + STAT_UPDATE_TIMER);
1232 }
136f55f6
SW
1233
1234 tasklet_schedule(&dev->bh);
55d7de9d
WH
1235 }
1236
1237 return ret;
1238}
1239
1240/* some work can't be done in tasklets, so we use keventd
1241 *
1242 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1243 * but tasklet_schedule() doesn't. hope the failure is rare.
1244 */
e0c79ff6 1245static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
55d7de9d
WH
1246{
1247 set_bit(work, &dev->flags);
1248 if (!schedule_delayed_work(&dev->wq, 0))
1249 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1250}
1251
1252static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1253{
1254 u32 intdata;
1255
1256 if (urb->actual_length != 4) {
1257 netdev_warn(dev->net,
1258 "unexpected urb length %d", urb->actual_length);
1259 return;
1260 }
1261
bb448f8a 1262 intdata = get_unaligned_le32(urb->transfer_buffer);
55d7de9d
WH
1263
1264 if (intdata & INT_ENP_PHY_INT) {
1265 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
cc89c323
WH
1266 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1267
0a29ac5b
DW
1268 if (dev->domain_data.phyirq > 0) {
1269 local_irq_disable();
cc89c323 1270 generic_handle_irq(dev->domain_data.phyirq);
0a29ac5b
DW
1271 local_irq_enable();
1272 }
55d7de9d
WH
1273 } else
1274 netdev_warn(dev->net,
1275 "unexpected interrupt: 0x%08x\n", intdata);
1276}
1277
1278static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1279{
1280 return MAX_EEPROM_SIZE;
1281}
1282
1283static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1284 struct ethtool_eeprom *ee, u8 *data)
1285{
1286 struct lan78xx_net *dev = netdev_priv(netdev);
8a7ffeb7
NS
1287 int ret;
1288
1289 ret = usb_autopm_get_interface(dev->intf);
1290 if (ret)
1291 return ret;
55d7de9d
WH
1292
1293 ee->magic = LAN78XX_EEPROM_MAGIC;
1294
8a7ffeb7
NS
1295 ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1296
1297 usb_autopm_put_interface(dev->intf);
1298
1299 return ret;
55d7de9d
WH
1300}
1301
1302static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1303 struct ethtool_eeprom *ee, u8 *data)
1304{
1305 struct lan78xx_net *dev = netdev_priv(netdev);
8a7ffeb7
NS
1306 int ret;
1307
1308 ret = usb_autopm_get_interface(dev->intf);
1309 if (ret)
1310 return ret;
55d7de9d 1311
c0776822
NS
1312 /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1313 * to load data from EEPROM
1314 */
1315 if (ee->magic == LAN78XX_EEPROM_MAGIC)
8a7ffeb7 1316 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
55d7de9d
WH
1317 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1318 (ee->offset == 0) &&
1319 (ee->len == 512) &&
1320 (data[0] == OTP_INDICATOR_1))
8a7ffeb7 1321 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
55d7de9d 1322
8a7ffeb7
NS
1323 usb_autopm_put_interface(dev->intf);
1324
1325 return ret;
55d7de9d
WH
1326}
1327
1328static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1329 u8 *data)
1330{
1331 if (stringset == ETH_SS_STATS)
1332 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1333}
1334
1335static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1336{
1337 if (sset == ETH_SS_STATS)
1338 return ARRAY_SIZE(lan78xx_gstrings);
1339 else
1340 return -EOPNOTSUPP;
1341}
1342
1343static void lan78xx_get_stats(struct net_device *netdev,
1344 struct ethtool_stats *stats, u64 *data)
1345{
1346 struct lan78xx_net *dev = netdev_priv(netdev);
55d7de9d 1347
20ff5565 1348 lan78xx_update_stats(dev);
55d7de9d 1349
20ff5565
WH
1350 mutex_lock(&dev->stats.access_lock);
1351 memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1352 mutex_unlock(&dev->stats.access_lock);
55d7de9d
WH
1353}
1354
1355static void lan78xx_get_wol(struct net_device *netdev,
1356 struct ethtool_wolinfo *wol)
1357{
1358 struct lan78xx_net *dev = netdev_priv(netdev);
1359 int ret;
1360 u32 buf;
1361 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1362
1363 if (usb_autopm_get_interface(dev->intf) < 0)
1364 return;
1365
1366 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1367 if (unlikely(ret < 0)) {
1368 wol->supported = 0;
1369 wol->wolopts = 0;
1370 } else {
1371 if (buf & USB_CFG_RMT_WKP_) {
1372 wol->supported = WAKE_ALL;
1373 wol->wolopts = pdata->wol;
1374 } else {
1375 wol->supported = 0;
1376 wol->wolopts = 0;
1377 }
1378 }
1379
1380 usb_autopm_put_interface(dev->intf);
1381}
1382
1383static int lan78xx_set_wol(struct net_device *netdev,
1384 struct ethtool_wolinfo *wol)
1385{
1386 struct lan78xx_net *dev = netdev_priv(netdev);
1387 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1388 int ret;
1389
1390 ret = usb_autopm_get_interface(dev->intf);
1391 if (ret < 0)
1392 return ret;
1393
eb9ad088
FF
1394 if (wol->wolopts & ~WAKE_ALL)
1395 return -EINVAL;
1396
1397 pdata->wol = wol->wolopts;
55d7de9d
WH
1398
1399 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1400
ce85e13a
WH
1401 phy_ethtool_set_wol(netdev->phydev, wol);
1402
55d7de9d
WH
1403 usb_autopm_put_interface(dev->intf);
1404
1405 return ret;
1406}
1407
1408static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1409{
1410 struct lan78xx_net *dev = netdev_priv(net);
ce85e13a 1411 struct phy_device *phydev = net->phydev;
55d7de9d
WH
1412 int ret;
1413 u32 buf;
55d7de9d
WH
1414
1415 ret = usb_autopm_get_interface(dev->intf);
1416 if (ret < 0)
1417 return ret;
1418
ce85e13a
WH
1419 ret = phy_ethtool_get_eee(phydev, edata);
1420 if (ret < 0)
1421 goto exit;
1422
55d7de9d
WH
1423 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1424 if (buf & MAC_CR_EEE_EN_) {
55d7de9d 1425 edata->eee_enabled = true;
ce85e13a
WH
1426 edata->eee_active = !!(edata->advertised &
1427 edata->lp_advertised);
55d7de9d
WH
1428 edata->tx_lpi_enabled = true;
1429 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1430 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1431 edata->tx_lpi_timer = buf;
1432 } else {
55d7de9d
WH
1433 edata->eee_enabled = false;
1434 edata->eee_active = false;
55d7de9d
WH
1435 edata->tx_lpi_enabled = false;
1436 edata->tx_lpi_timer = 0;
1437 }
1438
ce85e13a
WH
1439 ret = 0;
1440exit:
55d7de9d
WH
1441 usb_autopm_put_interface(dev->intf);
1442
ce85e13a 1443 return ret;
55d7de9d
WH
1444}
1445
1446static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1447{
1448 struct lan78xx_net *dev = netdev_priv(net);
1449 int ret;
1450 u32 buf;
1451
1452 ret = usb_autopm_get_interface(dev->intf);
1453 if (ret < 0)
1454 return ret;
1455
1456 if (edata->eee_enabled) {
1457 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1458 buf |= MAC_CR_EEE_EN_;
1459 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1460
ce85e13a
WH
1461 phy_ethtool_set_eee(net->phydev, edata);
1462
1463 buf = (u32)edata->tx_lpi_timer;
1464 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
55d7de9d
WH
1465 } else {
1466 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1467 buf &= ~MAC_CR_EEE_EN_;
1468 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1469 }
1470
1471 usb_autopm_put_interface(dev->intf);
1472
1473 return 0;
1474}
1475
1476static u32 lan78xx_get_link(struct net_device *net)
1477{
ce85e13a 1478 phy_read_status(net->phydev);
55d7de9d 1479
ce85e13a 1480 return net->phydev->link;
55d7de9d
WH
1481}
1482
55d7de9d
WH
1483static void lan78xx_get_drvinfo(struct net_device *net,
1484 struct ethtool_drvinfo *info)
1485{
1486 struct lan78xx_net *dev = netdev_priv(net);
1487
1488 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
55d7de9d
WH
1489 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1490}
1491
1492static u32 lan78xx_get_msglevel(struct net_device *net)
1493{
1494 struct lan78xx_net *dev = netdev_priv(net);
1495
1496 return dev->msg_enable;
1497}
1498
1499static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1500{
1501 struct lan78xx_net *dev = netdev_priv(net);
1502
1503 dev->msg_enable = level;
1504}
1505
6e76510e
PR
1506static int lan78xx_get_link_ksettings(struct net_device *net,
1507 struct ethtool_link_ksettings *cmd)
55d7de9d
WH
1508{
1509 struct lan78xx_net *dev = netdev_priv(net);
ce85e13a 1510 struct phy_device *phydev = net->phydev;
55d7de9d 1511 int ret;
55d7de9d 1512
55d7de9d
WH
1513 ret = usb_autopm_get_interface(dev->intf);
1514 if (ret < 0)
1515 return ret;
1516
5514174f 1517 phy_ethtool_ksettings_get(phydev, cmd);
55d7de9d 1518
55d7de9d
WH
1519 usb_autopm_put_interface(dev->intf);
1520
1521 return ret;
1522}
1523
6e76510e
PR
1524static int lan78xx_set_link_ksettings(struct net_device *net,
1525 const struct ethtool_link_ksettings *cmd)
55d7de9d
WH
1526{
1527 struct lan78xx_net *dev = netdev_priv(net);
ce85e13a 1528 struct phy_device *phydev = net->phydev;
55d7de9d
WH
1529 int ret = 0;
1530 int temp;
1531
55d7de9d
WH
1532 ret = usb_autopm_get_interface(dev->intf);
1533 if (ret < 0)
1534 return ret;
1535
55d7de9d 1536 /* change speed & duplex */
6e76510e 1537 ret = phy_ethtool_ksettings_set(phydev, cmd);
55d7de9d 1538
6e76510e 1539 if (!cmd->base.autoneg) {
55d7de9d 1540 /* force link down */
ce85e13a
WH
1541 temp = phy_read(phydev, MII_BMCR);
1542 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
55d7de9d 1543 mdelay(1);
ce85e13a 1544 phy_write(phydev, MII_BMCR, temp);
55d7de9d
WH
1545 }
1546
1547 usb_autopm_put_interface(dev->intf);
1548
1549 return ret;
1550}
1551
349e0c5e
WH
1552static void lan78xx_get_pause(struct net_device *net,
1553 struct ethtool_pauseparam *pause)
1554{
1555 struct lan78xx_net *dev = netdev_priv(net);
1556 struct phy_device *phydev = net->phydev;
6e76510e 1557 struct ethtool_link_ksettings ecmd;
349e0c5e 1558
6e76510e 1559 phy_ethtool_ksettings_get(phydev, &ecmd);
349e0c5e
WH
1560
1561 pause->autoneg = dev->fc_autoneg;
1562
1563 if (dev->fc_request_control & FLOW_CTRL_TX)
1564 pause->tx_pause = 1;
1565
1566 if (dev->fc_request_control & FLOW_CTRL_RX)
1567 pause->rx_pause = 1;
1568}
1569
1570static int lan78xx_set_pause(struct net_device *net,
1571 struct ethtool_pauseparam *pause)
1572{
1573 struct lan78xx_net *dev = netdev_priv(net);
1574 struct phy_device *phydev = net->phydev;
6e76510e 1575 struct ethtool_link_ksettings ecmd;
349e0c5e
WH
1576 int ret;
1577
6e76510e 1578 phy_ethtool_ksettings_get(phydev, &ecmd);
349e0c5e 1579
6e76510e 1580 if (pause->autoneg && !ecmd.base.autoneg) {
349e0c5e
WH
1581 ret = -EINVAL;
1582 goto exit;
1583 }
1584
1585 dev->fc_request_control = 0;
1586 if (pause->rx_pause)
1587 dev->fc_request_control |= FLOW_CTRL_RX;
1588
1589 if (pause->tx_pause)
1590 dev->fc_request_control |= FLOW_CTRL_TX;
1591
6e76510e 1592 if (ecmd.base.autoneg) {
3c1bcc86 1593 __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
349e0c5e 1594 u32 mii_adv;
6e76510e 1595
3c1bcc86
AL
1596 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
1597 ecmd.link_modes.advertising);
1598 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
1599 ecmd.link_modes.advertising);
349e0c5e 1600 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
3c1bcc86
AL
1601 mii_adv_to_linkmode_adv_t(fc, mii_adv);
1602 linkmode_or(ecmd.link_modes.advertising, fc,
1603 ecmd.link_modes.advertising);
6e76510e
PR
1604
1605 phy_ethtool_ksettings_set(phydev, &ecmd);
349e0c5e
WH
1606 }
1607
1608 dev->fc_autoneg = pause->autoneg;
1609
1610 ret = 0;
1611exit:
1612 return ret;
1613}
1614
49621865
RC
1615static int lan78xx_get_regs_len(struct net_device *netdev)
1616{
1617 if (!netdev->phydev)
1618 return (sizeof(lan78xx_regs));
1619 else
1620 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1621}
1622
1623static void
1624lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1625 void *buf)
1626{
1627 u32 *data = buf;
1628 int i, j;
1629 struct lan78xx_net *dev = netdev_priv(netdev);
1630
1631 /* Read Device/MAC registers */
bf37afce 1632 for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
49621865
RC
1633 lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1634
1635 if (!netdev->phydev)
1636 return;
1637
1638 /* Read PHY registers */
1639 for (j = 0; j < 32; i++, j++)
1640 data[i] = phy_read(netdev->phydev, j);
1641}
1642
55d7de9d
WH
1643static const struct ethtool_ops lan78xx_ethtool_ops = {
1644 .get_link = lan78xx_get_link,
860ce4b4 1645 .nway_reset = phy_ethtool_nway_reset,
55d7de9d
WH
1646 .get_drvinfo = lan78xx_get_drvinfo,
1647 .get_msglevel = lan78xx_get_msglevel,
1648 .set_msglevel = lan78xx_set_msglevel,
55d7de9d
WH
1649 .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1650 .get_eeprom = lan78xx_ethtool_get_eeprom,
1651 .set_eeprom = lan78xx_ethtool_set_eeprom,
1652 .get_ethtool_stats = lan78xx_get_stats,
1653 .get_sset_count = lan78xx_get_sset_count,
1654 .get_strings = lan78xx_get_strings,
1655 .get_wol = lan78xx_get_wol,
1656 .set_wol = lan78xx_set_wol,
1657 .get_eee = lan78xx_get_eee,
1658 .set_eee = lan78xx_set_eee,
349e0c5e
WH
1659 .get_pauseparam = lan78xx_get_pause,
1660 .set_pauseparam = lan78xx_set_pause,
6e76510e
PR
1661 .get_link_ksettings = lan78xx_get_link_ksettings,
1662 .set_link_ksettings = lan78xx_set_link_ksettings,
49621865
RC
1663 .get_regs_len = lan78xx_get_regs_len,
1664 .get_regs = lan78xx_get_regs,
55d7de9d
WH
1665};
1666
55d7de9d
WH
1667static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1668{
1669 u32 addr_lo, addr_hi;
1670 int ret;
1671 u8 addr[6];
1672
1673 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1674 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1675
1676 addr[0] = addr_lo & 0xFF;
1677 addr[1] = (addr_lo >> 8) & 0xFF;
1678 addr[2] = (addr_lo >> 16) & 0xFF;
1679 addr[3] = (addr_lo >> 24) & 0xFF;
1680 addr[4] = addr_hi & 0xFF;
1681 addr[5] = (addr_hi >> 8) & 0xFF;
1682
1683 if (!is_valid_ether_addr(addr)) {
760db29b
PE
1684 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1685 /* valid address present in Device Tree */
1686 netif_dbg(dev, ifup, dev->net,
1687 "MAC address read from Device Tree");
1688 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1689 ETH_ALEN, addr) == 0) ||
1690 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1691 ETH_ALEN, addr) == 0)) &&
1692 is_valid_ether_addr(addr)) {
1693 /* eeprom values are valid so use them */
1694 netif_dbg(dev, ifup, dev->net,
1695 "MAC address read from EEPROM");
55d7de9d
WH
1696 } else {
1697 /* generate random MAC */
6c1f0a1f 1698 eth_random_addr(addr);
55d7de9d
WH
1699 netif_dbg(dev, ifup, dev->net,
1700 "MAC address set to random addr");
1701 }
760db29b
PE
1702
1703 addr_lo = addr[0] | (addr[1] << 8) |
1704 (addr[2] << 16) | (addr[3] << 24);
1705 addr_hi = addr[4] | (addr[5] << 8);
1706
1707 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1708 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
55d7de9d
WH
1709 }
1710
1711 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1712 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1713
1714 ether_addr_copy(dev->net->dev_addr, addr);
1715}
1716
ce85e13a
WH
1717/* MDIO read and write wrappers for phylib */
1718static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1719{
1720 struct lan78xx_net *dev = bus->priv;
1721 u32 val, addr;
1722 int ret;
1723
1724 ret = usb_autopm_get_interface(dev->intf);
1725 if (ret < 0)
1726 return ret;
1727
1728 mutex_lock(&dev->phy_mutex);
1729
1730 /* confirm MII not busy */
1731 ret = lan78xx_phy_wait_not_busy(dev);
1732 if (ret < 0)
1733 goto done;
1734
1735 /* set the address, index & direction (read from PHY) */
1736 addr = mii_access(phy_id, idx, MII_READ);
1737 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1738
1739 ret = lan78xx_phy_wait_not_busy(dev);
1740 if (ret < 0)
1741 goto done;
1742
1743 ret = lan78xx_read_reg(dev, MII_DATA, &val);
1744
1745 ret = (int)(val & 0xFFFF);
1746
1747done:
1748 mutex_unlock(&dev->phy_mutex);
1749 usb_autopm_put_interface(dev->intf);
02dc1f3d 1750
ce85e13a
WH
1751 return ret;
1752}
1753
1754static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1755 u16 regval)
1756{
1757 struct lan78xx_net *dev = bus->priv;
1758 u32 val, addr;
1759 int ret;
1760
1761 ret = usb_autopm_get_interface(dev->intf);
1762 if (ret < 0)
1763 return ret;
1764
1765 mutex_lock(&dev->phy_mutex);
1766
1767 /* confirm MII not busy */
1768 ret = lan78xx_phy_wait_not_busy(dev);
1769 if (ret < 0)
1770 goto done;
1771
1772 val = (u32)regval;
1773 ret = lan78xx_write_reg(dev, MII_DATA, val);
1774
1775 /* set the address, index & direction (write to PHY) */
1776 addr = mii_access(phy_id, idx, MII_WRITE);
1777 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1778
1779 ret = lan78xx_phy_wait_not_busy(dev);
1780 if (ret < 0)
1781 goto done;
1782
1783done:
1784 mutex_unlock(&dev->phy_mutex);
1785 usb_autopm_put_interface(dev->intf);
1786 return 0;
1787}
1788
1789static int lan78xx_mdio_init(struct lan78xx_net *dev)
55d7de9d 1790{
1827b067 1791 struct device_node *node;
ce85e13a 1792 int ret;
ce85e13a
WH
1793
1794 dev->mdiobus = mdiobus_alloc();
1795 if (!dev->mdiobus) {
1796 netdev_err(dev->net, "can't allocate MDIO bus\n");
1797 return -ENOMEM;
1798 }
1799
1800 dev->mdiobus->priv = (void *)dev;
1801 dev->mdiobus->read = lan78xx_mdiobus_read;
1802 dev->mdiobus->write = lan78xx_mdiobus_write;
1803 dev->mdiobus->name = "lan78xx-mdiobus";
20032b63 1804 dev->mdiobus->parent = &dev->udev->dev;
ce85e13a
WH
1805
1806 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1807 dev->udev->bus->busnum, dev->udev->devnum);
1808
87177ba6
WH
1809 switch (dev->chipid) {
1810 case ID_REV_CHIP_ID_7800_:
1811 case ID_REV_CHIP_ID_7850_:
ce85e13a
WH
1812 /* set to internal PHY id */
1813 dev->mdiobus->phy_mask = ~(1 << 1);
1814 break;
02dc1f3d
WH
1815 case ID_REV_CHIP_ID_7801_:
1816 /* scan thru PHYAD[2..0] */
1817 dev->mdiobus->phy_mask = ~(0xFF);
1818 break;
ce85e13a
WH
1819 }
1820
1827b067 1821 node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
00e798c7 1822 ret = of_mdiobus_register(dev->mdiobus, node);
764ea371 1823 of_node_put(node);
ce85e13a
WH
1824 if (ret) {
1825 netdev_err(dev->net, "can't register MDIO bus\n");
e7f4dc35 1826 goto exit1;
ce85e13a
WH
1827 }
1828
1829 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1830 return 0;
ce85e13a
WH
1831exit1:
1832 mdiobus_free(dev->mdiobus);
1833 return ret;
1834}
1835
1836static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1837{
1838 mdiobus_unregister(dev->mdiobus);
ce85e13a
WH
1839 mdiobus_free(dev->mdiobus);
1840}
1841
1842static void lan78xx_link_status_change(struct net_device *net)
1843{
14437e3f
WH
1844 struct phy_device *phydev = net->phydev;
1845 int ret, temp;
1846
1847 /* At forced 100 F/H mode, chip may fail to set mode correctly
1848 * when cable is switched between long(~50+m) and short one.
1849 * As workaround, set to 10 before setting to 100
1850 * at forced 100 F/H mode.
1851 */
1852 if (!phydev->autoneg && (phydev->speed == 100)) {
1853 /* disable phy interrupt */
1854 temp = phy_read(phydev, LAN88XX_INT_MASK);
1855 temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1856 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1857
1858 temp = phy_read(phydev, MII_BMCR);
1859 temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1860 phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1861 temp |= BMCR_SPEED100;
1862 phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1863
1864 /* clear pending interrupt generated while workaround */
1865 temp = phy_read(phydev, LAN88XX_INT_STS);
1866
1867 /* enable phy interrupt back */
1868 temp = phy_read(phydev, LAN88XX_INT_MASK);
1869 temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1870 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1871 }
55d7de9d
WH
1872}
1873
cc89c323
WH
1874static int irq_map(struct irq_domain *d, unsigned int irq,
1875 irq_hw_number_t hwirq)
1876{
1877 struct irq_domain_data *data = d->host_data;
1878
1879 irq_set_chip_data(irq, data);
1880 irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1881 irq_set_noprobe(irq);
1882
1883 return 0;
1884}
1885
1886static void irq_unmap(struct irq_domain *d, unsigned int irq)
1887{
1888 irq_set_chip_and_handler(irq, NULL, NULL);
1889 irq_set_chip_data(irq, NULL);
1890}
1891
1892static const struct irq_domain_ops chip_domain_ops = {
1893 .map = irq_map,
1894 .unmap = irq_unmap,
1895};
1896
1897static void lan78xx_irq_mask(struct irq_data *irqd)
1898{
1899 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1900
1901 data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1902}
1903
1904static void lan78xx_irq_unmask(struct irq_data *irqd)
1905{
1906 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1907
1908 data->irqenable |= BIT(irqd_to_hwirq(irqd));
1909}
1910
1911static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1912{
1913 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1914
1915 mutex_lock(&data->irq_lock);
1916}
1917
1918static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1919{
1920 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1921 struct lan78xx_net *dev =
1922 container_of(data, struct lan78xx_net, domain_data);
1923 u32 buf;
1924 int ret;
1925
1926 /* call register access here because irq_bus_lock & irq_bus_sync_unlock
1927 * are only two callbacks executed in non-atomic contex.
1928 */
1929 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1930 if (buf != data->irqenable)
1931 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1932
1933 mutex_unlock(&data->irq_lock);
1934}
1935
1936static struct irq_chip lan78xx_irqchip = {
1937 .name = "lan78xx-irqs",
1938 .irq_mask = lan78xx_irq_mask,
1939 .irq_unmask = lan78xx_irq_unmask,
1940 .irq_bus_lock = lan78xx_irq_bus_lock,
1941 .irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock,
1942};
1943
1944static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1945{
1946 struct device_node *of_node;
1947 struct irq_domain *irqdomain;
1948 unsigned int irqmap = 0;
1949 u32 buf;
1950 int ret = 0;
1951
1952 of_node = dev->udev->dev.parent->of_node;
1953
1954 mutex_init(&dev->domain_data.irq_lock);
1955
1956 lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1957 dev->domain_data.irqenable = buf;
1958
1959 dev->domain_data.irqchip = &lan78xx_irqchip;
1960 dev->domain_data.irq_handler = handle_simple_irq;
1961
1962 irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1963 &chip_domain_ops, &dev->domain_data);
1964 if (irqdomain) {
1965 /* create mapping for PHY interrupt */
1966 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1967 if (!irqmap) {
1968 irq_domain_remove(irqdomain);
1969
1970 irqdomain = NULL;
1971 ret = -EINVAL;
1972 }
1973 } else {
1974 ret = -EINVAL;
1975 }
1976
1977 dev->domain_data.irqdomain = irqdomain;
1978 dev->domain_data.phyirq = irqmap;
1979
1980 return ret;
1981}
1982
1983static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1984{
1985 if (dev->domain_data.phyirq > 0) {
1986 irq_dispose_mapping(dev->domain_data.phyirq);
1987
1988 if (dev->domain_data.irqdomain)
1989 irq_domain_remove(dev->domain_data.irqdomain);
1990 }
1991 dev->domain_data.phyirq = 0;
1992 dev->domain_data.irqdomain = NULL;
1993}
1994
02dc1f3d
WH
1995static int lan8835_fixup(struct phy_device *phydev)
1996{
1997 int buf;
1998 int ret;
1999 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2000
2001 /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
5f613677 2002 buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
02dc1f3d
WH
2003 buf &= ~0x1800;
2004 buf |= 0x0800;
5f613677 2005 phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
02dc1f3d
WH
2006
2007 /* RGMII MAC TXC Delay Enable */
2008 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2009 MAC_RGMII_ID_TXC_DELAY_EN_);
2010
2011 /* RGMII TX DLL Tune Adjust */
2012 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2013
2014 dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2015
2016 return 1;
2017}
2018
2019static int ksz9031rnx_fixup(struct phy_device *phydev)
2020{
2021 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2022
2023 /* Micrel9301RNX PHY configuration */
2024 /* RGMII Control Signal Pad Skew */
5f613677 2025 phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
02dc1f3d 2026 /* RGMII RX Data Pad Skew */
5f613677 2027 phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
02dc1f3d 2028 /* RGMII RX Clock Pad Skew */
5f613677 2029 phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
02dc1f3d
WH
2030
2031 dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2032
2033 return 1;
2034}
2035
89b36fb5 2036static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
55d7de9d 2037{
89b36fb5 2038 u32 buf;
ce85e13a 2039 int ret;
89b36fb5
RC
2040 struct fixed_phy_status fphy_status = {
2041 .link = 1,
2042 .speed = SPEED_1000,
2043 .duplex = DUPLEX_FULL,
2044 };
3b51cc75 2045 struct phy_device *phydev;
55d7de9d 2046
ce85e13a
WH
2047 phydev = phy_find_first(dev->mdiobus);
2048 if (!phydev) {
89b36fb5 2049 netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
5468e82f 2050 phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
89b36fb5
RC
2051 if (IS_ERR(phydev)) {
2052 netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2053 return NULL;
2054 }
2055 netdev_dbg(dev->net, "Registered FIXED PHY\n");
2056 dev->interface = PHY_INTERFACE_MODE_RGMII;
2057 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2058 MAC_RGMII_ID_TXC_DELAY_EN_);
2059 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2060 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2061 buf |= HW_CFG_CLK125_EN_;
2062 buf |= HW_CFG_REFCLK25_EN_;
2063 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2064 } else {
02dc1f3d
WH
2065 if (!phydev->drv) {
2066 netdev_err(dev->net, "no PHY driver found\n");
89b36fb5 2067 return NULL;
02dc1f3d 2068 }
02dc1f3d 2069 dev->interface = PHY_INTERFACE_MODE_RGMII;
02dc1f3d
WH
2070 /* external PHY fixup for KSZ9031RNX */
2071 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2072 ksz9031rnx_fixup);
2073 if (ret < 0) {
7670ed7a 2074 netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
89b36fb5 2075 return NULL;
02dc1f3d
WH
2076 }
2077 /* external PHY fixup for LAN8835 */
2078 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2079 lan8835_fixup);
2080 if (ret < 0) {
7670ed7a 2081 netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
89b36fb5 2082 return NULL;
02dc1f3d
WH
2083 }
2084 /* add more external PHY fixup here if needed */
2085
2086 phydev->is_internal = false;
89b36fb5
RC
2087 }
2088 return phydev;
2089}
2090
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	/* Attach and configure the PHY for this chip variant.
	 * Returns 0 on success, -EIO on any failure.
	 */
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7801_:
		/* LAN7801 has no internal PHY; the helper locates an
		 * external PHY or registers a fixed link.
		 */
		phydev = lan7801_phy_init(dev);
		if (!phydev) {
			netdev_err(dev->net, "lan7801: PHY Init Failed");
			return -EIO;
		}
		break;

	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* integrated PHY: first device on the internal MDIO bus */
		phydev = phy_find_first(dev->mdiobus);
		if (!phydev) {
			netdev_err(dev->net, "no PHY found\n");
			return -EIO;
		}
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;
		break;

	default:
		netdev_err(dev->net, "Unknown CHIP ID found\n");
		return -EIO;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = 0;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		/* undo whatever lan7801_phy_init() registered: either a
		 * pseudo fixed link or the KSZ9031/LAN8835 PHY fixups
		 */
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
			} else {
				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
							     0xfffffff0);
				phy_unregister_fixup_for_uid(PHY_LAN8835,
							     0xfffffff0);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			   phydev->advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	mii_adv_to_linkmode_adv_t(fc, mii_adv);
	linkmode_or(phydev->advertising, fc, phydev->advertising);

	if (phydev->mdio.dev.of_node) {
		u32 reg;
		int len;

		/* the element count of "microchip,led-modes" selects how
		 * many LED outputs stay enabled; mode programming itself
		 * is done by the PHY driver
		 */
		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
						      "microchip,led-modes",
						      sizeof(u32));
		if (len >= 0) {
			/* Ensure the appropriate LEDs are enabled */
			lan78xx_read_reg(dev, HW_CFG, &reg);
			reg &= ~(HW_CFG_LED0_EN_ |
				 HW_CFG_LED1_EN_ |
				 HW_CFG_LED2_EN_ |
				 HW_CFG_LED3_EN_);
			reg |= (len > 0) * HW_CFG_LED0_EN_ |
				(len > 1) * HW_CFG_LED1_EN_ |
				(len > 2) * HW_CFG_LED2_EN_ |
				(len > 3) * HW_CFG_LED3_EN_;
			lan78xx_write_reg(dev, HW_CFG, reg);
		}
	}

	genphy_config_aneg(phydev);

	/* remember autoneg state so ethtool pause handling can tell
	 * whether flow control follows autonegotiation
	 */
	dev->fc_autoneg = phydev->autoneg;

	return 0;
}
2193
2194static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2195{
2196 int ret = 0;
2197 u32 buf;
2198 bool rxenabled;
2199
2200 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2201
2202 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2203
2204 if (rxenabled) {
2205 buf &= ~MAC_RX_RXEN_;
2206 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2207 }
2208
2209 /* add 4 to size for FCS */
2210 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2211 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2212
2213 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2214
2215 if (rxenabled) {
2216 buf |= MAC_RX_RXEN_;
2217 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2218 }
2219
2220 return 0;
2221}
2222
/* Asynchronously unlink every URB on queue @q that is not already being
 * unlinked. Returns the number of URBs for which usb_unlink_urb() was
 * issued (or completed immediately).
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* find the next entry not yet marked unlink_start; the
		 * queue is rescanned each iteration because the lock is
		 * dropped around usb_unlink_urb() below
		 */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2267
2268static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2269{
2270 struct lan78xx_net *dev = netdev_priv(netdev);
2271 int ll_mtu = new_mtu + netdev->hard_header_len;
2272 int old_hard_mtu = dev->hard_mtu;
2273 int old_rx_urb_size = dev->rx_urb_size;
2274 int ret;
2275
55d7de9d
WH
2276 /* no second zero-length packet read wanted after mtu-sized packets */
2277 if ((ll_mtu % dev->maxpacket) == 0)
2278 return -EDOM;
2279
2259b7a6 2280 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
55d7de9d
WH
2281
2282 netdev->mtu = new_mtu;
2283
2284 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2285 if (dev->rx_urb_size == old_hard_mtu) {
2286 dev->rx_urb_size = dev->hard_mtu;
2287 if (dev->rx_urb_size > old_rx_urb_size) {
2288 if (netif_running(dev->net)) {
2289 unlink_urbs(dev, &dev->rxq);
2290 tasklet_schedule(&dev->bh);
2291 }
2292 }
2293 }
2294
2295 return 0;
2296}
2297
e0c79ff6 2298static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
55d7de9d
WH
2299{
2300 struct lan78xx_net *dev = netdev_priv(netdev);
2301 struct sockaddr *addr = p;
2302 u32 addr_lo, addr_hi;
2303 int ret;
2304
2305 if (netif_running(netdev))
2306 return -EBUSY;
2307
2308 if (!is_valid_ether_addr(addr->sa_data))
2309 return -EADDRNOTAVAIL;
2310
2311 ether_addr_copy(netdev->dev_addr, addr->sa_data);
2312
2313 addr_lo = netdev->dev_addr[0] |
2314 netdev->dev_addr[1] << 8 |
2315 netdev->dev_addr[2] << 16 |
2316 netdev->dev_addr[3] << 24;
2317 addr_hi = netdev->dev_addr[4] |
2318 netdev->dev_addr[5] << 8;
2319
2320 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2321 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2322
15515aaa
JM
2323 /* Added to support MAC address changes */
2324 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2325 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2326
55d7de9d
WH
2327 return 0;
2328}
2329
2330/* Enable or disable Rx checksum offload engine */
2331static int lan78xx_set_features(struct net_device *netdev,
2332 netdev_features_t features)
2333{
2334 struct lan78xx_net *dev = netdev_priv(netdev);
2335 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2336 unsigned long flags;
2337 int ret;
2338
2339 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2340
2341 if (features & NETIF_F_RXCSUM) {
2342 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2343 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2344 } else {
2345 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2346 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2347 }
2348
2349 if (features & NETIF_F_HW_VLAN_CTAG_RX)
ec21ecf0
DS
2350 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2351 else
2352 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2353
4a27327b 2354 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
55d7de9d
WH
2355 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2356 else
2357 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2358
2359 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2360
2361 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2362
2363 return 0;
2364}
2365
2366static void lan78xx_deferred_vlan_write(struct work_struct *param)
2367{
2368 struct lan78xx_priv *pdata =
2369 container_of(param, struct lan78xx_priv, set_vlan);
2370 struct lan78xx_net *dev = pdata->dev;
2371
2372 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2373 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2374}
2375
2376static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2377 __be16 proto, u16 vid)
2378{
2379 struct lan78xx_net *dev = netdev_priv(netdev);
2380 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2381 u16 vid_bit_index;
2382 u16 vid_dword_index;
2383
2384 vid_dword_index = (vid >> 5) & 0x7F;
2385 vid_bit_index = vid & 0x1F;
2386
2387 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2388
2389 /* defer register writes to a sleepable context */
2390 schedule_work(&pdata->set_vlan);
2391
2392 return 0;
2393}
2394
2395static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2396 __be16 proto, u16 vid)
2397{
2398 struct lan78xx_net *dev = netdev_priv(netdev);
2399 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2400 u16 vid_bit_index;
2401 u16 vid_dword_index;
2402
2403 vid_dword_index = (vid >> 5) & 0x7F;
2404 vid_bit_index = vid & 0x1F;
2405
2406 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2407
2408 /* defer register writes to a sleepable context */
2409 schedule_work(&pdata->set_vlan);
2410
2411 return 0;
2412}
2413
2414static void lan78xx_init_ltm(struct lan78xx_net *dev)
2415{
2416 int ret;
2417 u32 buf;
2418 u32 regs[6] = { 0 };
2419
2420 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2421 if (buf & USB_CFG1_LTM_ENABLE_) {
2422 u8 temp[2];
2423 /* Get values from EEPROM first */
2424 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2425 if (temp[0] == 24) {
2426 ret = lan78xx_read_raw_eeprom(dev,
2427 temp[1] * 2,
2428 24,
2429 (u8 *)regs);
2430 if (ret < 0)
2431 return;
2432 }
2433 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2434 if (temp[0] == 24) {
2435 ret = lan78xx_read_raw_otp(dev,
2436 temp[1] * 2,
2437 24,
2438 (u8 *)regs);
2439 if (ret < 0)
2440 return;
2441 }
2442 }
2443 }
2444
2445 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2446 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2447 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2448 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2449 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2450 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2451}
2452
/* Full hardware bring-up: lite-reset the chip, restore the MAC address,
 * size the USB bursting/queues for the link speed, configure FIFOs and
 * the receive filter, reset the PHY, and finally enable TX/RX paths.
 * Returns 0 on success or -EIO on reset/PHY timeout. The exact register
 * ordering below follows the chip's init sequence — do not reorder.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;
	u8 sig;

	/* issue a Lite Reset and poll for self-clear (max ~1s) */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	/* reset cleared the MAC address registers */
	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* burst cap and queue depths depend on the negotiated USB speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable multiple ethernet frames per USB bulk transfer */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* enable burst cap */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes (registers are in 512-byte units) */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear pending interrupts, disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY and poll until the PHY reports ready (max ~1s) */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable MAC TX then the TX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev,
					      dev->net->mtu + VLAN_ETH_HLEN);

	/* enable MAC RX then the RX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2591
20ff5565
WH
2592static void lan78xx_init_stats(struct lan78xx_net *dev)
2593{
2594 u32 *p;
2595 int i;
2596
2597 /* initialize for stats update
2598 * some counters are 20bits and some are 32bits
2599 */
2600 p = (u32 *)&dev->stats.rollover_max;
2601 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2602 p[i] = 0xFFFFF;
2603
2604 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2605 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2606 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2607 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2608 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2609 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2610 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2611 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2612 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2613 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2614
fed56079 2615 set_bit(EVENT_STAT_UPDATE, &dev->flags);
20ff5565
WH
2616}
2617
55d7de9d
WH
2618static int lan78xx_open(struct net_device *net)
2619{
2620 struct lan78xx_net *dev = netdev_priv(net);
2621 int ret;
2622
2623 ret = usb_autopm_get_interface(dev->intf);
2624 if (ret < 0)
2625 goto out;
2626
92571a1a
AG
2627 phy_start(net->phydev);
2628
2629 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
ce85e13a 2630
55d7de9d
WH
2631 /* for Link Check */
2632 if (dev->urb_intr) {
2633 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2634 if (ret < 0) {
2635 netif_err(dev, ifup, dev->net,
2636 "intr submit %d\n", ret);
2637 goto done;
2638 }
2639 }
2640
20ff5565
WH
2641 lan78xx_init_stats(dev);
2642
55d7de9d
WH
2643 set_bit(EVENT_DEV_OPEN, &dev->flags);
2644
2645 netif_start_queue(net);
2646
2647 dev->link_on = false;
2648
2649 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2650done:
2651 usb_autopm_put_interface(dev->intf);
2652
2653out:
2654 return ret;
2655}
2656
/* Unlink all in-flight TX/RX URBs and wait (bounded, polled) for their
 * completions to drain. dev->wait is the wakeup hook the completion
 * path uses while we sleep here.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	/* NOTE(review): the && here means we only keep waiting while ALL
	 * three queues are non-empty; waiting while ANY is non-empty (||)
	 * looks like the intent — confirm against usbnet_terminate_urbs()
	 * before changing.
	 */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2682
/* ndo_stop: stop the PHY and stats timer, drain all URBs, silence the
 * deferred workers, and drop the PM reference taken in lan78xx_open().
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	if (net->phydev)
		phy_stop(net->phydev);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2719
55d7de9d
WH
2720static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2721 struct sk_buff *skb, gfp_t flags)
2722{
2723 u32 tx_cmd_a, tx_cmd_b;
7e24b4ed 2724 void *ptr;
55d7de9d 2725
d4ca7359 2726 if (skb_cow_head(skb, TX_OVERHEAD)) {
55d7de9d 2727 dev_kfree_skb_any(skb);
d4ca7359 2728 return NULL;
55d7de9d
WH
2729 }
2730
47240ba0
ED
2731 if (skb_linearize(skb)) {
2732 dev_kfree_skb_any(skb);
55d7de9d 2733 return NULL;
47240ba0 2734 }
55d7de9d
WH
2735
2736 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2737
2738 if (skb->ip_summed == CHECKSUM_PARTIAL)
2739 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2740
2741 tx_cmd_b = 0;
2742 if (skb_is_gso(skb)) {
2743 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2744
2745 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2746
2747 tx_cmd_a |= TX_CMD_A_LSO_;
2748 }
2749
2750 if (skb_vlan_tag_present(skb)) {
2751 tx_cmd_a |= TX_CMD_A_IVTG_;
2752 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2753 }
2754
7e24b4ed
CY
2755 ptr = skb_push(skb, 8);
2756 put_unaligned_le32(tx_cmd_a, ptr);
2757 put_unaligned_le32(tx_cmd_b, ptr + 4);
55d7de9d
WH
2758
2759 return skb;
2760}
2761
/* Move @skb from @list to dev->done under both queue locks, tag it with
 * @state, and kick the tasklet when the done queue becomes non-empty.
 * Returns the skb's previous state so callers (e.g. rx_complete) can
 * detect a racing unlink.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	/* hand-over-hand locking: irqs stay disabled between the two
	 * locks; the saved flags are restored on the second unlock
	 */
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2784
/* Bulk-out URB completion handler (interrupt context). Updates TX
 * statistics, schedules recovery for fatal endpoint errors, and hands
 * the skb to the bottom half via defer_bh().
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		/* one URB may carry several aggregated packets */
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* stalled endpoint: have the kevent worker clear it */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			/* transient link-layer trouble: pause the queue */
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2823
2824static void lan78xx_queue_skb(struct sk_buff_head *list,
2825 struct sk_buff *newsk, enum skb_state state)
2826{
2827 struct skb_data *entry = (struct skb_data *)newsk->cb;
2828
2829 __skb_queue_tail(list, newsk);
2830 entry->state = state;
2831}
2832
e0c79ff6
BX
2833static netdev_tx_t
2834lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
55d7de9d
WH
2835{
2836 struct lan78xx_net *dev = netdev_priv(net);
81c38e81 2837 struct sk_buff *skb2 = NULL;
55d7de9d 2838
81c38e81 2839 if (skb) {
55d7de9d 2840 skb_tx_timestamp(skb);
81c38e81
WH
2841 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2842 }
55d7de9d 2843
81c38e81
WH
2844 if (skb2) {
2845 skb_queue_tail(&dev->txq_pend, skb2);
55d7de9d 2846
4b2a4a96
WH
2847 /* throttle TX patch at slower than SUPER SPEED USB */
2848 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2849 (skb_queue_len(&dev->txq_pend) > 10))
55d7de9d
WH
2850 netif_stop_queue(net);
2851 } else {
2852 netif_dbg(dev, tx_err, dev->net,
2853 "lan78xx_tx_prep return NULL\n");
2854 dev->net->stats.tx_errors++;
2855 dev->net->stats.tx_dropped++;
2856 }
2857
2858 tasklet_schedule(&dev->bh);
2859
2860 return NETDEV_TX_OK;
2861}
2862
e0c79ff6
BX
2863static int
2864lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
55d7de9d
WH
2865{
2866 int tmp;
2867 struct usb_host_interface *alt = NULL;
2868 struct usb_host_endpoint *in = NULL, *out = NULL;
2869 struct usb_host_endpoint *status = NULL;
2870
2871 for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2872 unsigned ep;
2873
2874 in = NULL;
2875 out = NULL;
2876 status = NULL;
2877 alt = intf->altsetting + tmp;
2878
2879 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2880 struct usb_host_endpoint *e;
2881 int intr = 0;
2882
2883 e = alt->endpoint + ep;
2884 switch (e->desc.bmAttributes) {
2885 case USB_ENDPOINT_XFER_INT:
2886 if (!usb_endpoint_dir_in(&e->desc))
2887 continue;
2888 intr = 1;
2889 /* FALLTHROUGH */
2890 case USB_ENDPOINT_XFER_BULK:
2891 break;
2892 default:
2893 continue;
2894 }
2895 if (usb_endpoint_dir_in(&e->desc)) {
2896 if (!intr && !in)
2897 in = e;
2898 else if (intr && !status)
2899 status = e;
2900 } else {
2901 if (!out)
2902 out = e;
2903 }
2904 }
2905 if (in && out)
2906 break;
2907 }
2908 if (!alt || !in || !out)
2909 return -EINVAL;
2910
2911 dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2912 in->desc.bEndpointAddress &
2913 USB_ENDPOINT_NUMBER_MASK);
2914 dev->pipe_out = usb_sndbulkpipe(dev->udev,
2915 out->desc.bEndpointAddress &
2916 USB_ENDPOINT_NUMBER_MASK);
2917 dev->ep_intr = status;
2918
2919 return 0;
2920}
2921
/* Driver bind: discover endpoints, allocate per-device private data,
 * set default offload features, set up the PHY-interrupt IRQ domain,
 * reset the chip and initialize the MDIO bus. On failure, unwinds in
 * reverse order via out2/out1. Note pdata is stored type-punned in
 * dev->data[0] and freed on error here, in lan78xx_unbind() otherwise.
 */
static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
{
	struct lan78xx_priv *pdata = NULL;
	int ret;
	int i;

	ret = lan78xx_get_endpoints(dev, intf);
	if (ret) {
		netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
			    ret);
		return ret;
	}

	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);

	pdata = (struct lan78xx_priv *)(dev->data[0]);
	if (!pdata) {
		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
		return -ENOMEM;
	}

	pdata->dev = dev;

	spin_lock_init(&pdata->rfe_ctl_lock);
	mutex_init(&pdata->dataport_mutex);

	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);

	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
		pdata->vlan_table[i] = 0;

	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);

	/* advertised offloads default on per the DEFAULT_* policy macros */
	dev->net->features = 0;

	if (DEFAULT_TX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_HW_CSUM;

	if (DEFAULT_RX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_RXCSUM;

	if (DEFAULT_TSO_CSUM_ENABLE)
		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;

	if (DEFAULT_VLAN_RX_OFFLOAD)
		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (DEFAULT_VLAN_FILTER_ENABLE)
		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->net->hw_features = dev->net->features;

	ret = lan78xx_setup_irq_domain(dev);
	if (ret < 0) {
		netdev_warn(dev->net,
			    "lan78xx_setup_irq_domain() failed : %d", ret);
		goto out1;
	}

	/* every TX frame carries an 8-byte command header */
	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* Init all registers */
	ret = lan78xx_reset(dev);
	if (ret) {
		netdev_warn(dev->net, "Registers INIT FAILED....");
		goto out2;
	}

	ret = lan78xx_mdio_init(dev);
	if (ret) {
		netdev_warn(dev->net, "MDIO INIT FAILED.....");
		goto out2;
	}

	dev->net->flags |= IFF_MULTICAST;

	pdata->wol = WAKE_MAGIC;

	return ret;

out2:
	lan78xx_remove_irq_domain(dev);

out1:
	netdev_warn(dev->net, "Bind routine FAILED");
	cancel_work_sync(&pdata->set_multicast);
	cancel_work_sync(&pdata->set_vlan);
	kfree(pdata);
	return ret;
}
3013
3014static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3015{
3016 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3017
cc89c323
WH
3018 lan78xx_remove_irq_domain(dev);
3019
ce85e13a
WH
3020 lan78xx_remove_mdio(dev);
3021
55d7de9d 3022 if (pdata) {
2d2d99ec
RC
3023 cancel_work_sync(&pdata->set_multicast);
3024 cancel_work_sync(&pdata->set_vlan);
55d7de9d
WH
3025 netif_dbg(dev, ifdown, dev->net, "free pdata");
3026 kfree(pdata);
3027 pdata = NULL;
3028 dev->data[0] = 0;
3029 }
3030}
3031
3032static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3033 struct sk_buff *skb,
3034 u32 rx_cmd_a, u32 rx_cmd_b)
3035{
9343ac87
DS
3036 /* HW Checksum offload appears to be flawed if used when not stripping
3037 * VLAN headers. Drop back to S/W checksums under these conditions.
3038 */
55d7de9d 3039 if (!(dev->net->features & NETIF_F_RXCSUM) ||
9343ac87
DS
3040 unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3041 ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3042 !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
55d7de9d
WH
3043 skb->ip_summed = CHECKSUM_NONE;
3044 } else {
3045 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3046 skb->ip_summed = CHECKSUM_COMPLETE;
3047 }
3048}
3049
ec21ecf0
DS
3050static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3051 struct sk_buff *skb,
3052 u32 rx_cmd_a, u32 rx_cmd_b)
3053{
3054 if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3055 (rx_cmd_a & RX_CMD_A_FVTG_))
3056 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3057 (rx_cmd_b & 0xffff));
3058}
3059
e0c79ff6 3060static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
55d7de9d 3061{
51ceac9f 3062 int status;
55d7de9d
WH
3063
3064 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
3065 skb_queue_tail(&dev->rxq_pause, skb);
3066 return;
3067 }
3068
55d7de9d
WH
3069 dev->net->stats.rx_packets++;
3070 dev->net->stats.rx_bytes += skb->len;
3071
74d79a2e
WH
3072 skb->protocol = eth_type_trans(skb, dev->net);
3073
55d7de9d
WH
3074 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3075 skb->len + sizeof(struct ethhdr), skb->protocol);
3076 memset(skb->cb, 0, sizeof(struct skb_data));
3077
3078 if (skb_defer_rx_timestamp(skb))
3079 return;
3080
3081 status = netif_rx(skb);
3082 if (status != NET_RX_SUCCESS)
3083 netif_dbg(dev, rx_err, dev->net,
3084 "netif_rx status %d\n", status);
3085}
3086
/* Parse one bulk-in buffer, which may contain several frames, each
 * prefixed by a 10-byte command header (RX_CMD_A/B/C) and padded to a
 * 4-byte boundary. The last frame reuses @skb itself (returns 1 with
 * skb trimmed); earlier frames are delivered via clones. Returns 0 only
 * on a short buffer or clone failure (caller counts an rx_error).
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* pull the three little-endian command words */
		rx_cmd_a = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_a));

		rx_cmd_b = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_b));

		rx_cmd_c = get_unaligned_le16(skb->data);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* receive error reported by hardware: skip frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);
				lan78xx_rx_vlan_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* clone shares the data buffer; only the
			 * head/tail pointers are adjusted per frame
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3158
3159static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3160{
3161 if (!lan78xx_rx(dev, skb)) {
3162 dev->net->stats.rx_errors++;
3163 goto done;
3164 }
3165
3166 if (skb->len) {
3167 lan78xx_skb_return(dev, skb);
3168 return;
3169 }
3170
3171 netif_dbg(dev, rx_err, dev->net, "drop\n");
3172 dev->net->stats.rx_errors++;
3173done:
3174 skb_queue_tail(&dev->done, skb);
3175}
3176
3177static void rx_complete(struct urb *urb);
3178
/* Allocate an rx_urb_size skb and submit @urb for bulk-in on it.
 * Consumes @urb (freed on any failure path). Returns 0 on successful
 * submission, -ENOMEM/-ENOLINK or the usb_submit_urb() error otherwise.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	/* rxq lock also serializes the state checks below against
	 * shutdown/suspend paths
	 */
	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: kevent worker will clear halt */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* let the tasklet retry the fill later */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3238
/* Bulk-in URB completion handler (interrupt context). Classifies the
 * completion status, hands the skb to the bottom half, and — when the
 * URB is still usable and nothing is shutting down — resubmits it
 * immediately. On error paths the URB travels with the skb (entry->urb)
 * so the bottom half can recycle it.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* runt buffers cannot even hold one command header */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET: /* async unlink */
	case -ESHUTDOWN: /* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* pass the URB along for cleanup instead of resubmitting */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	/* defer_bh() returns the previous state so a racing unlink
	 * (unlink_start) suppresses the resubmit below
	 */
	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3308
/* TX bottom half: aggregate pending skbs from dev->txq_pend into one
 * bulk-OUT URB (packets laid back to back, each padded to a 32-bit
 * boundary, total capped at MAX_SINGLE_PACKET_SIZE), or send a GSO skb
 * on its own, then submit the URB.  While the device is asleep the URB
 * is anchored on dev->deferred and submitted at resume time instead.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	spin_lock_irqsave(&tqp->lock, flags);
	skb_queue_walk(tqp, skb) {
		/* a GSO skb is never aggregated: send it alone */
		if (skb_is_gso(skb)) {
			if (!skb_queue_is_first(tqp, skb)) {
				/* handle previous packets first */
				break;
			}
			count = 1;
			length = skb->len - TX_OVERHEAD;
			__skb_unlink(skb, tqp);
			spin_unlock_irqrestore(&tqp->lock, flags);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* running total, with the previous packets padded to u32 */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}
	spin_unlock_irqrestore(&tqp->lock, flags);

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			/* length counts payload only, not TX command words */
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		/* throttle the stack once the in-flight queue is full */
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		/* endpoint stalled: stop TX and clear from keventd */
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		/* usb_free_urb(NULL) is a no-op on the early-drop paths */
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3436
3437static void lan78xx_rx_bh(struct lan78xx_net *dev)
3438{
3439 struct urb *urb;
3440 int i;
3441
3442 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3443 for (i = 0; i < 10; i++) {
3444 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3445 break;
3446 urb = usb_alloc_urb(0, GFP_ATOMIC);
3447 if (urb)
3448 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3449 return;
3450 }
3451
3452 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3453 tasklet_schedule(&dev->bh);
3454 }
3455 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3456 netif_wake_queue(dev->net);
3457}
3458
/* Main driver tasklet: drain the done list (deliver RX frames, free
 * completed TX/RX resources), then keep the TX and RX pipelines fed.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			/* pre-mark rx_cleanup so a requeue by rx_process
			 * gets freed on the next pass
			 */
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* NOTE(review): an unexpected state aborts the
			 * drain without freeing this skb or the rest of
			 * the queue -- preserved as-is.
			 */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
3502
/* Deferred keventd work: services events flagged from atomic contexts
 * via lan78xx_defer_kevent() -- clearing TX/RX endpoint stalls,
 * handling link resets, and running the periodic statistics update.
 *
 * NOTE(review): the "goto fail_pipe"/"goto fail_halt" targets jump
 * into the body of an if(), so an autopm-get failure only logs the
 * error and leaves the corresponding EVENT_*_HALT bit set (retried on
 * the next kevent).  Unusual but intentional-looking; preserved as-is.
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt(dev->udev, dev->pipe_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
fail_pipe:
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
fail_halt:
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			/* restart RX URB submission */
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		int ret = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (lan78xx_link_reset(dev) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			/* NOTE(review): 'ret' is always 0 here, so the
			 * logged code is not the real failure reason.
			 */
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
		lan78xx_update_stats(dev);

		clear_bit(EVENT_STAT_UPDATE, &dev->flags);

		mod_timer(&dev->stat_monitor,
			  jiffies + (STAT_UPDATE_TIMER * dev->delta));

		/* back off the stats period exponentially, capped at 50x */
		dev->delta = min((dev->delta * 2), 50);
	}
}
3580
3581static void intr_complete(struct urb *urb)
3582{
3583 struct lan78xx_net *dev = urb->context;
3584 int status = urb->status;
3585
3586 switch (status) {
3587 /* success */
3588 case 0:
3589 lan78xx_status(dev, urb);
3590 break;
3591
3592 /* software-driven interface shutdown */
3593 case -ENOENT: /* urb killed */
3594 case -ESHUTDOWN: /* hardware gone */
3595 netif_dbg(dev, ifdown, dev->net,
3596 "intr shutdown, code %d\n", status);
3597 return;
3598
3599 /* NOTE: not throttling like RX/TX, since this endpoint
3600 * already polls infrequently
3601 */
3602 default:
3603 netdev_dbg(dev->net, "intr status %d\n", status);
3604 break;
3605 }
3606
3607 if (!netif_running(dev->net))
3608 return;
3609
3610 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3611 status = usb_submit_urb(urb, GFP_ATOMIC);
3612 if (status != 0)
3613 netif_err(dev, timer, dev->net,
3614 "intr resubmit --> %d\n", status);
3615}
3616
/* USB disconnect callback: tear down the PHY, netdev and USB
 * resources, roughly mirroring probe in reverse order.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);
	net = dev->net;
	phydev = net->phydev;

	/* drop the driver's PHY fixups (registered elsewhere in the
	 * driver -- presumably at init/bind; confirm)
	 */
	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	/* a pseudo fixed-link PHY must be explicitly unregistered */
	if (phy_is_pseudo_fixed_link(phydev))
		fixed_phy_unregister(phydev);

	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* drop any TX URBs deferred while the device was asleep */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
3655
/* netdev watchdog callback: TX stalled -- unlink all in-flight TX URBs
 * and kick the tasklet to restart transmission.
 */
static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
3663
ce896476
JH
3664static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3665 struct net_device *netdev,
3666 netdev_features_t features)
3667{
3668 if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3669 features &= ~NETIF_F_GSO_MASK;
3670
3671 features = vlan_features_check(skb, features);
3672 features = vxlan_features_check(skb, features);
3673
3674 return features;
3675}
3676
55d7de9d
WH
/* net_device callbacks for LAN78xx interfaces */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
3692
/* Statistics timer callback: runs in timer (atomic) context, so just
 * flag EVENT_STAT_UPDATE and let the delayed work do the update.
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
3699
55d7de9d
WH
/* USB probe: allocate and initialize the netdev, bind the device,
 * locate the bulk-in/bulk-out/interrupt endpoints, set up the
 * interrupt URB, bring up the PHY, and register the interface.
 * Error paths unwind through the out1..out5 labels in reverse order.
 */
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned maxp;
	unsigned period;
	u8 *buf = NULL;

	udev = interface_to_usbdev(intf);
	udev = usb_get_dev(udev);

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	/* netdev_printk() needs this */
	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					 | NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->done);
	skb_queue_head_init(&dev->rxq_pause);
	skb_queue_head_init(&dev->txq_pend);
	mutex_init(&dev->phy_mutex);

	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->delta = 1;
	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);

	mutex_init(&dev->stats.access_lock);

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out2;

	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
	netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);

	/* the driver expects bulk-in, bulk-out and interrupt endpoints */
	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out3;
	}

	dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
	dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
	dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					dev->ep_intr->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	period = dev->ep_intr->desc.bInterval;

	/* set up the interrupt URB; a kmalloc failure is tolerated and
	 * simply leaves dev->urb_intr NULL (no status polling)
	 */
	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
	buf = kmalloc(maxp, GFP_KERNEL);
	if (buf) {
		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
		if (!dev->urb_intr) {
			ret = -ENOMEM;
			kfree(buf);
			goto out3;
		} else {
			usb_fill_int_urb(dev->urb_intr, dev->udev,
					 dev->pipe_intr, buf, maxp,
					 intr_complete, dev, period);
			/* the urb owns buf now; freed along with the urb */
			dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
		}
	}

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto out4;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto out5;
	}

	usb_set_intfdata(intf, dev);

	ret = device_set_wakeup_enable(&udev->dev, true);

	/* Default delay of 2sec has more overhead than advantage.
	 * Set to 10sec as default.
	 */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

out5:
	phy_disconnect(netdev->phydev);
out4:
	usb_free_urb(dev->urb_intr);
out3:
	lan78xx_unbind(dev, intf);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}
3835
3836static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3837{
3838 const u16 crc16poly = 0x8005;
3839 int i;
3840 u16 bit, crc, msb;
3841 u8 data;
3842
3843 crc = 0xFFFF;
3844 for (i = 0; i < len; i++) {
3845 data = *buf++;
3846 for (bit = 0; bit < 8; bit++) {
3847 msb = crc >> 15;
3848 crc <<= 1;
3849
3850 if (msb ^ (u16)(data & 1)) {
3851 crc ^= crc16poly;
3852 crc |= (u16)0x0001U;
3853 }
3854 data >>= 1;
3855 }
3856 }
3857
3858 return crc;
3859}
3860
/* Program the wake-on-LAN filters and suspend mode from the @wol
 * bitmask (WAKE_PHY / WAKE_MAGIC / WAKE_BCAST / WAKE_MCAST /
 * WAKE_UCAST / WAKE_ARP), then re-enable the receiver so wake frames
 * can be detected while suspended.
 *
 * NOTE(review): every register access return code is collected in
 * 'ret' but never checked; the function unconditionally returns 0.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* quiesce the MAC while reprogramming the wake logic */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear any previous wake configuration and status */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable the receiver so wake frames are seen */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
4003
/* USB suspend callback (both system sleep and runtime autosuspend).
 * On the first suspend: refuse autosuspend while TX is busy, stop the
 * MAC, detach and kill outstanding URBs.  Then program either
 * good-frame wakeup (autosuspend) or the user's WoL settings (system
 * suspend) via lan78xx_set_suspend().
 *
 * NOTE(review): register access return codes are ignored throughout;
 * the only failure reported is -EBUSY for a busy autosuspend attempt.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* no stats updates while asleep */
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* receiver back on so wake frames are detected */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system sleep: honor the configured WoL mask */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
4096
/* USB resume callback: restart the stats timer, resubmit the interrupt
 * URB, flush TX URBs that were deferred while the device was asleep,
 * clear the wake configuration/status registers, and re-enable the
 * transmitter.
 *
 * NOTE(review): register access return codes and the interrupt-URB
 * submission result are ignored; the function always returns 0.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->urb_intr, GFP_NOIO);

		/* submit the TX URBs deferred by lan78xx_tx_bh() */
		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* clear wake settings and any latched wake status */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* transmitter back on */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
4163
/* Resume after the USB core reset the device: redo the full hardware
 * initialization, restart the PHY, then run the normal resume path.
 */
static int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);

	phy_start(dev->net->phydev);

	return lan78xx_resume(intf);
}
4174
/* USB vendor/product IDs served by this driver */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
4191
/* USB driver registration: autosuspend is supported (see the
 * suspend/resume callbacks above) and hub-initiated LPM is disabled.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");