]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/net/usb/lan78xx.c
net: usb: ax88172x: Utilize phy_ethtool_nway_reset
[mirror_ubuntu-artful-kernel.git] / drivers / net / usb / lan78xx.c
CommitLineData
55d7de9d
WH
1/*
2 * Copyright (C) 2015 Microchip Technology
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17#include <linux/version.h>
18#include <linux/module.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/ethtool.h>
55d7de9d
WH
22#include <linux/usb.h>
23#include <linux/crc32.h>
24#include <linux/signal.h>
25#include <linux/slab.h>
26#include <linux/if_vlan.h>
27#include <linux/uaccess.h>
28#include <linux/list.h>
29#include <linux/ip.h>
30#include <linux/ipv6.h>
31#include <linux/mdio.h>
32#include <net/ip6_checksum.h>
cc89c323
WH
33#include <linux/interrupt.h>
34#include <linux/irqdomain.h>
35#include <linux/irq.h>
36#include <linux/irqchip/chained_irq.h>
bdfba55e 37#include <linux/microchipphy.h>
55d7de9d
WH
38#include "lan78xx.h"
39
40#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
41#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
42#define DRIVER_NAME "lan78xx"
cc89c323 43#define DRIVER_VERSION "1.0.5"
55d7de9d
WH
44
45#define TX_TIMEOUT_JIFFIES (5 * HZ)
46#define THROTTLE_JIFFIES (HZ / 8)
47#define UNLINK_TIMEOUT_MS 3
48
49#define RX_MAX_QUEUE_MEMORY (60 * 1518)
50
51#define SS_USB_PKT_SIZE (1024)
52#define HS_USB_PKT_SIZE (512)
53#define FS_USB_PKT_SIZE (64)
54
55#define MAX_RX_FIFO_SIZE (12 * 1024)
56#define MAX_TX_FIFO_SIZE (12 * 1024)
57#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
58#define DEFAULT_BULK_IN_DELAY (0x0800)
59#define MAX_SINGLE_PACKET_SIZE (9000)
60#define DEFAULT_TX_CSUM_ENABLE (true)
61#define DEFAULT_RX_CSUM_ENABLE (true)
62#define DEFAULT_TSO_CSUM_ENABLE (true)
63#define DEFAULT_VLAN_FILTER_ENABLE (true)
55d7de9d
WH
64#define TX_OVERHEAD (8)
65#define RXW_PADDING 2
66
67#define LAN78XX_USB_VENDOR_ID (0x0424)
68#define LAN7800_USB_PRODUCT_ID (0x7800)
69#define LAN7850_USB_PRODUCT_ID (0x7850)
70#define LAN78XX_EEPROM_MAGIC (0x78A5)
71#define LAN78XX_OTP_MAGIC (0x78F3)
72
73#define MII_READ 1
74#define MII_WRITE 0
75
76#define EEPROM_INDICATOR (0xA5)
77#define EEPROM_MAC_OFFSET (0x01)
78#define MAX_EEPROM_SIZE 512
79#define OTP_INDICATOR_1 (0xF3)
80#define OTP_INDICATOR_2 (0xF7)
81
82#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
83 WAKE_MCAST | WAKE_BCAST | \
84 WAKE_ARP | WAKE_MAGIC)
85
86/* USB related defines */
87#define BULK_IN_PIPE 1
88#define BULK_OUT_PIPE 2
89
90/* default autosuspend delay (mSec)*/
91#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
92
20ff5565
WH
93/* statistic update interval (mSec) */
94#define STAT_UPDATE_TIMER (1 * 1000)
95
cc89c323
WH
96/* defines interrupts from interrupt EP */
97#define MAX_INT_EP (32)
98#define INT_EP_INTEP (31)
99#define INT_EP_OTP_WR_DONE (28)
100#define INT_EP_EEE_TX_LPI_START (26)
101#define INT_EP_EEE_TX_LPI_STOP (25)
102#define INT_EP_EEE_RX_LPI (24)
103#define INT_EP_MAC_RESET_TIMEOUT (23)
104#define INT_EP_RDFO (22)
105#define INT_EP_TXE (21)
106#define INT_EP_USB_STATUS (20)
107#define INT_EP_TX_DIS (19)
108#define INT_EP_RX_DIS (18)
109#define INT_EP_PHY (17)
110#define INT_EP_DP (16)
111#define INT_EP_MAC_ERR (15)
112#define INT_EP_TDFU (14)
113#define INT_EP_TDFO (13)
114#define INT_EP_UTX (12)
115#define INT_EP_GPIO_11 (11)
116#define INT_EP_GPIO_10 (10)
117#define INT_EP_GPIO_9 (9)
118#define INT_EP_GPIO_8 (8)
119#define INT_EP_GPIO_7 (7)
120#define INT_EP_GPIO_6 (6)
121#define INT_EP_GPIO_5 (5)
122#define INT_EP_GPIO_4 (4)
123#define INT_EP_GPIO_3 (3)
124#define INT_EP_GPIO_2 (2)
125#define INT_EP_GPIO_1 (1)
126#define INT_EP_GPIO_0 (0)
127
55d7de9d
WH
128static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
129 "RX FCS Errors",
130 "RX Alignment Errors",
131 "Rx Fragment Errors",
132 "RX Jabber Errors",
133 "RX Undersize Frame Errors",
134 "RX Oversize Frame Errors",
135 "RX Dropped Frames",
136 "RX Unicast Byte Count",
137 "RX Broadcast Byte Count",
138 "RX Multicast Byte Count",
139 "RX Unicast Frames",
140 "RX Broadcast Frames",
141 "RX Multicast Frames",
142 "RX Pause Frames",
143 "RX 64 Byte Frames",
144 "RX 65 - 127 Byte Frames",
145 "RX 128 - 255 Byte Frames",
146 "RX 256 - 511 Bytes Frames",
147 "RX 512 - 1023 Byte Frames",
148 "RX 1024 - 1518 Byte Frames",
149 "RX Greater 1518 Byte Frames",
150 "EEE RX LPI Transitions",
151 "EEE RX LPI Time",
152 "TX FCS Errors",
153 "TX Excess Deferral Errors",
154 "TX Carrier Errors",
155 "TX Bad Byte Count",
156 "TX Single Collisions",
157 "TX Multiple Collisions",
158 "TX Excessive Collision",
159 "TX Late Collisions",
160 "TX Unicast Byte Count",
161 "TX Broadcast Byte Count",
162 "TX Multicast Byte Count",
163 "TX Unicast Frames",
164 "TX Broadcast Frames",
165 "TX Multicast Frames",
166 "TX Pause Frames",
167 "TX 64 Byte Frames",
168 "TX 65 - 127 Byte Frames",
169 "TX 128 - 255 Byte Frames",
170 "TX 256 - 511 Bytes Frames",
171 "TX 512 - 1023 Byte Frames",
172 "TX 1024 - 1518 Byte Frames",
173 "TX Greater 1518 Byte Frames",
174 "EEE TX LPI Transitions",
175 "EEE TX LPI Time",
176};
177
178struct lan78xx_statstage {
179 u32 rx_fcs_errors;
180 u32 rx_alignment_errors;
181 u32 rx_fragment_errors;
182 u32 rx_jabber_errors;
183 u32 rx_undersize_frame_errors;
184 u32 rx_oversize_frame_errors;
185 u32 rx_dropped_frames;
186 u32 rx_unicast_byte_count;
187 u32 rx_broadcast_byte_count;
188 u32 rx_multicast_byte_count;
189 u32 rx_unicast_frames;
190 u32 rx_broadcast_frames;
191 u32 rx_multicast_frames;
192 u32 rx_pause_frames;
193 u32 rx_64_byte_frames;
194 u32 rx_65_127_byte_frames;
195 u32 rx_128_255_byte_frames;
196 u32 rx_256_511_bytes_frames;
197 u32 rx_512_1023_byte_frames;
198 u32 rx_1024_1518_byte_frames;
199 u32 rx_greater_1518_byte_frames;
200 u32 eee_rx_lpi_transitions;
201 u32 eee_rx_lpi_time;
202 u32 tx_fcs_errors;
203 u32 tx_excess_deferral_errors;
204 u32 tx_carrier_errors;
205 u32 tx_bad_byte_count;
206 u32 tx_single_collisions;
207 u32 tx_multiple_collisions;
208 u32 tx_excessive_collision;
209 u32 tx_late_collisions;
210 u32 tx_unicast_byte_count;
211 u32 tx_broadcast_byte_count;
212 u32 tx_multicast_byte_count;
213 u32 tx_unicast_frames;
214 u32 tx_broadcast_frames;
215 u32 tx_multicast_frames;
216 u32 tx_pause_frames;
217 u32 tx_64_byte_frames;
218 u32 tx_65_127_byte_frames;
219 u32 tx_128_255_byte_frames;
220 u32 tx_256_511_bytes_frames;
221 u32 tx_512_1023_byte_frames;
222 u32 tx_1024_1518_byte_frames;
223 u32 tx_greater_1518_byte_frames;
224 u32 eee_tx_lpi_transitions;
225 u32 eee_tx_lpi_time;
226};
227
20ff5565
WH
228struct lan78xx_statstage64 {
229 u64 rx_fcs_errors;
230 u64 rx_alignment_errors;
231 u64 rx_fragment_errors;
232 u64 rx_jabber_errors;
233 u64 rx_undersize_frame_errors;
234 u64 rx_oversize_frame_errors;
235 u64 rx_dropped_frames;
236 u64 rx_unicast_byte_count;
237 u64 rx_broadcast_byte_count;
238 u64 rx_multicast_byte_count;
239 u64 rx_unicast_frames;
240 u64 rx_broadcast_frames;
241 u64 rx_multicast_frames;
242 u64 rx_pause_frames;
243 u64 rx_64_byte_frames;
244 u64 rx_65_127_byte_frames;
245 u64 rx_128_255_byte_frames;
246 u64 rx_256_511_bytes_frames;
247 u64 rx_512_1023_byte_frames;
248 u64 rx_1024_1518_byte_frames;
249 u64 rx_greater_1518_byte_frames;
250 u64 eee_rx_lpi_transitions;
251 u64 eee_rx_lpi_time;
252 u64 tx_fcs_errors;
253 u64 tx_excess_deferral_errors;
254 u64 tx_carrier_errors;
255 u64 tx_bad_byte_count;
256 u64 tx_single_collisions;
257 u64 tx_multiple_collisions;
258 u64 tx_excessive_collision;
259 u64 tx_late_collisions;
260 u64 tx_unicast_byte_count;
261 u64 tx_broadcast_byte_count;
262 u64 tx_multicast_byte_count;
263 u64 tx_unicast_frames;
264 u64 tx_broadcast_frames;
265 u64 tx_multicast_frames;
266 u64 tx_pause_frames;
267 u64 tx_64_byte_frames;
268 u64 tx_65_127_byte_frames;
269 u64 tx_128_255_byte_frames;
270 u64 tx_256_511_bytes_frames;
271 u64 tx_512_1023_byte_frames;
272 u64 tx_1024_1518_byte_frames;
273 u64 tx_greater_1518_byte_frames;
274 u64 eee_tx_lpi_transitions;
275 u64 eee_tx_lpi_time;
276};
277
55d7de9d
WH
278struct lan78xx_net;
279
280struct lan78xx_priv {
281 struct lan78xx_net *dev;
282 u32 rfe_ctl;
283 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
284 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
285 u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
286 struct mutex dataport_mutex; /* for dataport access */
287 spinlock_t rfe_ctl_lock; /* for rfe register access */
288 struct work_struct set_multicast;
289 struct work_struct set_vlan;
290 u32 wol;
291};
292
/* Lifecycle state of an skb while it travels through the URB queues. */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
302
303struct skb_data { /* skb->cb is one of these */
304 struct urb *urb;
305 struct lan78xx_net *dev;
306 enum skb_state state;
307 size_t length;
74d79a2e 308 int num_of_packet;
55d7de9d
WH
309};
310
311struct usb_context {
312 struct usb_ctrlrequest req;
313 struct lan78xx_net *dev;
314};
315
316#define EVENT_TX_HALT 0
317#define EVENT_RX_HALT 1
318#define EVENT_RX_MEMORY 2
319#define EVENT_STS_SPLIT 3
320#define EVENT_LINK_RESET 4
321#define EVENT_RX_PAUSED 5
322#define EVENT_DEV_WAKING 6
323#define EVENT_DEV_ASLEEP 7
324#define EVENT_DEV_OPEN 8
20ff5565
WH
325#define EVENT_STAT_UPDATE 9
326
327struct statstage {
328 struct mutex access_lock; /* for stats access */
329 struct lan78xx_statstage saved;
330 struct lan78xx_statstage rollover_count;
331 struct lan78xx_statstage rollover_max;
332 struct lan78xx_statstage64 curr_stat;
333};
55d7de9d 334
cc89c323
WH
335struct irq_domain_data {
336 struct irq_domain *irqdomain;
337 unsigned int phyirq;
338 struct irq_chip *irqchip;
339 irq_flow_handler_t irq_handler;
340 u32 irqenable;
341 struct mutex irq_lock; /* for irq bus access */
342};
343
55d7de9d
WH
344struct lan78xx_net {
345 struct net_device *net;
346 struct usb_device *udev;
347 struct usb_interface *intf;
348 void *driver_priv;
349
350 int rx_qlen;
351 int tx_qlen;
352 struct sk_buff_head rxq;
353 struct sk_buff_head txq;
354 struct sk_buff_head done;
355 struct sk_buff_head rxq_pause;
356 struct sk_buff_head txq_pend;
357
358 struct tasklet_struct bh;
359 struct delayed_work wq;
360
361 struct usb_host_endpoint *ep_blkin;
362 struct usb_host_endpoint *ep_blkout;
363 struct usb_host_endpoint *ep_intr;
364
365 int msg_enable;
366
367 struct urb *urb_intr;
368 struct usb_anchor deferred;
369
370 struct mutex phy_mutex; /* for phy access */
371 unsigned pipe_in, pipe_out, pipe_intr;
372
373 u32 hard_mtu; /* count any extra framing */
374 size_t rx_urb_size; /* size for rx urbs */
375
376 unsigned long flags;
377
378 wait_queue_head_t *wait;
379 unsigned char suspend_count;
380
381 unsigned maxpacket;
382 struct timer_list delay;
20ff5565 383 struct timer_list stat_monitor;
55d7de9d
WH
384
385 unsigned long data[5];
55d7de9d
WH
386
387 int link_on;
388 u8 mdix_ctrl;
ce85e13a 389
87177ba6
WH
390 u32 chipid;
391 u32 chiprev;
ce85e13a 392 struct mii_bus *mdiobus;
349e0c5e
WH
393
394 int fc_autoneg;
395 u8 fc_request_control;
20ff5565
WH
396
397 int delta;
398 struct statstage stats;
cc89c323
WH
399
400 struct irq_domain_data domain_data;
55d7de9d
WH
401};
402
403/* use ethtool to change the level for any given device */
404static int msg_level = -1;
405module_param(msg_level, int, 0);
406MODULE_PARM_DESC(msg_level, "Override default message level");
407
408static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
409{
410 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
411 int ret;
412
55d7de9d
WH
413 if (!buf)
414 return -ENOMEM;
415
416 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
417 USB_VENDOR_REQUEST_READ_REGISTER,
418 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
419 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
420 if (likely(ret >= 0)) {
421 le32_to_cpus(buf);
422 *data = *buf;
423 } else {
424 netdev_warn(dev->net,
425 "Failed to read register index 0x%08x. ret = %d",
426 index, ret);
427 }
428
429 kfree(buf);
430
431 return ret;
432}
433
434static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
435{
436 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
437 int ret;
438
55d7de9d
WH
439 if (!buf)
440 return -ENOMEM;
441
442 *buf = data;
443 cpu_to_le32s(buf);
444
445 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
446 USB_VENDOR_REQUEST_WRITE_REGISTER,
447 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
448 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
449 if (unlikely(ret < 0)) {
450 netdev_warn(dev->net,
451 "Failed to write register index 0x%08x. ret = %d",
452 index, ret);
453 }
454
455 kfree(buf);
456
457 return ret;
458}
459
460static int lan78xx_read_stats(struct lan78xx_net *dev,
461 struct lan78xx_statstage *data)
462{
463 int ret = 0;
464 int i;
465 struct lan78xx_statstage *stats;
466 u32 *src;
467 u32 *dst;
468
55d7de9d
WH
469 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
470 if (!stats)
471 return -ENOMEM;
472
473 ret = usb_control_msg(dev->udev,
474 usb_rcvctrlpipe(dev->udev, 0),
475 USB_VENDOR_REQUEST_GET_STATS,
476 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
477 0,
478 0,
479 (void *)stats,
480 sizeof(*stats),
481 USB_CTRL_SET_TIMEOUT);
482 if (likely(ret >= 0)) {
483 src = (u32 *)stats;
484 dst = (u32 *)data;
485 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
486 le32_to_cpus(&src[i]);
487 dst[i] = src[i];
488 }
489 } else {
490 netdev_warn(dev->net,
491 "Failed to read stat ret = 0x%x", ret);
492 }
493
494 kfree(stats);
495
496 return ret;
497}
498
20ff5565
WH
499#define check_counter_rollover(struct1, dev_stats, member) { \
500 if (struct1->member < dev_stats.saved.member) \
501 dev_stats.rollover_count.member++; \
502 }
503
504static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
505 struct lan78xx_statstage *stats)
506{
507 check_counter_rollover(stats, dev->stats, rx_fcs_errors);
508 check_counter_rollover(stats, dev->stats, rx_alignment_errors);
509 check_counter_rollover(stats, dev->stats, rx_fragment_errors);
510 check_counter_rollover(stats, dev->stats, rx_jabber_errors);
511 check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
512 check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
513 check_counter_rollover(stats, dev->stats, rx_dropped_frames);
514 check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
515 check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
516 check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
517 check_counter_rollover(stats, dev->stats, rx_unicast_frames);
518 check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
519 check_counter_rollover(stats, dev->stats, rx_multicast_frames);
520 check_counter_rollover(stats, dev->stats, rx_pause_frames);
521 check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
522 check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
523 check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
524 check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
525 check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
526 check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
527 check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
528 check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
529 check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
530 check_counter_rollover(stats, dev->stats, tx_fcs_errors);
531 check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
532 check_counter_rollover(stats, dev->stats, tx_carrier_errors);
533 check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
534 check_counter_rollover(stats, dev->stats, tx_single_collisions);
535 check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
536 check_counter_rollover(stats, dev->stats, tx_excessive_collision);
537 check_counter_rollover(stats, dev->stats, tx_late_collisions);
538 check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
539 check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
540 check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
541 check_counter_rollover(stats, dev->stats, tx_unicast_frames);
542 check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
543 check_counter_rollover(stats, dev->stats, tx_multicast_frames);
544 check_counter_rollover(stats, dev->stats, tx_pause_frames);
545 check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
546 check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
547 check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
548 check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
549 check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
550 check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
551 check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
552 check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
553 check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
554
555 memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
556}
557
558static void lan78xx_update_stats(struct lan78xx_net *dev)
559{
560 u32 *p, *count, *max;
561 u64 *data;
562 int i;
563 struct lan78xx_statstage lan78xx_stats;
564
565 if (usb_autopm_get_interface(dev->intf) < 0)
566 return;
567
568 p = (u32 *)&lan78xx_stats;
569 count = (u32 *)&dev->stats.rollover_count;
570 max = (u32 *)&dev->stats.rollover_max;
571 data = (u64 *)&dev->stats.curr_stat;
572
573 mutex_lock(&dev->stats.access_lock);
574
575 if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
576 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
577
578 for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
579 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
580
581 mutex_unlock(&dev->stats.access_lock);
582
583 usb_autopm_put_interface(dev->intf);
584}
585
55d7de9d
WH
586/* Loop until the read is completed with timeout called with phy_mutex held */
587static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
588{
589 unsigned long start_time = jiffies;
590 u32 val;
591 int ret;
592
593 do {
594 ret = lan78xx_read_reg(dev, MII_ACC, &val);
595 if (unlikely(ret < 0))
596 return -EIO;
597
598 if (!(val & MII_ACC_MII_BUSY_))
599 return 0;
600 } while (!time_after(jiffies, start_time + HZ));
601
602 return -EIO;
603}
604
605static inline u32 mii_access(int id, int index, int read)
606{
607 u32 ret;
608
609 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
610 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
611 if (read)
612 ret |= MII_ACC_MII_READ_;
613 else
614 ret |= MII_ACC_MII_WRITE_;
615 ret |= MII_ACC_MII_BUSY_;
616
617 return ret;
618}
619
55d7de9d
WH
620static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
621{
622 unsigned long start_time = jiffies;
623 u32 val;
624 int ret;
625
626 do {
627 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
628 if (unlikely(ret < 0))
629 return -EIO;
630
631 if (!(val & E2P_CMD_EPC_BUSY_) ||
632 (val & E2P_CMD_EPC_TIMEOUT_))
633 break;
634 usleep_range(40, 100);
635 } while (!time_after(jiffies, start_time + HZ));
636
637 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
638 netdev_warn(dev->net, "EEPROM read operation timeout");
639 return -EIO;
640 }
641
642 return 0;
643}
644
645static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
646{
647 unsigned long start_time = jiffies;
648 u32 val;
649 int ret;
650
651 do {
652 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
653 if (unlikely(ret < 0))
654 return -EIO;
655
656 if (!(val & E2P_CMD_EPC_BUSY_))
657 return 0;
658
659 usleep_range(40, 100);
660 } while (!time_after(jiffies, start_time + HZ));
661
662 netdev_warn(dev->net, "EEPROM is busy");
663 return -EIO;
664}
665
666static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
667 u32 length, u8 *data)
668{
669 u32 val;
a0db7d10 670 u32 saved;
55d7de9d 671 int i, ret;
a0db7d10
WH
672 int retval;
673
674 /* depends on chip, some EEPROM pins are muxed with LED function.
675 * disable & restore LED function to access EEPROM.
676 */
677 ret = lan78xx_read_reg(dev, HW_CFG, &val);
678 saved = val;
87177ba6 679 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
a0db7d10
WH
680 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
681 ret = lan78xx_write_reg(dev, HW_CFG, val);
682 }
55d7de9d 683
a0db7d10
WH
684 retval = lan78xx_eeprom_confirm_not_busy(dev);
685 if (retval)
686 return retval;
55d7de9d
WH
687
688 for (i = 0; i < length; i++) {
689 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
690 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
691 ret = lan78xx_write_reg(dev, E2P_CMD, val);
a0db7d10
WH
692 if (unlikely(ret < 0)) {
693 retval = -EIO;
694 goto exit;
695 }
55d7de9d 696
a0db7d10
WH
697 retval = lan78xx_wait_eeprom(dev);
698 if (retval < 0)
699 goto exit;
55d7de9d
WH
700
701 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
a0db7d10
WH
702 if (unlikely(ret < 0)) {
703 retval = -EIO;
704 goto exit;
705 }
55d7de9d
WH
706
707 data[i] = val & 0xFF;
708 offset++;
709 }
710
a0db7d10
WH
711 retval = 0;
712exit:
87177ba6 713 if (dev->chipid == ID_REV_CHIP_ID_7800_)
a0db7d10
WH
714 ret = lan78xx_write_reg(dev, HW_CFG, saved);
715
716 return retval;
55d7de9d
WH
717}
718
719static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
720 u32 length, u8 *data)
721{
722 u8 sig;
723 int ret;
724
725 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
726 if ((ret == 0) && (sig == EEPROM_INDICATOR))
727 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
728 else
729 ret = -EINVAL;
730
731 return ret;
732}
733
734static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
735 u32 length, u8 *data)
736{
737 u32 val;
a0db7d10 738 u32 saved;
55d7de9d 739 int i, ret;
a0db7d10
WH
740 int retval;
741
742 /* depends on chip, some EEPROM pins are muxed with LED function.
743 * disable & restore LED function to access EEPROM.
744 */
745 ret = lan78xx_read_reg(dev, HW_CFG, &val);
746 saved = val;
87177ba6 747 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
a0db7d10
WH
748 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
749 ret = lan78xx_write_reg(dev, HW_CFG, val);
750 }
55d7de9d 751
a0db7d10
WH
752 retval = lan78xx_eeprom_confirm_not_busy(dev);
753 if (retval)
754 goto exit;
55d7de9d
WH
755
756 /* Issue write/erase enable command */
757 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
758 ret = lan78xx_write_reg(dev, E2P_CMD, val);
a0db7d10
WH
759 if (unlikely(ret < 0)) {
760 retval = -EIO;
761 goto exit;
762 }
55d7de9d 763
a0db7d10
WH
764 retval = lan78xx_wait_eeprom(dev);
765 if (retval < 0)
766 goto exit;
55d7de9d
WH
767
768 for (i = 0; i < length; i++) {
769 /* Fill data register */
770 val = data[i];
771 ret = lan78xx_write_reg(dev, E2P_DATA, val);
a0db7d10
WH
772 if (ret < 0) {
773 retval = -EIO;
774 goto exit;
775 }
55d7de9d
WH
776
777 /* Send "write" command */
778 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
779 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
780 ret = lan78xx_write_reg(dev, E2P_CMD, val);
a0db7d10
WH
781 if (ret < 0) {
782 retval = -EIO;
783 goto exit;
784 }
55d7de9d 785
a0db7d10
WH
786 retval = lan78xx_wait_eeprom(dev);
787 if (retval < 0)
788 goto exit;
55d7de9d
WH
789
790 offset++;
791 }
792
a0db7d10
WH
793 retval = 0;
794exit:
87177ba6 795 if (dev->chipid == ID_REV_CHIP_ID_7800_)
a0db7d10
WH
796 ret = lan78xx_write_reg(dev, HW_CFG, saved);
797
798 return retval;
55d7de9d
WH
799}
800
801static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
802 u32 length, u8 *data)
803{
804 int i;
805 int ret;
806 u32 buf;
807 unsigned long timeout;
808
809 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
810
811 if (buf & OTP_PWR_DN_PWRDN_N_) {
812 /* clear it and wait to be cleared */
813 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
814
815 timeout = jiffies + HZ;
816 do {
817 usleep_range(1, 10);
818 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
819 if (time_after(jiffies, timeout)) {
820 netdev_warn(dev->net,
821 "timeout on OTP_PWR_DN");
822 return -EIO;
823 }
824 } while (buf & OTP_PWR_DN_PWRDN_N_);
825 }
826
827 for (i = 0; i < length; i++) {
828 ret = lan78xx_write_reg(dev, OTP_ADDR1,
829 ((offset + i) >> 8) & OTP_ADDR1_15_11);
830 ret = lan78xx_write_reg(dev, OTP_ADDR2,
831 ((offset + i) & OTP_ADDR2_10_3));
832
833 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
834 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
835
836 timeout = jiffies + HZ;
837 do {
838 udelay(1);
839 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
840 if (time_after(jiffies, timeout)) {
841 netdev_warn(dev->net,
842 "timeout on OTP_STATUS");
843 return -EIO;
844 }
845 } while (buf & OTP_STATUS_BUSY_);
846
847 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
848
849 data[i] = (u8)(buf & 0xFF);
850 }
851
852 return 0;
853}
854
9fb6066d
WH
855static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
856 u32 length, u8 *data)
857{
858 int i;
859 int ret;
860 u32 buf;
861 unsigned long timeout;
862
863 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
864
865 if (buf & OTP_PWR_DN_PWRDN_N_) {
866 /* clear it and wait to be cleared */
867 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
868
869 timeout = jiffies + HZ;
870 do {
871 udelay(1);
872 ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
873 if (time_after(jiffies, timeout)) {
874 netdev_warn(dev->net,
875 "timeout on OTP_PWR_DN completion");
876 return -EIO;
877 }
878 } while (buf & OTP_PWR_DN_PWRDN_N_);
879 }
880
881 /* set to BYTE program mode */
882 ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
883
884 for (i = 0; i < length; i++) {
885 ret = lan78xx_write_reg(dev, OTP_ADDR1,
886 ((offset + i) >> 8) & OTP_ADDR1_15_11);
887 ret = lan78xx_write_reg(dev, OTP_ADDR2,
888 ((offset + i) & OTP_ADDR2_10_3));
889 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
890 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
891 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
892
893 timeout = jiffies + HZ;
894 do {
895 udelay(1);
896 ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
897 if (time_after(jiffies, timeout)) {
898 netdev_warn(dev->net,
899 "Timeout on OTP_STATUS completion");
900 return -EIO;
901 }
902 } while (buf & OTP_STATUS_BUSY_);
903 }
904
905 return 0;
906}
907
55d7de9d
WH
908static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
909 u32 length, u8 *data)
910{
911 u8 sig;
912 int ret;
913
914 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
915
916 if (ret == 0) {
917 if (sig == OTP_INDICATOR_1)
918 offset = offset;
919 else if (sig == OTP_INDICATOR_2)
920 offset += 0x100;
921 else
922 ret = -EINVAL;
923 ret = lan78xx_read_raw_otp(dev, offset, length, data);
924 }
925
926 return ret;
927}
928
929static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
930{
931 int i, ret;
932
933 for (i = 0; i < 100; i++) {
934 u32 dp_sel;
935
936 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
937 if (unlikely(ret < 0))
938 return -EIO;
939
940 if (dp_sel & DP_SEL_DPRDY_)
941 return 0;
942
943 usleep_range(40, 100);
944 }
945
946 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
947
948 return -EIO;
949}
950
951static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
952 u32 addr, u32 length, u32 *buf)
953{
954 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
955 u32 dp_sel;
956 int i, ret;
957
958 if (usb_autopm_get_interface(dev->intf) < 0)
959 return 0;
960
961 mutex_lock(&pdata->dataport_mutex);
962
963 ret = lan78xx_dataport_wait_not_busy(dev);
964 if (ret < 0)
965 goto done;
966
967 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
968
969 dp_sel &= ~DP_SEL_RSEL_MASK_;
970 dp_sel |= ram_select;
971 ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
972
973 for (i = 0; i < length; i++) {
974 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
975
976 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
977
978 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
979
980 ret = lan78xx_dataport_wait_not_busy(dev);
981 if (ret < 0)
982 goto done;
983 }
984
985done:
986 mutex_unlock(&pdata->dataport_mutex);
987 usb_autopm_put_interface(dev->intf);
988
989 return ret;
990}
991
992static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
993 int index, u8 addr[ETH_ALEN])
994{
995 u32 temp;
996
997 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
998 temp = addr[3];
999 temp = addr[2] | (temp << 8);
1000 temp = addr[1] | (temp << 8);
1001 temp = addr[0] | (temp << 8);
1002 pdata->pfilter_table[index][1] = temp;
1003 temp = addr[5];
1004 temp = addr[4] | (temp << 8);
1005 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1006 pdata->pfilter_table[index][0] = temp;
1007 }
1008}
1009
1010/* returns hash bit number for given MAC address */
1011static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1012{
1013 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1014}
1015
1016static void lan78xx_deferred_multicast_write(struct work_struct *param)
1017{
1018 struct lan78xx_priv *pdata =
1019 container_of(param, struct lan78xx_priv, set_multicast);
1020 struct lan78xx_net *dev = pdata->dev;
1021 int i;
1022 int ret;
1023
1024 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1025 pdata->rfe_ctl);
1026
1027 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1028 DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1029
1030 for (i = 1; i < NUM_OF_MAF; i++) {
1031 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1032 ret = lan78xx_write_reg(dev, MAF_LO(i),
1033 pdata->pfilter_table[i][1]);
1034 ret = lan78xx_write_reg(dev, MAF_HI(i),
1035 pdata->pfilter_table[i][0]);
1036 }
1037
1038 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1039}
1040
1041static void lan78xx_set_multicast(struct net_device *netdev)
1042{
1043 struct lan78xx_net *dev = netdev_priv(netdev);
1044 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1045 unsigned long flags;
1046 int i;
1047
1048 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1049
1050 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1051 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1052
1053 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1054 pdata->mchash_table[i] = 0;
1055 /* pfilter_table[0] has own HW address */
1056 for (i = 1; i < NUM_OF_MAF; i++) {
1057 pdata->pfilter_table[i][0] =
1058 pdata->pfilter_table[i][1] = 0;
1059 }
1060
1061 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1062
1063 if (dev->net->flags & IFF_PROMISC) {
1064 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1065 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1066 } else {
1067 if (dev->net->flags & IFF_ALLMULTI) {
1068 netif_dbg(dev, drv, dev->net,
1069 "receive all multicast enabled");
1070 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1071 }
1072 }
1073
1074 if (netdev_mc_count(dev->net)) {
1075 struct netdev_hw_addr *ha;
1076 int i;
1077
1078 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1079
1080 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1081
1082 i = 1;
1083 netdev_for_each_mc_addr(ha, netdev) {
1084 /* set first 32 into Perfect Filter */
1085 if (i < 33) {
1086 lan78xx_set_addr_filter(pdata, i, ha->addr);
1087 } else {
1088 u32 bitnum = lan78xx_hash(ha->addr);
1089
1090 pdata->mchash_table[bitnum / 32] |=
1091 (1 << (bitnum % 32));
1092 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1093 }
1094 i++;
1095 }
1096 }
1097
1098 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1099
1100 /* defer register writes to a sleepable context */
1101 schedule_work(&pdata->set_multicast);
1102}
1103
1104static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1105 u16 lcladv, u16 rmtadv)
1106{
1107 u32 flow = 0, fct_flow = 0;
1108 int ret;
349e0c5e 1109 u8 cap;
55d7de9d 1110
349e0c5e
WH
1111 if (dev->fc_autoneg)
1112 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1113 else
1114 cap = dev->fc_request_control;
55d7de9d
WH
1115
1116 if (cap & FLOW_CTRL_TX)
349e0c5e 1117 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
55d7de9d
WH
1118
1119 if (cap & FLOW_CTRL_RX)
1120 flow |= FLOW_CR_RX_FCEN_;
1121
1122 if (dev->udev->speed == USB_SPEED_SUPER)
1123 fct_flow = 0x817;
1124 else if (dev->udev->speed == USB_SPEED_HIGH)
1125 fct_flow = 0x211;
1126
1127 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1128 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1129 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1130
1131 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1132
1133 /* threshold value should be set before enabling flow */
1134 ret = lan78xx_write_reg(dev, FLOW, flow);
1135
1136 return 0;
1137}
1138
1139static int lan78xx_link_reset(struct lan78xx_net *dev)
1140{
ce85e13a 1141 struct phy_device *phydev = dev->net->phydev;
6e76510e 1142 struct ethtool_link_ksettings ecmd;
99c79ece 1143 int ladv, radv, ret;
55d7de9d
WH
1144 u32 buf;
1145
55d7de9d
WH
1146 /* clear LAN78xx interrupt status */
1147 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1148 if (unlikely(ret < 0))
1149 return -EIO;
1150
ce85e13a
WH
1151 phy_read_status(phydev);
1152
1153 if (!phydev->link && dev->link_on) {
55d7de9d 1154 dev->link_on = false;
55d7de9d
WH
1155
1156 /* reset MAC */
1157 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1158 if (unlikely(ret < 0))
1159 return -EIO;
1160 buf |= MAC_CR_RST_;
1161 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1162 if (unlikely(ret < 0))
1163 return -EIO;
e4953910 1164
20ff5565 1165 del_timer(&dev->stat_monitor);
ce85e13a 1166 } else if (phydev->link && !dev->link_on) {
55d7de9d
WH
1167 dev->link_on = true;
1168
6e76510e 1169 phy_ethtool_ksettings_get(phydev, &ecmd);
55d7de9d 1170
55d7de9d 1171 if (dev->udev->speed == USB_SPEED_SUPER) {
6e76510e 1172 if (ecmd.base.speed == 1000) {
55d7de9d
WH
1173 /* disable U2 */
1174 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1175 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1176 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1177 /* enable U1 */
1178 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1179 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1180 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1181 } else {
1182 /* enable U1 & U2 */
1183 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1184 buf |= USB_CFG1_DEV_U2_INIT_EN_;
1185 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1186 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1187 }
1188 }
1189
ce85e13a 1190 ladv = phy_read(phydev, MII_ADVERTISE);
99c79ece
GU
1191 if (ladv < 0)
1192 return ladv;
55d7de9d 1193
ce85e13a 1194 radv = phy_read(phydev, MII_LPA);
99c79ece
GU
1195 if (radv < 0)
1196 return radv;
55d7de9d
WH
1197
1198 netif_dbg(dev, link, dev->net,
1199 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
6e76510e 1200 ecmd.base.speed, ecmd.base.duplex, ladv, radv);
55d7de9d 1201
6e76510e
PR
1202 ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1203 radv);
20ff5565
WH
1204
1205 if (!timer_pending(&dev->stat_monitor)) {
1206 dev->delta = 1;
1207 mod_timer(&dev->stat_monitor,
1208 jiffies + STAT_UPDATE_TIMER);
1209 }
55d7de9d
WH
1210 }
1211
1212 return ret;
1213}
1214
1215/* some work can't be done in tasklets, so we use keventd
1216 *
1217 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1218 * but tasklet_schedule() doesn't. hope the failure is rare.
1219 */
e0c79ff6 1220static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
55d7de9d
WH
1221{
1222 set_bit(work, &dev->flags);
1223 if (!schedule_delayed_work(&dev->wq, 0))
1224 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1225}
1226
1227static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1228{
1229 u32 intdata;
1230
1231 if (urb->actual_length != 4) {
1232 netdev_warn(dev->net,
1233 "unexpected urb length %d", urb->actual_length);
1234 return;
1235 }
1236
1237 memcpy(&intdata, urb->transfer_buffer, 4);
1238 le32_to_cpus(&intdata);
1239
1240 if (intdata & INT_ENP_PHY_INT) {
1241 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
cc89c323
WH
1242 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1243
1244 if (dev->domain_data.phyirq > 0)
1245 generic_handle_irq(dev->domain_data.phyirq);
55d7de9d
WH
1246 } else
1247 netdev_warn(dev->net,
1248 "unexpected interrupt: 0x%08x\n", intdata);
1249}
1250
1251static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1252{
1253 return MAX_EEPROM_SIZE;
1254}
1255
1256static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1257 struct ethtool_eeprom *ee, u8 *data)
1258{
1259 struct lan78xx_net *dev = netdev_priv(netdev);
1260
1261 ee->magic = LAN78XX_EEPROM_MAGIC;
1262
1263 return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1264}
1265
1266static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1267 struct ethtool_eeprom *ee, u8 *data)
1268{
1269 struct lan78xx_net *dev = netdev_priv(netdev);
1270
1271 /* Allow entire eeprom update only */
1272 if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1273 (ee->offset == 0) &&
1274 (ee->len == 512) &&
1275 (data[0] == EEPROM_INDICATOR))
1276 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1277 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1278 (ee->offset == 0) &&
1279 (ee->len == 512) &&
1280 (data[0] == OTP_INDICATOR_1))
9fb6066d 1281 return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
55d7de9d
WH
1282
1283 return -EINVAL;
1284}
1285
1286static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1287 u8 *data)
1288{
1289 if (stringset == ETH_SS_STATS)
1290 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1291}
1292
1293static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1294{
1295 if (sset == ETH_SS_STATS)
1296 return ARRAY_SIZE(lan78xx_gstrings);
1297 else
1298 return -EOPNOTSUPP;
1299}
1300
1301static void lan78xx_get_stats(struct net_device *netdev,
1302 struct ethtool_stats *stats, u64 *data)
1303{
1304 struct lan78xx_net *dev = netdev_priv(netdev);
55d7de9d 1305
20ff5565 1306 lan78xx_update_stats(dev);
55d7de9d 1307
20ff5565
WH
1308 mutex_lock(&dev->stats.access_lock);
1309 memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1310 mutex_unlock(&dev->stats.access_lock);
55d7de9d
WH
1311}
1312
1313static void lan78xx_get_wol(struct net_device *netdev,
1314 struct ethtool_wolinfo *wol)
1315{
1316 struct lan78xx_net *dev = netdev_priv(netdev);
1317 int ret;
1318 u32 buf;
1319 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1320
1321 if (usb_autopm_get_interface(dev->intf) < 0)
1322 return;
1323
1324 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1325 if (unlikely(ret < 0)) {
1326 wol->supported = 0;
1327 wol->wolopts = 0;
1328 } else {
1329 if (buf & USB_CFG_RMT_WKP_) {
1330 wol->supported = WAKE_ALL;
1331 wol->wolopts = pdata->wol;
1332 } else {
1333 wol->supported = 0;
1334 wol->wolopts = 0;
1335 }
1336 }
1337
1338 usb_autopm_put_interface(dev->intf);
1339}
1340
1341static int lan78xx_set_wol(struct net_device *netdev,
1342 struct ethtool_wolinfo *wol)
1343{
1344 struct lan78xx_net *dev = netdev_priv(netdev);
1345 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1346 int ret;
1347
1348 ret = usb_autopm_get_interface(dev->intf);
1349 if (ret < 0)
1350 return ret;
1351
1352 pdata->wol = 0;
1353 if (wol->wolopts & WAKE_UCAST)
1354 pdata->wol |= WAKE_UCAST;
1355 if (wol->wolopts & WAKE_MCAST)
1356 pdata->wol |= WAKE_MCAST;
1357 if (wol->wolopts & WAKE_BCAST)
1358 pdata->wol |= WAKE_BCAST;
1359 if (wol->wolopts & WAKE_MAGIC)
1360 pdata->wol |= WAKE_MAGIC;
1361 if (wol->wolopts & WAKE_PHY)
1362 pdata->wol |= WAKE_PHY;
1363 if (wol->wolopts & WAKE_ARP)
1364 pdata->wol |= WAKE_ARP;
1365
1366 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1367
ce85e13a
WH
1368 phy_ethtool_set_wol(netdev->phydev, wol);
1369
55d7de9d
WH
1370 usb_autopm_put_interface(dev->intf);
1371
1372 return ret;
1373}
1374
1375static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1376{
1377 struct lan78xx_net *dev = netdev_priv(net);
ce85e13a 1378 struct phy_device *phydev = net->phydev;
55d7de9d
WH
1379 int ret;
1380 u32 buf;
55d7de9d
WH
1381
1382 ret = usb_autopm_get_interface(dev->intf);
1383 if (ret < 0)
1384 return ret;
1385
ce85e13a
WH
1386 ret = phy_ethtool_get_eee(phydev, edata);
1387 if (ret < 0)
1388 goto exit;
1389
55d7de9d
WH
1390 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1391 if (buf & MAC_CR_EEE_EN_) {
55d7de9d 1392 edata->eee_enabled = true;
ce85e13a
WH
1393 edata->eee_active = !!(edata->advertised &
1394 edata->lp_advertised);
55d7de9d
WH
1395 edata->tx_lpi_enabled = true;
1396 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1397 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1398 edata->tx_lpi_timer = buf;
1399 } else {
55d7de9d
WH
1400 edata->eee_enabled = false;
1401 edata->eee_active = false;
55d7de9d
WH
1402 edata->tx_lpi_enabled = false;
1403 edata->tx_lpi_timer = 0;
1404 }
1405
ce85e13a
WH
1406 ret = 0;
1407exit:
55d7de9d
WH
1408 usb_autopm_put_interface(dev->intf);
1409
ce85e13a 1410 return ret;
55d7de9d
WH
1411}
1412
1413static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1414{
1415 struct lan78xx_net *dev = netdev_priv(net);
1416 int ret;
1417 u32 buf;
1418
1419 ret = usb_autopm_get_interface(dev->intf);
1420 if (ret < 0)
1421 return ret;
1422
1423 if (edata->eee_enabled) {
1424 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1425 buf |= MAC_CR_EEE_EN_;
1426 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1427
ce85e13a
WH
1428 phy_ethtool_set_eee(net->phydev, edata);
1429
1430 buf = (u32)edata->tx_lpi_timer;
1431 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
55d7de9d
WH
1432 } else {
1433 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1434 buf &= ~MAC_CR_EEE_EN_;
1435 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1436 }
1437
1438 usb_autopm_put_interface(dev->intf);
1439
1440 return 0;
1441}
1442
1443static u32 lan78xx_get_link(struct net_device *net)
1444{
ce85e13a 1445 phy_read_status(net->phydev);
55d7de9d 1446
ce85e13a 1447 return net->phydev->link;
55d7de9d
WH
1448}
1449
e0c79ff6 1450static int lan78xx_nway_reset(struct net_device *net)
55d7de9d 1451{
ce85e13a 1452 return phy_start_aneg(net->phydev);
55d7de9d
WH
1453}
1454
1455static void lan78xx_get_drvinfo(struct net_device *net,
1456 struct ethtool_drvinfo *info)
1457{
1458 struct lan78xx_net *dev = netdev_priv(net);
1459
1460 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1461 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1462 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1463}
1464
1465static u32 lan78xx_get_msglevel(struct net_device *net)
1466{
1467 struct lan78xx_net *dev = netdev_priv(net);
1468
1469 return dev->msg_enable;
1470}
1471
1472static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1473{
1474 struct lan78xx_net *dev = netdev_priv(net);
1475
1476 dev->msg_enable = level;
1477}
1478
758c5c11
WH
1479static int lan78xx_get_mdix_status(struct net_device *net)
1480{
1481 struct phy_device *phydev = net->phydev;
1482 int buf;
1483
1484 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
1485 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1486 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
1487
1488 return buf;
1489}
1490
1491static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1492{
1493 struct lan78xx_net *dev = netdev_priv(net);
1494 struct phy_device *phydev = net->phydev;
1495 int buf;
1496
1497 if (mdix_ctrl == ETH_TP_MDI) {
1498 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1499 LAN88XX_EXT_PAGE_SPACE_1);
1500 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1501 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1502 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1503 buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1504 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1505 LAN88XX_EXT_PAGE_SPACE_0);
1506 } else if (mdix_ctrl == ETH_TP_MDI_X) {
1507 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1508 LAN88XX_EXT_PAGE_SPACE_1);
1509 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1510 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1511 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1512 buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1513 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1514 LAN88XX_EXT_PAGE_SPACE_0);
1515 } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1516 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1517 LAN88XX_EXT_PAGE_SPACE_1);
1518 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1519 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1520 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1521 buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1522 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1523 LAN88XX_EXT_PAGE_SPACE_0);
1524 }
1525 dev->mdix_ctrl = mdix_ctrl;
1526}
1527
6e76510e
PR
1528static int lan78xx_get_link_ksettings(struct net_device *net,
1529 struct ethtool_link_ksettings *cmd)
55d7de9d
WH
1530{
1531 struct lan78xx_net *dev = netdev_priv(net);
ce85e13a 1532 struct phy_device *phydev = net->phydev;
55d7de9d
WH
1533 int ret;
1534 int buf;
1535
55d7de9d
WH
1536 ret = usb_autopm_get_interface(dev->intf);
1537 if (ret < 0)
1538 return ret;
1539
6e76510e 1540 ret = phy_ethtool_ksettings_get(phydev, cmd);
55d7de9d 1541
758c5c11 1542 buf = lan78xx_get_mdix_status(net);
55d7de9d 1543
bdfba55e
WH
1544 buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1545 if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
6e76510e
PR
1546 cmd->base.eth_tp_mdix = ETH_TP_MDI_AUTO;
1547 cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
bdfba55e 1548 } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
6e76510e
PR
1549 cmd->base.eth_tp_mdix = ETH_TP_MDI;
1550 cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI;
bdfba55e 1551 } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
6e76510e
PR
1552 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
1553 cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_X;
55d7de9d
WH
1554 }
1555
1556 usb_autopm_put_interface(dev->intf);
1557
1558 return ret;
1559}
1560
6e76510e
PR
1561static int lan78xx_set_link_ksettings(struct net_device *net,
1562 const struct ethtool_link_ksettings *cmd)
55d7de9d
WH
1563{
1564 struct lan78xx_net *dev = netdev_priv(net);
ce85e13a 1565 struct phy_device *phydev = net->phydev;
55d7de9d
WH
1566 int ret = 0;
1567 int temp;
1568
55d7de9d
WH
1569 ret = usb_autopm_get_interface(dev->intf);
1570 if (ret < 0)
1571 return ret;
1572
6e76510e
PR
1573 if (dev->mdix_ctrl != cmd->base.eth_tp_mdix_ctrl)
1574 lan78xx_set_mdix_status(net, cmd->base.eth_tp_mdix_ctrl);
55d7de9d
WH
1575
1576 /* change speed & duplex */
6e76510e 1577 ret = phy_ethtool_ksettings_set(phydev, cmd);
55d7de9d 1578
6e76510e 1579 if (!cmd->base.autoneg) {
55d7de9d 1580 /* force link down */
ce85e13a
WH
1581 temp = phy_read(phydev, MII_BMCR);
1582 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
55d7de9d 1583 mdelay(1);
ce85e13a 1584 phy_write(phydev, MII_BMCR, temp);
55d7de9d
WH
1585 }
1586
1587 usb_autopm_put_interface(dev->intf);
1588
1589 return ret;
1590}
1591
349e0c5e
WH
1592static void lan78xx_get_pause(struct net_device *net,
1593 struct ethtool_pauseparam *pause)
1594{
1595 struct lan78xx_net *dev = netdev_priv(net);
1596 struct phy_device *phydev = net->phydev;
6e76510e 1597 struct ethtool_link_ksettings ecmd;
349e0c5e 1598
6e76510e 1599 phy_ethtool_ksettings_get(phydev, &ecmd);
349e0c5e
WH
1600
1601 pause->autoneg = dev->fc_autoneg;
1602
1603 if (dev->fc_request_control & FLOW_CTRL_TX)
1604 pause->tx_pause = 1;
1605
1606 if (dev->fc_request_control & FLOW_CTRL_RX)
1607 pause->rx_pause = 1;
1608}
1609
1610static int lan78xx_set_pause(struct net_device *net,
1611 struct ethtool_pauseparam *pause)
1612{
1613 struct lan78xx_net *dev = netdev_priv(net);
1614 struct phy_device *phydev = net->phydev;
6e76510e 1615 struct ethtool_link_ksettings ecmd;
349e0c5e
WH
1616 int ret;
1617
6e76510e 1618 phy_ethtool_ksettings_get(phydev, &ecmd);
349e0c5e 1619
6e76510e 1620 if (pause->autoneg && !ecmd.base.autoneg) {
349e0c5e
WH
1621 ret = -EINVAL;
1622 goto exit;
1623 }
1624
1625 dev->fc_request_control = 0;
1626 if (pause->rx_pause)
1627 dev->fc_request_control |= FLOW_CTRL_RX;
1628
1629 if (pause->tx_pause)
1630 dev->fc_request_control |= FLOW_CTRL_TX;
1631
6e76510e 1632 if (ecmd.base.autoneg) {
349e0c5e 1633 u32 mii_adv;
6e76510e
PR
1634 u32 advertising;
1635
1636 ethtool_convert_link_mode_to_legacy_u32(
1637 &advertising, ecmd.link_modes.advertising);
349e0c5e 1638
6e76510e 1639 advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
349e0c5e 1640 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
6e76510e
PR
1641 advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1642
1643 ethtool_convert_legacy_u32_to_link_mode(
1644 ecmd.link_modes.advertising, advertising);
1645
1646 phy_ethtool_ksettings_set(phydev, &ecmd);
349e0c5e
WH
1647 }
1648
1649 dev->fc_autoneg = pause->autoneg;
1650
1651 ret = 0;
1652exit:
1653 return ret;
1654}
1655
55d7de9d
WH
1656static const struct ethtool_ops lan78xx_ethtool_ops = {
1657 .get_link = lan78xx_get_link,
1658 .nway_reset = lan78xx_nway_reset,
1659 .get_drvinfo = lan78xx_get_drvinfo,
1660 .get_msglevel = lan78xx_get_msglevel,
1661 .set_msglevel = lan78xx_set_msglevel,
55d7de9d
WH
1662 .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1663 .get_eeprom = lan78xx_ethtool_get_eeprom,
1664 .set_eeprom = lan78xx_ethtool_set_eeprom,
1665 .get_ethtool_stats = lan78xx_get_stats,
1666 .get_sset_count = lan78xx_get_sset_count,
1667 .get_strings = lan78xx_get_strings,
1668 .get_wol = lan78xx_get_wol,
1669 .set_wol = lan78xx_set_wol,
1670 .get_eee = lan78xx_get_eee,
1671 .set_eee = lan78xx_set_eee,
349e0c5e
WH
1672 .get_pauseparam = lan78xx_get_pause,
1673 .set_pauseparam = lan78xx_set_pause,
6e76510e
PR
1674 .get_link_ksettings = lan78xx_get_link_ksettings,
1675 .set_link_ksettings = lan78xx_set_link_ksettings,
55d7de9d
WH
1676};
1677
1678static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1679{
55d7de9d
WH
1680 if (!netif_running(netdev))
1681 return -EINVAL;
1682
ce85e13a 1683 return phy_mii_ioctl(netdev->phydev, rq, cmd);
55d7de9d
WH
1684}
1685
1686static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1687{
1688 u32 addr_lo, addr_hi;
1689 int ret;
1690 u8 addr[6];
1691
1692 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1693 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1694
1695 addr[0] = addr_lo & 0xFF;
1696 addr[1] = (addr_lo >> 8) & 0xFF;
1697 addr[2] = (addr_lo >> 16) & 0xFF;
1698 addr[3] = (addr_lo >> 24) & 0xFF;
1699 addr[4] = addr_hi & 0xFF;
1700 addr[5] = (addr_hi >> 8) & 0xFF;
1701
1702 if (!is_valid_ether_addr(addr)) {
1703 /* reading mac address from EEPROM or OTP */
1704 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1705 addr) == 0) ||
1706 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1707 addr) == 0)) {
1708 if (is_valid_ether_addr(addr)) {
1709 /* eeprom values are valid so use them */
1710 netif_dbg(dev, ifup, dev->net,
1711 "MAC address read from EEPROM");
1712 } else {
1713 /* generate random MAC */
1714 random_ether_addr(addr);
1715 netif_dbg(dev, ifup, dev->net,
1716 "MAC address set to random addr");
1717 }
1718
1719 addr_lo = addr[0] | (addr[1] << 8) |
1720 (addr[2] << 16) | (addr[3] << 24);
1721 addr_hi = addr[4] | (addr[5] << 8);
1722
1723 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1724 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1725 } else {
1726 /* generate random MAC */
1727 random_ether_addr(addr);
1728 netif_dbg(dev, ifup, dev->net,
1729 "MAC address set to random addr");
1730 }
1731 }
1732
1733 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1734 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1735
1736 ether_addr_copy(dev->net->dev_addr, addr);
1737}
1738
ce85e13a
WH
1739/* MDIO read and write wrappers for phylib */
1740static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1741{
1742 struct lan78xx_net *dev = bus->priv;
1743 u32 val, addr;
1744 int ret;
1745
1746 ret = usb_autopm_get_interface(dev->intf);
1747 if (ret < 0)
1748 return ret;
1749
1750 mutex_lock(&dev->phy_mutex);
1751
1752 /* confirm MII not busy */
1753 ret = lan78xx_phy_wait_not_busy(dev);
1754 if (ret < 0)
1755 goto done;
1756
1757 /* set the address, index & direction (read from PHY) */
1758 addr = mii_access(phy_id, idx, MII_READ);
1759 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1760
1761 ret = lan78xx_phy_wait_not_busy(dev);
1762 if (ret < 0)
1763 goto done;
1764
1765 ret = lan78xx_read_reg(dev, MII_DATA, &val);
1766
1767 ret = (int)(val & 0xFFFF);
1768
1769done:
1770 mutex_unlock(&dev->phy_mutex);
1771 usb_autopm_put_interface(dev->intf);
1772 return ret;
1773}
1774
1775static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1776 u16 regval)
1777{
1778 struct lan78xx_net *dev = bus->priv;
1779 u32 val, addr;
1780 int ret;
1781
1782 ret = usb_autopm_get_interface(dev->intf);
1783 if (ret < 0)
1784 return ret;
1785
1786 mutex_lock(&dev->phy_mutex);
1787
1788 /* confirm MII not busy */
1789 ret = lan78xx_phy_wait_not_busy(dev);
1790 if (ret < 0)
1791 goto done;
1792
1793 val = (u32)regval;
1794 ret = lan78xx_write_reg(dev, MII_DATA, val);
1795
1796 /* set the address, index & direction (write to PHY) */
1797 addr = mii_access(phy_id, idx, MII_WRITE);
1798 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1799
1800 ret = lan78xx_phy_wait_not_busy(dev);
1801 if (ret < 0)
1802 goto done;
1803
1804done:
1805 mutex_unlock(&dev->phy_mutex);
1806 usb_autopm_put_interface(dev->intf);
1807 return 0;
1808}
1809
1810static int lan78xx_mdio_init(struct lan78xx_net *dev)
55d7de9d 1811{
ce85e13a 1812 int ret;
ce85e13a
WH
1813
1814 dev->mdiobus = mdiobus_alloc();
1815 if (!dev->mdiobus) {
1816 netdev_err(dev->net, "can't allocate MDIO bus\n");
1817 return -ENOMEM;
1818 }
1819
1820 dev->mdiobus->priv = (void *)dev;
1821 dev->mdiobus->read = lan78xx_mdiobus_read;
1822 dev->mdiobus->write = lan78xx_mdiobus_write;
1823 dev->mdiobus->name = "lan78xx-mdiobus";
1824
1825 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1826 dev->udev->bus->busnum, dev->udev->devnum);
1827
87177ba6
WH
1828 switch (dev->chipid) {
1829 case ID_REV_CHIP_ID_7800_:
1830 case ID_REV_CHIP_ID_7850_:
ce85e13a
WH
1831 /* set to internal PHY id */
1832 dev->mdiobus->phy_mask = ~(1 << 1);
1833 break;
1834 }
1835
1836 ret = mdiobus_register(dev->mdiobus);
1837 if (ret) {
1838 netdev_err(dev->net, "can't register MDIO bus\n");
e7f4dc35 1839 goto exit1;
ce85e13a
WH
1840 }
1841
1842 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1843 return 0;
ce85e13a
WH
1844exit1:
1845 mdiobus_free(dev->mdiobus);
1846 return ret;
1847}
1848
1849static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1850{
1851 mdiobus_unregister(dev->mdiobus);
ce85e13a
WH
1852 mdiobus_free(dev->mdiobus);
1853}
1854
1855static void lan78xx_link_status_change(struct net_device *net)
1856{
14437e3f
WH
1857 struct phy_device *phydev = net->phydev;
1858 int ret, temp;
1859
1860 /* At forced 100 F/H mode, chip may fail to set mode correctly
1861 * when cable is switched between long(~50+m) and short one.
1862 * As workaround, set to 10 before setting to 100
1863 * at forced 100 F/H mode.
1864 */
1865 if (!phydev->autoneg && (phydev->speed == 100)) {
1866 /* disable phy interrupt */
1867 temp = phy_read(phydev, LAN88XX_INT_MASK);
1868 temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1869 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1870
1871 temp = phy_read(phydev, MII_BMCR);
1872 temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1873 phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1874 temp |= BMCR_SPEED100;
1875 phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1876
1877 /* clear pending interrupt generated while workaround */
1878 temp = phy_read(phydev, LAN88XX_INT_STS);
1879
1880 /* enable phy interrupt back */
1881 temp = phy_read(phydev, LAN88XX_INT_MASK);
1882 temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1883 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1884 }
55d7de9d
WH
1885}
1886
cc89c323
WH
1887static int irq_map(struct irq_domain *d, unsigned int irq,
1888 irq_hw_number_t hwirq)
1889{
1890 struct irq_domain_data *data = d->host_data;
1891
1892 irq_set_chip_data(irq, data);
1893 irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1894 irq_set_noprobe(irq);
1895
1896 return 0;
1897}
1898
1899static void irq_unmap(struct irq_domain *d, unsigned int irq)
1900{
1901 irq_set_chip_and_handler(irq, NULL, NULL);
1902 irq_set_chip_data(irq, NULL);
1903}
1904
1905static const struct irq_domain_ops chip_domain_ops = {
1906 .map = irq_map,
1907 .unmap = irq_unmap,
1908};
1909
1910static void lan78xx_irq_mask(struct irq_data *irqd)
1911{
1912 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1913
1914 data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1915}
1916
1917static void lan78xx_irq_unmask(struct irq_data *irqd)
1918{
1919 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1920
1921 data->irqenable |= BIT(irqd_to_hwirq(irqd));
1922}
1923
1924static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1925{
1926 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1927
1928 mutex_lock(&data->irq_lock);
1929}
1930
1931static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1932{
1933 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1934 struct lan78xx_net *dev =
1935 container_of(data, struct lan78xx_net, domain_data);
1936 u32 buf;
1937 int ret;
1938
1939 /* call register access here because irq_bus_lock & irq_bus_sync_unlock
1940 * are only two callbacks executed in non-atomic contex.
1941 */
1942 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1943 if (buf != data->irqenable)
1944 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1945
1946 mutex_unlock(&data->irq_lock);
1947}
1948
1949static struct irq_chip lan78xx_irqchip = {
1950 .name = "lan78xx-irqs",
1951 .irq_mask = lan78xx_irq_mask,
1952 .irq_unmask = lan78xx_irq_unmask,
1953 .irq_bus_lock = lan78xx_irq_bus_lock,
1954 .irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock,
1955};
1956
1957static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1958{
1959 struct device_node *of_node;
1960 struct irq_domain *irqdomain;
1961 unsigned int irqmap = 0;
1962 u32 buf;
1963 int ret = 0;
1964
1965 of_node = dev->udev->dev.parent->of_node;
1966
1967 mutex_init(&dev->domain_data.irq_lock);
1968
1969 lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1970 dev->domain_data.irqenable = buf;
1971
1972 dev->domain_data.irqchip = &lan78xx_irqchip;
1973 dev->domain_data.irq_handler = handle_simple_irq;
1974
1975 irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1976 &chip_domain_ops, &dev->domain_data);
1977 if (irqdomain) {
1978 /* create mapping for PHY interrupt */
1979 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1980 if (!irqmap) {
1981 irq_domain_remove(irqdomain);
1982
1983 irqdomain = NULL;
1984 ret = -EINVAL;
1985 }
1986 } else {
1987 ret = -EINVAL;
1988 }
1989
1990 dev->domain_data.irqdomain = irqdomain;
1991 dev->domain_data.phyirq = irqmap;
1992
1993 return ret;
1994}
1995
1996static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1997{
1998 if (dev->domain_data.phyirq > 0) {
1999 irq_dispose_mapping(dev->domain_data.phyirq);
2000
2001 if (dev->domain_data.irqdomain)
2002 irq_domain_remove(dev->domain_data.irqdomain);
2003 }
2004 dev->domain_data.phyirq = 0;
2005 dev->domain_data.irqdomain = NULL;
2006}
2007
55d7de9d
WH
2008static int lan78xx_phy_init(struct lan78xx_net *dev)
2009{
ce85e13a 2010 int ret;
349e0c5e 2011 u32 mii_adv;
ce85e13a 2012 struct phy_device *phydev = dev->net->phydev;
55d7de9d 2013
ce85e13a
WH
2014 phydev = phy_find_first(dev->mdiobus);
2015 if (!phydev) {
2016 netdev_err(dev->net, "no PHY found\n");
2017 return -EIO;
2018 }
55d7de9d 2019
cc89c323
WH
2020 /* if phyirq is not set, use polling mode in phylib */
2021 if (dev->domain_data.phyirq > 0)
2022 phydev->irq = dev->domain_data.phyirq;
2023 else
2024 phydev->irq = 0;
2025 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
e4953910 2026
ce85e13a
WH
2027 ret = phy_connect_direct(dev->net, phydev,
2028 lan78xx_link_status_change,
2029 PHY_INTERFACE_MODE_GMII);
2030 if (ret) {
2031 netdev_err(dev->net, "can't attach PHY to %s\n",
2032 dev->mdiobus->id);
2033 return -EIO;
2034 }
55d7de9d
WH
2035
2036 /* set to AUTOMDIX */
758c5c11 2037 lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
55d7de9d 2038
ce85e13a
WH
2039 /* MAC doesn't support 1000T Half */
2040 phydev->supported &= ~SUPPORTED_1000baseT_Half;
e270b2db 2041
349e0c5e
WH
2042 /* support both flow controls */
2043 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2044 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2045 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2046 phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2047
ce85e13a
WH
2048 genphy_config_aneg(phydev);
2049
349e0c5e
WH
2050 dev->fc_autoneg = phydev->autoneg;
2051
ce85e13a 2052 phy_start(phydev);
55d7de9d
WH
2053
2054 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2055
2056 return 0;
2057}
2058
2059static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2060{
2061 int ret = 0;
2062 u32 buf;
2063 bool rxenabled;
2064
2065 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2066
2067 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2068
2069 if (rxenabled) {
2070 buf &= ~MAC_RX_RXEN_;
2071 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2072 }
2073
2074 /* add 4 to size for FCS */
2075 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2076 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2077
2078 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2079
2080 if (rxenabled) {
2081 buf |= MAC_RX_RXEN_;
2082 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2083 }
2084
2085 return 0;
2086}
2087
2088static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2089{
2090 struct sk_buff *skb;
2091 unsigned long flags;
2092 int count = 0;
2093
2094 spin_lock_irqsave(&q->lock, flags);
2095 while (!skb_queue_empty(q)) {
2096 struct skb_data *entry;
2097 struct urb *urb;
2098 int ret;
2099
2100 skb_queue_walk(q, skb) {
2101 entry = (struct skb_data *)skb->cb;
2102 if (entry->state != unlink_start)
2103 goto found;
2104 }
2105 break;
2106found:
2107 entry->state = unlink_start;
2108 urb = entry->urb;
2109
2110 /* Get reference count of the URB to avoid it to be
2111 * freed during usb_unlink_urb, which may trigger
2112 * use-after-free problem inside usb_unlink_urb since
2113 * usb_unlink_urb is always racing with .complete
2114 * handler(include defer_bh).
2115 */
2116 usb_get_urb(urb);
2117 spin_unlock_irqrestore(&q->lock, flags);
2118 /* during some PM-driven resume scenarios,
2119 * these (async) unlinks complete immediately
2120 */
2121 ret = usb_unlink_urb(urb);
2122 if (ret != -EINPROGRESS && ret != 0)
2123 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2124 else
2125 count++;
2126 usb_put_urb(urb);
2127 spin_lock_irqsave(&q->lock, flags);
2128 }
2129 spin_unlock_irqrestore(&q->lock, flags);
2130 return count;
2131}
2132
2133static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2134{
2135 struct lan78xx_net *dev = netdev_priv(netdev);
2136 int ll_mtu = new_mtu + netdev->hard_header_len;
2137 int old_hard_mtu = dev->hard_mtu;
2138 int old_rx_urb_size = dev->rx_urb_size;
2139 int ret;
2140
55d7de9d
WH
2141 /* no second zero-length packet read wanted after mtu-sized packets */
2142 if ((ll_mtu % dev->maxpacket) == 0)
2143 return -EDOM;
2144
2145 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
2146
2147 netdev->mtu = new_mtu;
2148
2149 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2150 if (dev->rx_urb_size == old_hard_mtu) {
2151 dev->rx_urb_size = dev->hard_mtu;
2152 if (dev->rx_urb_size > old_rx_urb_size) {
2153 if (netif_running(dev->net)) {
2154 unlink_urbs(dev, &dev->rxq);
2155 tasklet_schedule(&dev->bh);
2156 }
2157 }
2158 }
2159
2160 return 0;
2161}
2162
/* ndo_set_mac_address handler: validate a new station address, latch it
 * in netdev->dev_addr and program the RX_ADDRL/RX_ADDRH perfect-filter
 * registers.  Refused while the interface is running so the MAC is
 * never re-addressed mid-traffic.
 */
static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 addr_lo, addr_hi;
	int ret;

	if (netif_running(netdev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	/* the hardware takes the address split across two registers:
	 * bytes 0-3 in ADDRL, bytes 4-5 in ADDRH
	 */
	addr_lo = netdev->dev_addr[0] |
		  netdev->dev_addr[1] << 8 |
		  netdev->dev_addr[2] << 16 |
		  netdev->dev_addr[3] << 24;
	addr_hi = netdev->dev_addr[4] |
		  netdev->dev_addr[5] << 8;

	/* NOTE(review): write status is ignored; a failed register write
	 * would leave netdev->dev_addr out of sync with the hardware —
	 * consider propagating ret
	 */
	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

	return 0;
}
2190
/* Enable or disable Rx checksum offload engine */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int ret;

	/* rfe_ctl is a cached copy of the RFE_CTL register shared with
	 * other paths: mutate it under the spinlock, but perform the
	 * (sleeping) USB register write after dropping the lock
	 */
	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		/* checksum offload for TCP/UDP/IP and also ICMP/IGMP */
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	return 0;
}
2221
2222static void lan78xx_deferred_vlan_write(struct work_struct *param)
2223{
2224 struct lan78xx_priv *pdata =
2225 container_of(param, struct lan78xx_priv, set_vlan);
2226 struct lan78xx_net *dev = pdata->dev;
2227
2228 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2229 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2230}
2231
2232static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2233 __be16 proto, u16 vid)
2234{
2235 struct lan78xx_net *dev = netdev_priv(netdev);
2236 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2237 u16 vid_bit_index;
2238 u16 vid_dword_index;
2239
2240 vid_dword_index = (vid >> 5) & 0x7F;
2241 vid_bit_index = vid & 0x1F;
2242
2243 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2244
2245 /* defer register writes to a sleepable context */
2246 schedule_work(&pdata->set_vlan);
2247
2248 return 0;
2249}
2250
2251static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2252 __be16 proto, u16 vid)
2253{
2254 struct lan78xx_net *dev = netdev_priv(netdev);
2255 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2256 u16 vid_bit_index;
2257 u16 vid_dword_index;
2258
2259 vid_dword_index = (vid >> 5) & 0x7F;
2260 vid_bit_index = vid & 0x1F;
2261
2262 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2263
2264 /* defer register writes to a sleepable context */
2265 schedule_work(&pdata->set_vlan);
2266
2267 return 0;
2268}
2269
/* Program the USB Latency Tolerance Messaging (LTM) BELT/inactivity
 * registers.  If LTM is enabled in USB_CFG1, the six 32-bit values are
 * loaded from an EEPROM descriptor (offset 0x3F, expected payload 24
 * bytes), falling back to OTP; otherwise all six registers are zeroed.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {	/* expected payload length */
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2308
/* Bring the chip to a fully configured, running state: lite-reset the
 * MAC, restore the station address, configure USB/LTM/burst parameters,
 * size the FIFOs and URB queues for the negotiated USB speed, reset the
 * PHY, and finally enable the TX/RX MAC and FIFO paths.  The register
 * sequence below is order-dependent — do not reorder.
 * Returns 0 on success or -EIO if a reset poll times out.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;

	/* issue a LiteReset and poll (up to 1s) for self-clear */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* NOTE(review): hard_header_len is incremented, not assigned, so
	 * repeated resets (this runs on every open) would accumulate
	 * TX_OVERHEAD — confirm against the lifecycle of dev->net
	 */
	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* burst cap and queue depths scale with the USB link speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable multiple ethernet frames per USB transfer */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* poll (up to 1s) for PHY reset completion AND device ready */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable TX path: MAC transmitter, then TX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	/* enable RX path: MAC receiver, then RX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2436
20ff5565
WH
/* Prime the statistics rollover thresholds and kick off the first
 * deferred hardware-counter read.  The u32-walk over rollover_max
 * relies on every field being a u32 — keep the struct layout in sync.
 */
static void lan78xx_init_stats(struct lan78xx_net *dev)
{
	u32 *p;
	int i;

	/* initialize for stats update
	 * some counters are 20bits and some are 32bits
	 */
	p = (u32 *)&dev->stats.rollover_max;
	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
		p[i] = 0xFFFFF;

	/* the byte-count and EEE-time counters are full 32 bits wide */
	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
2462
55d7de9d
WH
/* ndo_open handler: wake the device, reinitialise the hardware and PHY,
 * start the interrupt URB for link events, and open the TX queue.
 * Returns 0 on success or a negative errno; on failure the autopm
 * reference is dropped before returning.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	ret = lan78xx_reset(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto done;

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* force a link-state refresh via the deferred worker */
	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2505
/* Tear down all in-flight RX/TX URBs.  unlink_urbs() only *requests*
 * asynchronous unlinks, so this then sleeps in UNLINK_TIMEOUT_MS slices
 * while completions drain through dev->done.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	/* NOTE(review): with '&&' the loop exits as soon as ANY of the
	 * three queues is empty, so URBs may still be in flight on the
	 * others when this returns — confirm whether '||' was intended
	 */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2531
/* ndo_stop handler: stop the stats timer, detach the PHY, quiesce all
 * queues and URBs, then neuter deferred work before dropping the power
 * reference.  The teardown order below matters — do not reorder.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	phy_stop(net->phydev);
	phy_disconnect(net->phydev);
	net->phydev = NULL;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	/* balances the get in lan78xx_open() */
	usb_autopm_put_interface(dev->intf);

	return 0;
}
2569
/* Flatten a possibly fragmented skb into one linear buffer so the TX
 * path can prepend command words and memcpy it contiguously.
 * Returns 0 on success or a negative errno from skb_linearize().
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	int rc = skb_linearize(skb);

	return rc;
}
2574
/* Prepend the two 32-bit TX command words required by the chip in front
 * of the frame.  Consumes @skb; returns the (possibly reallocated) skb
 * on success or NULL on failure, in which case the caller drops the
 * packet.  The command words carry frame length, FCS generation,
 * checksum-offload, LSO/MSS and VLAN-insertion flags.
 */
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
				       struct sk_buff *skb, gfp_t flags)
{
	u32 tx_cmd_a, tx_cmd_b;

	/* need TX_OVERHEAD bytes of headroom for the command words */
	if (skb_headroom(skb) < TX_OVERHEAD) {
		struct sk_buff *skb2;

		skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
		dev_kfree_skb_any(skb);
		skb = skb2;
		if (!skb)
			return NULL;
	}

	if (lan78xx_linearize(skb) < 0)
		return NULL;

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		/* clamp MSS to the hardware minimum */
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	/* push B first, then A, so A ends up at the head of the buffer;
	 * both words go out little-endian
	 */
	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_b);
	memcpy(skb->data, &tx_cmd_b, 4);

	skb_push(skb, 4);
	cpu_to_le32s(&tx_cmd_a);
	memcpy(skb->data, &tx_cmd_a, 4);

	return skb;
}
2622
/* Move @skb from its active queue @list to dev->done for tasklet
 * processing, recording the new @state.  Returns the state the skb had
 * before the move so callers (rx_complete) can detect a concurrent
 * unlink.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	/* hand-over-hand locking: irqs stay disabled (state saved in
	 * 'flags') while list->lock is swapped for done.lock
	 */
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* first entry on a previously empty done queue kicks the tasklet */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2645
/* Bulk-out URB completion handler (interrupt context): account TX
 * stats, react to endpoint errors, then defer skb/URB recycling to the
 * tasklet via defer_bh().
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		/* one URB may carry several batched frames */
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* stalled endpoint: clear the halt from the
			 * process-context worker
			 */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	/* balances usb_autopm_get_interface_async() in lan78xx_tx_bh() */
	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2684
2685static void lan78xx_queue_skb(struct sk_buff_head *list,
2686 struct sk_buff *newsk, enum skb_state state)
2687{
2688 struct skb_data *entry = (struct skb_data *)newsk->cb;
2689
2690 __skb_queue_tail(list, newsk);
2691 entry->state = state;
2692}
2693
e0c79ff6
BX
/* ndo_start_xmit handler: frames are never submitted to USB directly.
 * Each skb is wrapped with TX command words and parked on txq_pend for
 * the tasklet (lan78xx_tx_bh) to batch into bulk URBs.  Always returns
 * NETDEV_TX_OK; prep failures are counted as drops.
 */
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct sk_buff *skb2 = NULL;

	if (skb) {
		skb_tx_timestamp(skb);
		/* consumes skb; returns NULL on failure */
		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
	}

	if (skb2) {
		skb_queue_tail(&dev->txq_pend, skb2);

		/* throttle TX patch at slower than SUPER SPEED USB */
		if ((dev->udev->speed < USB_SPEED_SUPER) &&
		    (skb_queue_len(&dev->txq_pend) > 10))
			netif_stop_queue(net);
	} else {
		netif_dbg(dev, tx_err, dev->net,
			  "lan78xx_tx_prep return NULL\n");
		dev->net->stats.tx_errors++;
		dev->net->stats.tx_dropped++;
	}

	tasklet_schedule(&dev->bh);

	return NETDEV_TX_OK;
}
2723
e0c79ff6
BX
/* Scan the interface altsettings for the bulk-in, bulk-out and optional
 * interrupt-in endpoints, and cache the resulting pipes on @dev.
 * Returns 0 on success, -EINVAL if no altsetting provides both bulk
 * endpoints.
 */
static int
lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only IN interrupt endpoints qualify */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				/* skip iso/control endpoints */
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		/* first altsetting with both bulk pipes wins */
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2782
2783static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2784{
2785 struct lan78xx_priv *pdata = NULL;
2786 int ret;
2787 int i;
2788
2789 ret = lan78xx_get_endpoints(dev, intf);
2790
2791 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2792
2793 pdata = (struct lan78xx_priv *)(dev->data[0]);
2794 if (!pdata) {
2795 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2796 return -ENOMEM;
2797 }
2798
2799 pdata->dev = dev;
2800
2801 spin_lock_init(&pdata->rfe_ctl_lock);
2802 mutex_init(&pdata->dataport_mutex);
2803
2804 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2805
2806 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2807 pdata->vlan_table[i] = 0;
2808
2809 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2810
2811 dev->net->features = 0;
2812
2813 if (DEFAULT_TX_CSUM_ENABLE)
2814 dev->net->features |= NETIF_F_HW_CSUM;
2815
2816 if (DEFAULT_RX_CSUM_ENABLE)
2817 dev->net->features |= NETIF_F_RXCSUM;
2818
2819 if (DEFAULT_TSO_CSUM_ENABLE)
2820 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2821
2822 dev->net->hw_features = dev->net->features;
2823
cc89c323
WH
2824 ret = lan78xx_setup_irq_domain(dev);
2825 if (ret < 0) {
2826 netdev_warn(dev->net,
2827 "lan78xx_setup_irq_domain() failed : %d", ret);
2828 kfree(pdata);
2829 return ret;
2830 }
2831
55d7de9d
WH
2832 /* Init all registers */
2833 ret = lan78xx_reset(dev);
2834
ce85e13a
WH
2835 lan78xx_mdio_init(dev);
2836
55d7de9d
WH
2837 dev->net->flags |= IFF_MULTICAST;
2838
2839 pdata->wol = WAKE_MAGIC;
2840
2841 return 0;
2842}
2843
2844static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2845{
2846 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2847
cc89c323
WH
2848 lan78xx_remove_irq_domain(dev);
2849
ce85e13a
WH
2850 lan78xx_remove_mdio(dev);
2851
55d7de9d
WH
2852 if (pdata) {
2853 netif_dbg(dev, ifdown, dev->net, "free pdata");
2854 kfree(pdata);
2855 pdata = NULL;
2856 dev->data[0] = 0;
2857 }
2858}
2859
2860static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2861 struct sk_buff *skb,
2862 u32 rx_cmd_a, u32 rx_cmd_b)
2863{
2864 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2865 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2866 skb->ip_summed = CHECKSUM_NONE;
2867 } else {
2868 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2869 skb->ip_summed = CHECKSUM_COMPLETE;
2870 }
2871}
2872
/* Deliver a received frame to the network stack, or park it on
 * rxq_pause while RX is paused.  Updates RX stats and honours deferred
 * hardware RX timestamping.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* wipe the driver's skb_data bookkeeping before handing it up */
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* the timestamping core may consume the skb here */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
2899
/* Split one completed bulk-in buffer into ethernet frames.  Each frame
 * is preceded by three little-endian command words (RX_CMD_A/B/C) and
 * padded so the next frame starts 4-byte aligned.  Intermediate frames
 * are cloned and sent up via lan78xx_skb_return(); the final frame is
 * returned in @skb itself for the caller to deliver.  Returns 1 on
 * success, 0 on a malformed buffer or clone failure.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* peel off the three command words */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* hardware flagged a receive error: skip frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* clone shares the data buffer; narrow the clone's
			 * view to just this frame
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
2971
2972static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2973{
2974 if (!lan78xx_rx(dev, skb)) {
2975 dev->net->stats.rx_errors++;
2976 goto done;
2977 }
2978
2979 if (skb->len) {
2980 lan78xx_skb_return(dev, skb);
2981 return;
2982 }
2983
2984 netif_dbg(dev, rx_err, dev->net, "drop\n");
2985 dev->net->stats.rx_errors++;
2986done:
2987 skb_queue_tail(&dev->done, skb);
2988}
2989
2990static void rx_complete(struct urb *urb);
2991
/* Allocate an RX skb and submit @urb on the bulk-in pipe.  Takes
 * ownership of the URB: it is freed on any failure path.  Returns 0 on
 * success, -ENOMEM on allocation failure, -ENOLINK when the device is
 * stopped/asleep/unreachable, or the usb_submit_urb() error.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* only submit while the device is present, running, and neither
	 * halted nor suspended
	 */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: clear halt from the worker */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* transient failure: let the tasklet retry */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3051
/* Bulk-in URB completion handler (interrupt context): classify the URB
 * status, hand the skb to the tasklet via defer_bh(), and resubmit the
 * URB for the next receive when the device is still usable.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	/* detach the urb from the entry by default: we plan to reuse it */
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			/* runt transfer: shorter than cmd words + header */
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* give the urb back to the entry: no resubmission */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	/* defer_bh() returns the pre-move state: unlink_start means an
	 * unlink raced us and the urb must not be resubmitted
	 */
	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3121
/* TX half of the driver tasklet: batch pending frames from txq_pend
 * into one bulk-out URB.  A GSO frame always travels alone; otherwise
 * as many frames as fit in MAX_SINGLE_PACKET_SIZE are copied into a
 * single buffer, each padded to 4-byte alignment.  Handles runtime-PM
 * deferral (URBs queued while the device sleeps are anchored and sent
 * on resume).
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	/* first pass: decide how many pending frames go into this URB */
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			/* GSO frame is sent alone, without re-copying */
			count = 1;
			length = skb->len - TX_OVERHEAD;
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* each frame starts on a 4-byte boundary in the batch */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	/* second pass: dequeue and concatenate the counted frames */
	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			/* 'length' counts payload excl. command words */
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		/* apply back-pressure once the in-flight ring is full */
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3246
3247static void lan78xx_rx_bh(struct lan78xx_net *dev)
3248{
3249 struct urb *urb;
3250 int i;
3251
3252 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3253 for (i = 0; i < 10; i++) {
3254 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3255 break;
3256 urb = usb_alloc_urb(0, GFP_ATOMIC);
3257 if (urb)
3258 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3259 return;
3260 }
3261
3262 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3263 tasklet_schedule(&dev->bh);
3264 }
3265 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3266 netif_wake_queue(dev->net);
3267}
3268
/* Driver tasklet: drain dev->done (recycling TX/RX URBs and skbs and
 * pushing received frames up the stack), then keep the TX and RX
 * pipelines moving while the interface is up.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			/* mark for cleanup in case rx_process re-queues it */
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		/* don't refill RX while throttled or endpoint-halted */
		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
3312
3313static void lan78xx_delayedwork(struct work_struct *work)
3314{
3315 int status;
3316 struct lan78xx_net *dev;
3317
3318 dev = container_of(work, struct lan78xx_net, wq.work);
3319
3320 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3321 unlink_urbs(dev, &dev->txq);
3322 status = usb_autopm_get_interface(dev->intf);
3323 if (status < 0)
3324 goto fail_pipe;
3325 status = usb_clear_halt(dev->udev, dev->pipe_out);
3326 usb_autopm_put_interface(dev->intf);
3327 if (status < 0 &&
3328 status != -EPIPE &&
3329 status != -ESHUTDOWN) {
3330 if (netif_msg_tx_err(dev))
3331fail_pipe:
3332 netdev_err(dev->net,
3333 "can't clear tx halt, status %d\n",
3334 status);
3335 } else {
3336 clear_bit(EVENT_TX_HALT, &dev->flags);
3337 if (status != -ESHUTDOWN)
3338 netif_wake_queue(dev->net);
3339 }
3340 }
3341 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3342 unlink_urbs(dev, &dev->rxq);
3343 status = usb_autopm_get_interface(dev->intf);
3344 if (status < 0)
3345 goto fail_halt;
3346 status = usb_clear_halt(dev->udev, dev->pipe_in);
3347 usb_autopm_put_interface(dev->intf);
3348 if (status < 0 &&
3349 status != -EPIPE &&
3350 status != -ESHUTDOWN) {
3351 if (netif_msg_rx_err(dev))
3352fail_halt:
3353 netdev_err(dev->net,
3354 "can't clear rx halt, status %d\n",
3355 status);
3356 } else {
3357 clear_bit(EVENT_RX_HALT, &dev->flags);
3358 tasklet_schedule(&dev->bh);
3359 }
3360 }
3361
3362 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3363 int ret = 0;
3364
3365 clear_bit(EVENT_LINK_RESET, &dev->flags);
3366 status = usb_autopm_get_interface(dev->intf);
3367 if (status < 0)
3368 goto skip_reset;
3369 if (lan78xx_link_reset(dev) < 0) {
3370 usb_autopm_put_interface(dev->intf);
3371skip_reset:
3372 netdev_info(dev->net, "link reset failed (%d)\n",
3373 ret);
3374 } else {
3375 usb_autopm_put_interface(dev->intf);
3376 }
3377 }
20ff5565
WH
3378
3379 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3380 lan78xx_update_stats(dev);
3381
3382 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3383
3384 mod_timer(&dev->stat_monitor,
3385 jiffies + (STAT_UPDATE_TIMER * dev->delta));
3386
3387 dev->delta = min((dev->delta * 2), 50);
3388 }
55d7de9d
WH
3389}
3390
3391static void intr_complete(struct urb *urb)
3392{
3393 struct lan78xx_net *dev = urb->context;
3394 int status = urb->status;
3395
3396 switch (status) {
3397 /* success */
3398 case 0:
3399 lan78xx_status(dev, urb);
3400 break;
3401
3402 /* software-driven interface shutdown */
3403 case -ENOENT: /* urb killed */
3404 case -ESHUTDOWN: /* hardware gone */
3405 netif_dbg(dev, ifdown, dev->net,
3406 "intr shutdown, code %d\n", status);
3407 return;
3408
3409 /* NOTE: not throttling like RX/TX, since this endpoint
3410 * already polls infrequently
3411 */
3412 default:
3413 netdev_dbg(dev->net, "intr status %d\n", status);
3414 break;
3415 }
3416
3417 if (!netif_running(dev->net))
3418 return;
3419
3420 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3421 status = usb_submit_urb(urb, GFP_ATOMIC);
3422 if (status != 0)
3423 netif_err(dev, timer, dev->net,
3424 "intr resubmit --> %d\n", status);
3425}
3426
/* USB disconnect callback: tear everything down in the reverse order of
 * probe — unregister the netdev first so no new traffic arrives, then
 * cancel deferred work/URBs, unbind the hardware, and release the
 * interrupt URB and device references.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);

	net = dev->net;
	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* drop TX URBs parked on the anchor while autosuspended */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);	/* matches usb_get_dev() in probe */
}
3455
e0c79ff6 3456static void lan78xx_tx_timeout(struct net_device *net)
55d7de9d
WH
3457{
3458 struct lan78xx_net *dev = netdev_priv(net);
3459
3460 unlink_urbs(dev, &dev->txq);
3461 tasklet_schedule(&dev->bh);
3462}
3463
/* net_device callbacks wiring the network stack to this driver */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
};
3478
20ff5565
WH
3479static void lan78xx_stat_monitor(unsigned long param)
3480{
3481 struct lan78xx_net *dev;
3482
3483 dev = (struct lan78xx_net *)param;
3484
3485 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3486}
3487
55d7de9d
WH
3488static int lan78xx_probe(struct usb_interface *intf,
3489 const struct usb_device_id *id)
3490{
3491 struct lan78xx_net *dev;
3492 struct net_device *netdev;
3493 struct usb_device *udev;
3494 int ret;
3495 unsigned maxp;
3496 unsigned period;
3497 u8 *buf = NULL;
3498
3499 udev = interface_to_usbdev(intf);
3500 udev = usb_get_dev(udev);
3501
3502 ret = -ENOMEM;
3503 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3504 if (!netdev) {
3505 dev_err(&intf->dev, "Error: OOM\n");
3506 goto out1;
3507 }
3508
3509 /* netdev_printk() needs this */
3510 SET_NETDEV_DEV(netdev, &intf->dev);
3511
3512 dev = netdev_priv(netdev);
3513 dev->udev = udev;
3514 dev->intf = intf;
3515 dev->net = netdev;
3516 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3517 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3518
3519 skb_queue_head_init(&dev->rxq);
3520 skb_queue_head_init(&dev->txq);
3521 skb_queue_head_init(&dev->done);
3522 skb_queue_head_init(&dev->rxq_pause);
3523 skb_queue_head_init(&dev->txq_pend);
3524 mutex_init(&dev->phy_mutex);
3525
3526 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3527 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3528 init_usb_anchor(&dev->deferred);
3529
3530 netdev->netdev_ops = &lan78xx_netdev_ops;
3531 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3532 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3533
20ff5565
WH
3534 dev->stat_monitor.function = lan78xx_stat_monitor;
3535 dev->stat_monitor.data = (unsigned long)dev;
3536 dev->delta = 1;
3537 init_timer(&dev->stat_monitor);
3538
3539 mutex_init(&dev->stats.access_lock);
3540
55d7de9d
WH
3541 ret = lan78xx_bind(dev, intf);
3542 if (ret < 0)
3543 goto out2;
3544 strcpy(netdev->name, "eth%d");
3545
3546 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3547 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3548
f77f0aee
JW
3549 /* MTU range: 68 - 9000 */
3550 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3551
55d7de9d
WH
3552 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3553 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3554 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3555
3556 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3557 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3558
3559 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3560 dev->ep_intr->desc.bEndpointAddress &
3561 USB_ENDPOINT_NUMBER_MASK);
3562 period = dev->ep_intr->desc.bInterval;
3563
3564 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3565 buf = kmalloc(maxp, GFP_KERNEL);
3566 if (buf) {
3567 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3568 if (!dev->urb_intr) {
3569 kfree(buf);
3570 goto out3;
3571 } else {
3572 usb_fill_int_urb(dev->urb_intr, dev->udev,
3573 dev->pipe_intr, buf, maxp,
3574 intr_complete, dev, period);
3575 }
3576 }
3577
3578 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3579
3580 /* driver requires remote-wakeup capability during autosuspend. */
3581 intf->needs_remote_wakeup = 1;
3582
3583 ret = register_netdev(netdev);
3584 if (ret != 0) {
3585 netif_err(dev, probe, netdev, "couldn't register the device\n");
3586 goto out2;
3587 }
3588
3589 usb_set_intfdata(intf, dev);
3590
3591 ret = device_set_wakeup_enable(&udev->dev, true);
3592
3593 /* Default delay of 2sec has more overhead than advantage.
3594 * Set to 10sec as default.
3595 */
3596 pm_runtime_set_autosuspend_delay(&udev->dev,
3597 DEFAULT_AUTOSUSPEND_DELAY);
3598
3599 return 0;
3600
55d7de9d
WH
3601out3:
3602 lan78xx_unbind(dev, intf);
3603out2:
3604 free_netdev(netdev);
3605out1:
3606 usb_put_dev(udev);
3607
3608 return ret;
3609}
3610
3611static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3612{
3613 const u16 crc16poly = 0x8005;
3614 int i;
3615 u16 bit, crc, msb;
3616 u8 data;
3617
3618 crc = 0xFFFF;
3619 for (i = 0; i < len; i++) {
3620 data = *buf++;
3621 for (bit = 0; bit < 8; bit++) {
3622 msb = crc >> 15;
3623 crc <<= 1;
3624
3625 if (msb ^ (u16)(data & 1)) {
3626 crc ^= crc16poly;
3627 crc |= (u16)0x0001U;
3628 }
3629 data >>= 1;
3630 }
3631 }
3632
3633 return crc;
3634}
3635
/* Arm the chip's wake-up logic for a system sleep according to the WoL
 * bitmap (WAKE_PHY/MAGIC/BCAST/MCAST/UCAST/ARP): program WUF_CFG /
 * WUF_MASK pattern filters, the WUCSR enables and the PMT_CTL suspend
 * mode, then re-enable the receiver so wake frames can be seen.
 *
 * NOTE(review): every register read/write result is assigned to "ret"
 * but never checked, and the function always returns 0 — USB errors
 * while arming wake-up go undetected.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	/* pattern prefixes matched by the wake-up frame filters */
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* stop TX/RX while reprogramming wake-up state */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear previous wake-up configuration and sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable all pattern filters before selectively re-enabling */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x7: match the first three bytes of the DA */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3: match the first two bytes (33:33 prefix) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 0x3000: bytes 12-13 of the frame (EtherType) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* receiver must stay enabled for wake frames to be detected */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3778
/* USB suspend callback. On the first suspend (suspend_count 0 -> 1)
 * refuse autosuspend while transmits are pending (-EBUSY), otherwise
 * stop MAC TX/RX and cancel all URBs. Then arm wake-up: good-frame
 * wake for autosuspend, or the user's WoL settings for system sleep.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	/* NOTE(review): "event" is assigned but never used below */
	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* statistics polling cannot run while suspended;
		 * lan78xx_resume() restarts the timer
		 */
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			/* clear pending wake-up status */
			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* keep the receiver on so wake frames are seen */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	/* NOTE(review): register I/O results above are never checked */
	ret = 0;
out:
	return ret;
}
3874
/* USB resume callback: restart statistics polling, resubmit the
 * interrupt URB and any TX URBs deferred while suspended, clear the
 * wake-up state registers and re-enable the transmitter.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	/* restart the statistics timer stopped by lan78xx_suspend() */
	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		/* NOTE(review): the usb_submit_urb() result is ignored
		 * here — a failed resubmit leaves status polling dead
		 */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->urb_intr, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		/* resubmit TX URBs parked on the anchor while asleep */
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* clear wake-up enables and latched wake sources */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	/* write-one-to-clear the "wake event received" status bits */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter disabled during suspend */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
3941
/* Resume after the device may have lost power/state: re-run the full
 * hardware reset and PHY bring-up, then do the normal resume work.
 */
static int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);
	lan78xx_phy_init(dev);

	return lan78xx_resume(intf);
}
3952
/* table of USB devices handled by this driver */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
3965
/* USB driver glue: probe/disconnect and power-management entry points */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");