/*
 * Copyright (C) 2015 Microchip Technology
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <net/ip6_checksum.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include <linux/phy.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME "lan78xx"
#define DRIVER_VERSION "1.0.6"

#define TX_TIMEOUT_JIFFIES (5 * HZ)
#define THROTTLE_JIFFIES (HZ / 8)
#define UNLINK_TIMEOUT_MS 3

#define RX_MAX_QUEUE_MEMORY (60 * 1518)

#define SS_USB_PKT_SIZE (1024)
#define HS_USB_PKT_SIZE (512)
#define FS_USB_PKT_SIZE (64)

#define MAX_RX_FIFO_SIZE (12 * 1024)
#define MAX_TX_FIFO_SIZE (12 * 1024)
#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY (0x0800)
#define MAX_SINGLE_PACKET_SIZE (9000)
#define DEFAULT_TX_CSUM_ENABLE (true)
#define DEFAULT_RX_CSUM_ENABLE (true)
#define DEFAULT_TSO_CSUM_ENABLE (true)
#define DEFAULT_VLAN_FILTER_ENABLE (true)
#define TX_OVERHEAD (8)
#define RXW_PADDING 2

#define LAN78XX_USB_VENDOR_ID (0x0424)
#define LAN7800_USB_PRODUCT_ID (0x7800)
#define LAN7850_USB_PRODUCT_ID (0x7850)
#define LAN7801_USB_PRODUCT_ID (0x7801)
#define LAN78XX_EEPROM_MAGIC (0x78A5)
#define LAN78XX_OTP_MAGIC (0x78F3)

#define MII_READ 1
#define MII_WRITE 0

#define EEPROM_INDICATOR (0xA5)
#define EEPROM_MAC_OFFSET (0x01)
#define MAX_EEPROM_SIZE 512
#define OTP_INDICATOR_1 (0xF3)
#define OTP_INDICATOR_2 (0xF7)

#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
		  WAKE_MCAST | WAKE_BCAST | \
		  WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE 1
#define BULK_OUT_PIPE 2

/* default autosuspend delay (mSec) */
#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)

/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER (1 * 1000)

/* defines interrupts from interrupt EP */
#define MAX_INT_EP (32)
#define INT_EP_INTEP (31)
#define INT_EP_OTP_WR_DONE (28)
#define INT_EP_EEE_TX_LPI_START (26)
#define INT_EP_EEE_TX_LPI_STOP (25)
#define INT_EP_EEE_RX_LPI (24)
#define INT_EP_MAC_RESET_TIMEOUT (23)
#define INT_EP_RDFO (22)
#define INT_EP_TXE (21)
#define INT_EP_USB_STATUS (20)
#define INT_EP_TX_DIS (19)
#define INT_EP_RX_DIS (18)
#define INT_EP_PHY (17)
#define INT_EP_DP (16)
#define INT_EP_MAC_ERR (15)
#define INT_EP_TDFU (14)
#define INT_EP_TDFO (13)
#define INT_EP_UTX (12)
#define INT_EP_GPIO_11 (11)
#define INT_EP_GPIO_10 (10)
#define INT_EP_GPIO_9 (9)
#define INT_EP_GPIO_8 (8)
#define INT_EP_GPIO_7 (7)
#define INT_EP_GPIO_6 (6)
#define INT_EP_GPIO_5 (5)
#define INT_EP_GPIO_4 (4)
#define INT_EP_GPIO_3 (3)
#define INT_EP_GPIO_2 (2)
#define INT_EP_GPIO_1 (1)
#define INT_EP_GPIO_0 (0)

static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};

struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data { /* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};

#define EVENT_TX_HALT 0
#define EVENT_RX_HALT 1
#define EVENT_RX_MEMORY 2
#define EVENT_STS_SPLIT 3
#define EVENT_LINK_RESET 4
#define EVENT_RX_PAUSED 5
#define EVENT_DEV_WAKING 6
#define EVENT_DEV_ASLEEP 7
#define EVENT_DEV_OPEN 8
#define EVENT_STAT_UPDATE 9

struct statstage {
	struct mutex access_lock; /* for stats access */
	struct lan78xx_statstage saved;
	struct lan78xx_statstage rollover_count;
	struct lan78xx_statstage rollover_max;
	struct lan78xx_statstage64 curr_stat;
};

struct irq_domain_data {
	struct irq_domain *irqdomain;
	unsigned int phyirq;
	struct irq_chip *irqchip;
	irq_flow_handler_t irq_handler;
	u32 irqenable;
	struct mutex irq_lock; /* for irq bus access */
};

struct lan78xx_net {
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;
	void *driver_priv;

	int rx_qlen;
	int tx_qlen;
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head done;
	struct sk_buff_head rxq_pause;
	struct sk_buff_head txq_pend;

	struct tasklet_struct bh;
	struct delayed_work wq;

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int msg_enable;

	struct urb *urb_intr;
	struct usb_anchor deferred;

	struct mutex phy_mutex; /* for phy access */
	unsigned pipe_in, pipe_out, pipe_intr;

	u32 hard_mtu; /* count any extra framing */
	size_t rx_urb_size; /* size for rx urbs */

	unsigned long flags;

	wait_queue_head_t *wait;
	unsigned char suspend_count;

	unsigned maxpacket;
	struct timer_list delay;
	struct timer_list stat_monitor;

	unsigned long data[5];

	int link_on;
	u8 mdix_ctrl;

	u32 chipid;
	u32 chiprev;
	struct mii_bus *mdiobus;
	phy_interface_t interface;

	int fc_autoneg;
	u8 fc_request_control;

	int delta;
	struct statstage stats;

	struct irq_domain_data domain_data;
};

/* define external phy id */
#define PHY_LAN8835 (0x0007C130)
#define PHY_KSZ9031RNX (0x00221620)

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

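/* Register access goes over USB vendor control transfers. The 32-bit
 * payload travels through a small kmalloc'd buffer rather than the stack,
 * since the USB core may DMA-map the transfer buffer, and registers are
 * little-endian on the wire.
 */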
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

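/* Fetch the whole hardware statistics block with a single vendor control
 * read, then convert each 32-bit counter from little-endian wire order
 * into host order.
 */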
static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = 0x%x", ret);
	}

	kfree(stats);

	return ret;
}

#define check_counter_rollover(struct1, dev_stats, member) { \
	if (struct1->member < dev_stats.saved.member) \
		dev_stats.rollover_count.member++; \
	}

static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

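/* Fold the 32-bit hardware counters into the 64-bit software counters:
 * each 64-bit value is the current hardware reading plus one full counter
 * span (rollover_max + 1) for every rollover observed so far.
 */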
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

/* Loop until the read is completed, with timeout. Called with phy_mutex
 * held.
 */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}

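/* Compose a MII_ACC register value: PHY address, register index,
 * read/write direction, and the BUSY bit that starts the transaction.
 */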
static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}

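/* Read bytes from the configuration EEPROM one at a time through the
 * E2P command and data registers, waiting for the controller between
 * bytes.
 */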
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		return retval;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		data[i] = val & 0xFF;
		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if ((ret == 0) && (sig == EEPROM_INDICATOR))
		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
	else
		ret = -EINVAL;

	return ret;
}

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

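/* Access the on-chip OTP memory: power the OTP block up if it is powered
 * down, then issue one command per byte and poll OTP_STATUS until the
 * controller goes idle.
 */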
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			return -EINVAL;

		ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");

	return -EIO;
}

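/* Write a block of 32-bit words into the selected internal RAM (dataport),
 * such as the VLAN/multicast hash filter table. Must run in a sleepable
 * context: it takes a mutex and issues USB transfers.
 */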
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
		container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}

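/* ndo_set_rx_mode callback. It runs in atomic context, so it only rebuilds
 * the shadow filter tables under the rfe_ctl spinlock and defers the
 * actual (sleeping) register writes to the set_multicast work item.
 */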
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;
	/* pfilter_table[0] holds our own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] =
			pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
					(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

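/* Program MAC flow control from the resolved pause capabilities: cap is
 * either autonegotiated from the local/partner advertisements or taken
 * from the user-requested settings.
 */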
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}

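/* Handle a PHY link change event: on link down, reset the MAC and stop
 * the statistics timer; on link up, tune USB U1/U2 link power management
 * for the negotiated speed, update flow control, and restart statistics
 * monitoring.
 */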
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}
	}

	return ret;
}

/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	memcpy(&intdata, urb->transfer_buffer, 4);
	le32_to_cpus(&intdata);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0)
			generic_handle_irq(dev->domain_data.phyirq);
	} else
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	ee->magic = LAN78XX_EEPROM_MAGIC;

	return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Allow entire eeprom update only */
	if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
	    (ee->offset == 0) &&
	    (ee->len == 512) &&
	    (data[0] == EEPROM_INDICATOR))
		return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	return -EINVAL;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = 0;
	if (wol->wolopts & WAKE_UCAST)
		pdata->wol |= WAKE_UCAST;
	if (wol->wolopts & WAKE_MCAST)
		pdata->wol |= WAKE_MCAST;
	if (wol->wolopts & WAKE_BCAST)
		pdata->wol |= WAKE_BCAST;
	if (wol->wolopts & WAKE_MAGIC)
		pdata->wol |= WAKE_MAGIC;
	if (wol->wolopts & WAKE_PHY)
		pdata->wol |= WAKE_PHY;
	if (wol->wolopts & WAKE_ARP)
		pdata->wol |= WAKE_ARP;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer use the same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}

static u32 lan78xx_get_link(struct net_device *net)
{
	phy_read_status(net->phydev);

	return net->phydev->link;
}

static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}

static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}

static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}

static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}

static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		u32 mii_adv;
		u32 advertising;

		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, ecmd.link_modes.advertising);

		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

		ethtool_convert_legacy_u32_to_link_mode(
			ecmd.link_modes.advertising, advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}

static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link = lan78xx_get_link,
	.nway_reset = phy_ethtool_nway_reset,
	.get_drvinfo = lan78xx_get_drvinfo,
	.get_msglevel = lan78xx_get_msglevel,
	.set_msglevel = lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom = lan78xx_ethtool_get_eeprom,
	.set_eeprom = lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings = lan78xx_get_strings,
	.get_wol = lan78xx_get_wol,
	.set_wol = lan78xx_set_wol,
	.get_eee = lan78xx_get_eee,
	.set_eee = lan78xx_set_eee,
	.get_pauseparam = lan78xx_get_pause,
	.set_pauseparam = lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
};

static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, rq, cmd);
}

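/* Determine the MAC address at probe time: use the address already in
 * RX_ADDRL/RX_ADDRH if valid, otherwise try EEPROM or OTP, and fall back
 * to a random address as a last resort.
 */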
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		/* reading mac address from EEPROM or OTP */
		if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
					 addr) == 0) ||
		    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
				      addr) == 0)) {
			if (is_valid_ether_addr(addr)) {
				/* eeprom values are valid so use them */
				netif_dbg(dev, ifup, dev->net,
					  "MAC address read from EEPROM");
			} else {
				/* generate random MAC */
				random_ether_addr(addr);
				netif_dbg(dev, ifup, dev->net,
					  "MAC address set to random addr");
			}

			addr_lo = addr[0] | (addr[1] << 8) |
				  (addr[2] << 16) | (addr[3] << 24);
			addr_hi = addr[4] | (addr[5] << 8);

			ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
			ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
		} else {
			/* generate random MAC */
			random_ether_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}
	}

	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}

/* MDIO read and write wrappers for phylib */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}

static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";

	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	ret = mdiobus_register(dev->mdiobus);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}

static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}

static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* In forced 100 F/H mode, the chip may fail to set the mode
	 * correctly when the cable is switched between a long (~50m+)
	 * and a short one. As a workaround, set the speed to 10 before
	 * setting it to 100 in forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear the pending interrupt generated during the workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}

static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}

static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}

static const struct irq_domain_ops chip_domain_ops = {
	.map = irq_map,
	.unmap = irq_unmap,
};

static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}

static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
		container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* Do the register access here, because irq_bus_lock and
	 * irq_bus_sync_unlock are the only two callbacks executed in a
	 * non-atomic context.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}

static struct irq_chip lan78xx_irqchip = {
	.name = "lan78xx-irqs",
	.irq_mask = lan78xx_irq_mask,
	.irq_unmask = lan78xx_irq_unmask,
	.irq_bus_lock = lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock,
};

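/* Create a simple IRQ domain covering the device's interrupt endpoint
 * bits and map the PHY interrupt (INT_EP_PHY), so phylib can use it
 * instead of polling.
 */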
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}

static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
{
	if (dev->domain_data.phyirq > 0) {
		irq_dispose_mapping(dev->domain_data.phyirq);

		if (dev->domain_data.irqdomain)
			irq_domain_remove(dev->domain_data.irqdomain);
	}
	dev->domain_data.phyirq = 0;
	dev->domain_data.irqdomain = NULL;
}

02dc1f3d
WH
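/* PHY fixup for an external LAN8835 on the LAN7801 (RGMII). Besides
 * the pin-mux write below, it enables the MAC-side TXC delay, so the
 * interface ends up as RGMII_TXID. The register values are taken on
 * trust from the vendor configuration, not re-derived from a datasheet.
 */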
1948static int lan8835_fixup(struct phy_device *phydev)
1949{
1950 int buf;
1951 int ret;
1952 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1953
1954 /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
1955 buf = phy_read_mmd_indirect(phydev, 0x8010, 3);
1956 buf &= ~0x1800;
1957 buf |= 0x0800;
1958 phy_write_mmd_indirect(phydev, 0x8010, 3, buf);
1959
1960 /* RGMII MAC TXC Delay Enable */
1961 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
1962 MAC_RGMII_ID_TXC_DELAY_EN_);
1963
1964 /* RGMII TX DLL Tune Adjust */
1965 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
1966
1967 dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
1968
1969 return 1;
1970}
1971
1972static int ksz9031rnx_fixup(struct phy_device *phydev)
1973{
1974 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1975
1976 /* Micrel KSZ9031RNX PHY configuration */
1977 /* RGMII Control Signal Pad Skew */
1978 phy_write_mmd_indirect(phydev, 4, 2, 0x0077);
1979 /* RGMII RX Data Pad Skew */
1980 phy_write_mmd_indirect(phydev, 5, 2, 0x7777);
1981 /* RGMII RX Clock Pad Skew */
1982 phy_write_mmd_indirect(phydev, 8, 2, 0x1FF);
1983
1984 dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
1985
1986 return 1;
1987}
1988
55d7de9d
WH
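/* Locate the PHY on the MDIO bus and attach it. LAN7800 and LAN7850
 * use the built-in GMII PHY; LAN7801 expects an external RGMII PHY
 * (KSZ9031RNX or LAN8835), for which board fixups are registered.
 */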
1989static int lan78xx_phy_init(struct lan78xx_net *dev)
1990{
ce85e13a 1991 int ret;
349e0c5e 1992 u32 mii_adv;
ce85e13a 1993 struct phy_device *phydev = dev->net->phydev;
55d7de9d 1994
ce85e13a
WH
1995 phydev = phy_find_first(dev->mdiobus);
1996 if (!phydev) {
1997 netdev_err(dev->net, "no PHY found\n");
1998 return -EIO;
1999 }
55d7de9d 2000
02dc1f3d
WH
2001 if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
2002 (dev->chipid == ID_REV_CHIP_ID_7850_)) {
2003 phydev->is_internal = true;
2004 dev->interface = PHY_INTERFACE_MODE_GMII;
2005
2006 } else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2007 if (!phydev->drv) {
2008 netdev_err(dev->net, "no PHY driver found\n");
2009 return -EIO;
2010 }
2011
2012 dev->interface = PHY_INTERFACE_MODE_RGMII;
2013
2014 /* external PHY fixup for KSZ9031RNX */
2015 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2016 ksz9031rnx_fixup);
2017 if (ret < 0) {
2018 netdev_err(dev->net, "failed to register fixup\n");
2019 return ret;
2020 }
2021 /* external PHY fixup for LAN8835 */
2022 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2023 lan8835_fixup);
2024 if (ret < 0) {
2025 netdev_err(dev->net, "failed to register fixup\n");
2026 return ret;
2027 }
2028 /* add more external PHY fixup here if needed */
2029
2030 phydev->is_internal = false;
2031 } else {
2032 netdev_err(dev->net, "unknown chip ID found\n");
2033 ret = -EIO;
2034 goto error;
2035 }
2036
cc89c323
WH
2037 /* if phyirq is not set, use polling mode in phylib */
2038 if (dev->domain_data.phyirq > 0)
2039 phydev->irq = dev->domain_data.phyirq;
2040 else
2041 phydev->irq = 0;
2042 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
e4953910 2043
f6e3ef3e
WH
2044 /* set to AUTOMDIX */
2045 phydev->mdix = ETH_TP_MDI_AUTO;
2046
ce85e13a
WH
2047 ret = phy_connect_direct(dev->net, phydev,
2048 lan78xx_link_status_change,
02dc1f3d 2049 dev->interface);
ce85e13a
WH
2050 if (ret) {
2051 netdev_err(dev->net, "can't attach PHY to %s\n",
2052 dev->mdiobus->id);
2053 return -EIO;
2054 }
55d7de9d 2055
ce85e13a
WH
2056 /* MAC doesn't support 1000T Half */
2057 phydev->supported &= ~SUPPORTED_1000baseT_Half;
e270b2db 2058
349e0c5e
WH
2059 /* request both RX and TX flow control */
2060 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2061 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2062 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2063 phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2064
ce85e13a
WH
2065 genphy_config_aneg(phydev);
2066
349e0c5e
WH
2067 dev->fc_autoneg = phydev->autoneg;
2068
ce85e13a 2069 phy_start(phydev);
55d7de9d
WH
2070
2071 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2072
2073 return 0;
02dc1f3d
WH
2074
2075error:
2076 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2077 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2078
2079 return ret;
55d7de9d
WH
2080}
2081
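/* Update the MAC RX maximum frame size. The receiver is temporarily
 * disabled around the write when it is running, and 4 bytes are added
 * to the requested size to make room for the FCS.
 */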
2082static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2083{
2084 int ret = 0;
2085 u32 buf;
2086 bool rxenabled;
2087
2088 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2089
2090 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2091
2092 if (rxenabled) {
2093 buf &= ~MAC_RX_RXEN_;
2094 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2095 }
2096
2097 /* add 4 to size for FCS */
2098 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2099 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2100
2101 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2102
2103 if (rxenabled) {
2104 buf |= MAC_RX_RXEN_;
2105 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2106 }
2107
2108 return 0;
2109}
2110
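/* Asynchronously unlink every URB queued on @q that is not already
 * being unlinked, returning the number of unlink requests issued.
 * The queue lock is dropped around usb_unlink_urb() because the
 * completion handlers take the same lock via defer_bh().
 */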
2111static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2112{
2113 struct sk_buff *skb;
2114 unsigned long flags;
2115 int count = 0;
2116
2117 spin_lock_irqsave(&q->lock, flags);
2118 while (!skb_queue_empty(q)) {
2119 struct skb_data *entry;
2120 struct urb *urb;
2121 int ret;
2122
2123 skb_queue_walk(q, skb) {
2124 entry = (struct skb_data *)skb->cb;
2125 if (entry->state != unlink_start)
2126 goto found;
2127 }
2128 break;
2129found:
2130 entry->state = unlink_start;
2131 urb = entry->urb;
2132
2133 /* Take a reference on the URB so it cannot be freed
2134  * while usb_unlink_urb() runs; otherwise a use-after-free
2135  * is possible inside usb_unlink_urb(), since it always
2136  * races with the .complete handler (including defer_bh).
2137  */
2139 usb_get_urb(urb);
2140 spin_unlock_irqrestore(&q->lock, flags);
2141 /* during some PM-driven resume scenarios,
2142 * these (async) unlinks complete immediately
2143 */
2144 ret = usb_unlink_urb(urb);
2145 if (ret != -EINPROGRESS && ret != 0)
2146 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2147 else
2148 count++;
2149 usb_put_urb(urb);
2150 spin_lock_irqsave(&q->lock, flags);
2151 }
2152 spin_unlock_irqrestore(&q->lock, flags);
2153 return count;
2154}
2155
2156static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2157{
2158 struct lan78xx_net *dev = netdev_priv(netdev);
2159 int ll_mtu = new_mtu + netdev->hard_header_len;
2160 int old_hard_mtu = dev->hard_mtu;
2161 int old_rx_urb_size = dev->rx_urb_size;
2162 int ret;
2163
55d7de9d
WH
2164 /* no second zero-length packet read wanted after mtu-sized packets */
2165 if ((ll_mtu % dev->maxpacket) == 0)
2166 return -EDOM;
2167
2168 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
2169
2170 netdev->mtu = new_mtu;
2171
2172 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2173 if (dev->rx_urb_size == old_hard_mtu) {
2174 dev->rx_urb_size = dev->hard_mtu;
2175 if (dev->rx_urb_size > old_rx_urb_size) {
2176 if (netif_running(dev->net)) {
2177 unlink_urbs(dev, &dev->rxq);
2178 tasklet_schedule(&dev->bh);
2179 }
2180 }
2181 }
2182
2183 return 0;
2184}
2185
e0c79ff6 2186static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
55d7de9d
WH
2187{
2188 struct lan78xx_net *dev = netdev_priv(netdev);
2189 struct sockaddr *addr = p;
2190 u32 addr_lo, addr_hi;
2191 int ret;
2192
2193 if (netif_running(netdev))
2194 return -EBUSY;
2195
2196 if (!is_valid_ether_addr(addr->sa_data))
2197 return -EADDRNOTAVAIL;
2198
2199 ether_addr_copy(netdev->dev_addr, addr->sa_data);
2200
2201 addr_lo = netdev->dev_addr[0] |
2202 netdev->dev_addr[1] << 8 |
2203 netdev->dev_addr[2] << 16 |
2204 netdev->dev_addr[3] << 24;
2205 addr_hi = netdev->dev_addr[4] |
2206 netdev->dev_addr[5] << 8;
2207
2208 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2209 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2210
2211 return 0;
2212}
2213
2214/* Enable or disable Rx checksum offload engine */
2215static int lan78xx_set_features(struct net_device *netdev,
2216 netdev_features_t features)
2217{
2218 struct lan78xx_net *dev = netdev_priv(netdev);
2219 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2220 unsigned long flags;
2221 int ret;
2222
2223 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2224
2225 if (features & NETIF_F_RXCSUM) {
2226 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2227 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2228 } else {
2229 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2230 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2231 }
2232
2233 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2234 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2235 else
2236 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2237
2238 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2239
2240 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2241
2242 return 0;
2243}
2244
2245static void lan78xx_deferred_vlan_write(struct work_struct *param)
2246{
2247 struct lan78xx_priv *pdata =
2248 container_of(param, struct lan78xx_priv, set_vlan);
2249 struct lan78xx_net *dev = pdata->dev;
2250
2251 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2252 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2253}
2254
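/* The 4096 possible VLAN IDs are tracked in a bitmap of 128 u32 words
 * (DP_SEL_VHF_VLAN_LEN): word index = vid >> 5, bit index = vid & 0x1F.
 * For example, VID 100 lands in vlan_table[3], bit 4.
 */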
2255static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2256 __be16 proto, u16 vid)
2257{
2258 struct lan78xx_net *dev = netdev_priv(netdev);
2259 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2260 u16 vid_bit_index;
2261 u16 vid_dword_index;
2262
2263 vid_dword_index = (vid >> 5) & 0x7F;
2264 vid_bit_index = vid & 0x1F;
2265
2266 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2267
2268 /* defer register writes to a sleepable context */
2269 schedule_work(&pdata->set_vlan);
2270
2271 return 0;
2272}
2273
2274static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2275 __be16 proto, u16 vid)
2276{
2277 struct lan78xx_net *dev = netdev_priv(netdev);
2278 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2279 u16 vid_bit_index;
2280 u16 vid_dword_index;
2281
2282 vid_dword_index = (vid >> 5) & 0x7F;
2283 vid_bit_index = vid & 0x1F;
2284
2285 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2286
2287 /* defer register writes to a sleepable context */
2288 schedule_work(&pdata->set_vlan);
2289
2290 return 0;
2291}
2292
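/* Program the USB LTM (Latency Tolerance Messaging) registers. When
 * LTM is enabled, the six values are loaded from EEPROM or OTP (a
 * 24-byte block located via the pointer stored at offset 0x3F);
 * otherwise they are left at zero.
 */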
2293static void lan78xx_init_ltm(struct lan78xx_net *dev)
2294{
2295 int ret;
2296 u32 buf;
2297 u32 regs[6] = { 0 };
2298
2299 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2300 if (buf & USB_CFG1_LTM_ENABLE_) {
2301 u8 temp[2];
2302 /* Get values from EEPROM first */
2303 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2304 if (temp[0] == 24) {
2305 ret = lan78xx_read_raw_eeprom(dev,
2306 temp[1] * 2,
2307 24,
2308 (u8 *)regs);
2309 if (ret < 0)
2310 return;
2311 }
2312 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2313 if (temp[0] == 24) {
2314 ret = lan78xx_read_raw_otp(dev,
2315 temp[1] * 2,
2316 24,
2317 (u8 *)regs);
2318 if (ret < 0)
2319 return;
2320 }
2321 }
2322 }
2323
2324 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2325 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2326 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2327 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2328 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2329 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2330}
2331
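/* Full hardware (re)initialisation: lite reset, MAC address setup,
 * burst/bulk-in tuning by USB link speed, FIFO sizing, checksum and
 * multicast configuration, PHY reset, and finally enabling the MAC
 * and FIFO controllers for TX and RX.
 */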
2332static int lan78xx_reset(struct lan78xx_net *dev)
2333{
2334 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2335 u32 buf;
2336 int ret = 0;
2337 unsigned long timeout;
2338
2339 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2340 buf |= HW_CFG_LRST_;
2341 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2342
2343 timeout = jiffies + HZ;
2344 do {
2345 mdelay(1);
2346 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2347 if (time_after(jiffies, timeout)) {
2348 netdev_warn(dev->net,
2349 "timeout on completion of LiteReset");
2350 return -EIO;
2351 }
2352 } while (buf & HW_CFG_LRST_);
2353
2354 lan78xx_init_mac_address(dev);
2355
ce85e13a
WH
2356 /* save the device ID for later use */
2357 ret = lan78xx_read_reg(dev, ID_REV, &buf);
87177ba6
WH
2358 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2359 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
ce85e13a 2360
55d7de9d
WH
2361 /* Respond to the IN token with a NAK */
2362 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2363 buf |= USB_CFG_BIR_;
2364 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2365
2366 /* Init LTM */
2367 lan78xx_init_ltm(dev);
2368
2369 dev->net->hard_header_len += TX_OVERHEAD;
2370 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2371
2372 if (dev->udev->speed == USB_SPEED_SUPER) {
2373 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2374 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2375 dev->rx_qlen = 4;
2376 dev->tx_qlen = 4;
2377 } else if (dev->udev->speed == USB_SPEED_HIGH) {
2378 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2379 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2380 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2381 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2382 } else {
2383 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2384 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2385 dev->rx_qlen = 4;
2386 }
2387
2388 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2389 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2390
2391 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2392 buf |= HW_CFG_MEF_;
2393 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2394
2395 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2396 buf |= USB_CFG_BCE_;
2397 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2398
2399 /* set FIFO sizes */
2400 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2401 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2402
2403 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2404 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2405
2406 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2407 ret = lan78xx_write_reg(dev, FLOW, 0);
2408 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2409
2410 /* Don't need rfe_ctl_lock during initialisation */
2411 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2412 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2413 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2414
2415 /* Enable or disable checksum offload engines */
2416 lan78xx_set_features(dev->net, dev->net->features);
2417
2418 lan78xx_set_multicast(dev->net);
2419
2420 /* reset PHY */
2421 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2422 buf |= PMT_CTL_PHY_RST_;
2423 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2424
2425 timeout = jiffies + HZ;
2426 do {
2427 mdelay(1);
2428 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2429 if (time_after(jiffies, timeout)) {
2430 netdev_warn(dev->net, "timeout waiting for PHY Reset");
2431 return -EIO;
2432 }
6c595b03 2433 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
55d7de9d 2434
55d7de9d 2435 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
02dc1f3d
WH
2436 /* LAN7801 only has RGMII mode */
2437 if (dev->chipid == ID_REV_CHIP_ID_7801_)
2438 buf &= ~MAC_CR_GMII_EN_;
55d7de9d 2439 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
55d7de9d
WH
2440 ret = lan78xx_write_reg(dev, MAC_CR, buf);
2441
55d7de9d
WH
2442 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2443 buf |= MAC_TX_TXEN_;
2444 ret = lan78xx_write_reg(dev, MAC_TX, buf);
2445
2446 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2447 buf |= FCT_TX_CTL_EN_;
2448 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2449
2450 ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
2451
2452 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2453 buf |= MAC_RX_RXEN_;
2454 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2455
2456 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2457 buf |= FCT_RX_CTL_EN_;
2458 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2459
55d7de9d
WH
2460 return 0;
2461}
2462
20ff5565
WH
2463static void lan78xx_init_stats(struct lan78xx_net *dev)
2464{
2465 u32 *p;
2466 int i;
2467
2468 /* initialise rollover limits for the stats update;
2469  * some hardware counters are 20 bits wide and some are 32 bits
2470  */
2471 p = (u32 *)&dev->stats.rollover_max;
2472 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2473 p[i] = 0xFFFFF;
2474
2475 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2476 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2477 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2478 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2479 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2480 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2481 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2482 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2483 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2484 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2485
2486 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
2487}
2488
55d7de9d
WH
2489static int lan78xx_open(struct net_device *net)
2490{
2491 struct lan78xx_net *dev = netdev_priv(net);
2492 int ret;
2493
2494 ret = usb_autopm_get_interface(dev->intf);
2495 if (ret < 0)
2496 goto out;
2497
2498 ret = lan78xx_reset(dev);
2499 if (ret < 0)
2500 goto done;
2501
ce85e13a
WH
2502 ret = lan78xx_phy_init(dev);
2503 if (ret < 0)
2504 goto done;
2505
55d7de9d
WH
2506 /* for Link Check */
2507 if (dev->urb_intr) {
2508 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2509 if (ret < 0) {
2510 netif_err(dev, ifup, dev->net,
2511 "intr submit %d\n", ret);
2512 goto done;
2513 }
2514 }
2515
20ff5565
WH
2516 lan78xx_init_stats(dev);
2517
55d7de9d
WH
2518 set_bit(EVENT_DEV_OPEN, &dev->flags);
2519
2520 netif_start_queue(net);
2521
2522 dev->link_on = false;
2523
2524 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2525done:
2526 usb_autopm_put_interface(dev->intf);
2527
2528out:
2529 return ret;
2530}
2531
2532static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2533{
2534 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2535 DECLARE_WAITQUEUE(wait, current);
2536 int temp;
2537
2538 /* ensure there are no more active urbs */
2539 add_wait_queue(&unlink_wakeup, &wait);
2540 set_current_state(TASK_UNINTERRUPTIBLE);
2541 dev->wait = &unlink_wakeup;
2542 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2543
2544 /* maybe wait for deletions to finish. */
2545 while (!skb_queue_empty(&dev->rxq) &&
2546 !skb_queue_empty(&dev->txq) &&
2547 !skb_queue_empty(&dev->done)) {
2548 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2549 set_current_state(TASK_UNINTERRUPTIBLE);
2550 netif_dbg(dev, ifdown, dev->net,
2551 "waited for %d urb completions\n", temp);
2552 }
2553 set_current_state(TASK_RUNNING);
2554 dev->wait = NULL;
2555 remove_wait_queue(&unlink_wakeup, &wait);
2556}
2557
e0c79ff6 2558static int lan78xx_stop(struct net_device *net)
55d7de9d
WH
2559{
2560 struct lan78xx_net *dev = netdev_priv(net);
2561
20ff5565
WH
2562 if (timer_pending(&dev->stat_monitor))
2563 del_timer_sync(&dev->stat_monitor);
2564
02dc1f3d
WH
2565 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2566 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2567
ce85e13a
WH
2568 phy_stop(net->phydev);
2569 phy_disconnect(net->phydev);
02dc1f3d 2570
ce85e13a
WH
2571 net->phydev = NULL;
2572
55d7de9d
WH
2573 clear_bit(EVENT_DEV_OPEN, &dev->flags);
2574 netif_stop_queue(net);
2575
2576 netif_info(dev, ifdown, dev->net,
2577 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2578 net->stats.rx_packets, net->stats.tx_packets,
2579 net->stats.rx_errors, net->stats.tx_errors);
2580
2581 lan78xx_terminate_urbs(dev);
2582
2583 usb_kill_urb(dev->urb_intr);
2584
2585 skb_queue_purge(&dev->rxq_pause);
2586
2587 /* deferred work (task, timer, softirq) must also stop.
2588 * can't flush_scheduled_work() until we drop rtnl (later),
2589 * else workers could deadlock; so make workers a NOP.
2590 */
2591 dev->flags = 0;
2592 cancel_delayed_work_sync(&dev->wq);
2593 tasklet_kill(&dev->bh);
2594
2595 usb_autopm_put_interface(dev->intf);
2596
2597 return 0;
2598}
2599
2600static int lan78xx_linearize(struct sk_buff *skb)
2601{
2602 return skb_linearize(skb);
2603}
2604
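/* Prepend the two 32-bit little-endian TX command words the device
 * expects in front of every frame: TX_CMD_A carries the length and
 * the FCS/checksum/LSO flags, TX_CMD_B the MSS and the VLAN tag.
 */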
2605static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2606 struct sk_buff *skb, gfp_t flags)
2607{
2608 u32 tx_cmd_a, tx_cmd_b;
2609
d4ca7359 2610 if (skb_cow_head(skb, TX_OVERHEAD)) {
55d7de9d 2611 dev_kfree_skb_any(skb);
d4ca7359 2612 return NULL;
55d7de9d
WH
2613 }
2614
2615 if (lan78xx_linearize(skb) < 0)
2616 return NULL;
2617
2618 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2619
2620 if (skb->ip_summed == CHECKSUM_PARTIAL)
2621 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2622
2623 tx_cmd_b = 0;
2624 if (skb_is_gso(skb)) {
2625 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2626
2627 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2628
2629 tx_cmd_a |= TX_CMD_A_LSO_;
2630 }
2631
2632 if (skb_vlan_tag_present(skb)) {
2633 tx_cmd_a |= TX_CMD_A_IVTG_;
2634 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2635 }
2636
2637 skb_push(skb, 4);
2638 cpu_to_le32s(&tx_cmd_b);
2639 memcpy(skb->data, &tx_cmd_b, 4);
2640
2641 skb_push(skb, 4);
2642 cpu_to_le32s(&tx_cmd_a);
2643 memcpy(skb->data, &tx_cmd_a, 4);
2644
2645 return skb;
2646}
2647
2648static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2649 struct sk_buff_head *list, enum skb_state state)
2650{
2651 unsigned long flags;
2652 enum skb_state old_state;
2653 struct skb_data *entry = (struct skb_data *)skb->cb;
2654
2655 spin_lock_irqsave(&list->lock, flags);
2656 old_state = entry->state;
2657 entry->state = state;
55d7de9d
WH
2658
2659 __skb_unlink(skb, list);
2660 spin_unlock(&list->lock);
2661 spin_lock(&dev->done.lock);
55d7de9d
WH
2662
2663 __skb_queue_tail(&dev->done, skb);
2664 if (skb_queue_len(&dev->done) == 1)
2665 tasklet_schedule(&dev->bh);
2666 spin_unlock_irqrestore(&dev->done.lock, flags);
2667
2668 return old_state;
2669}
2670
2671static void tx_complete(struct urb *urb)
2672{
2673 struct sk_buff *skb = (struct sk_buff *)urb->context;
2674 struct skb_data *entry = (struct skb_data *)skb->cb;
2675 struct lan78xx_net *dev = entry->dev;
2676
2677 if (urb->status == 0) {
74d79a2e 2678 dev->net->stats.tx_packets += entry->num_of_packet;
55d7de9d
WH
2679 dev->net->stats.tx_bytes += entry->length;
2680 } else {
2681 dev->net->stats.tx_errors++;
2682
2683 switch (urb->status) {
2684 case -EPIPE:
2685 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2686 break;
2687
2688 /* software-driven interface shutdown */
2689 case -ECONNRESET:
2690 case -ESHUTDOWN:
2691 break;
2692
2693 case -EPROTO:
2694 case -ETIME:
2695 case -EILSEQ:
2696 netif_stop_queue(dev->net);
2697 break;
2698 default:
2699 netif_dbg(dev, tx_err, dev->net,
2700 "tx err %d\n", entry->urb->status);
2701 break;
2702 }
2703 }
2704
2705 usb_autopm_put_interface_async(dev->intf);
2706
81c38e81 2707 defer_bh(dev, skb, &dev->txq, tx_done);
55d7de9d
WH
2708}
2709
2710static void lan78xx_queue_skb(struct sk_buff_head *list,
2711 struct sk_buff *newsk, enum skb_state state)
2712{
2713 struct skb_data *entry = (struct skb_data *)newsk->cb;
2714
2715 __skb_queue_tail(list, newsk);
2716 entry->state = state;
2717}
2718
e0c79ff6
BX
2719static netdev_tx_t
2720lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
55d7de9d
WH
2721{
2722 struct lan78xx_net *dev = netdev_priv(net);
81c38e81 2723 struct sk_buff *skb2 = NULL;
55d7de9d 2724
81c38e81 2725 if (skb) {
55d7de9d 2726 skb_tx_timestamp(skb);
81c38e81
WH
2727 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2728 }
55d7de9d 2729
81c38e81
WH
2730 if (skb2) {
2731 skb_queue_tail(&dev->txq_pend, skb2);
55d7de9d 2732
4b2a4a96
WH
2733 /* throttle the TX path at speeds below SuperSpeed USB */
2734 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2735 (skb_queue_len(&dev->txq_pend) > 10))
55d7de9d
WH
2736 netif_stop_queue(net);
2737 } else {
2738 netif_dbg(dev, tx_err, dev->net,
2739 "lan78xx_tx_prep returned NULL\n");
2740 dev->net->stats.tx_errors++;
2741 dev->net->stats.tx_dropped++;
2742 }
2743
2744 tasklet_schedule(&dev->bh);
2745
2746 return NETDEV_TX_OK;
2747}
2748
e0c79ff6
BX
2749static int
2750lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
55d7de9d
WH
2751{
2752 int tmp;
2753 struct usb_host_interface *alt = NULL;
2754 struct usb_host_endpoint *in = NULL, *out = NULL;
2755 struct usb_host_endpoint *status = NULL;
2756
2757 for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2758 unsigned ep;
2759
2760 in = NULL;
2761 out = NULL;
2762 status = NULL;
2763 alt = intf->altsetting + tmp;
2764
2765 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2766 struct usb_host_endpoint *e;
2767 int intr = 0;
2768
2769 e = alt->endpoint + ep;
2770 switch (e->desc.bmAttributes) {
2771 case USB_ENDPOINT_XFER_INT:
2772 if (!usb_endpoint_dir_in(&e->desc))
2773 continue;
2774 intr = 1;
2775 /* FALLTHROUGH */
2776 case USB_ENDPOINT_XFER_BULK:
2777 break;
2778 default:
2779 continue;
2780 }
2781 if (usb_endpoint_dir_in(&e->desc)) {
2782 if (!intr && !in)
2783 in = e;
2784 else if (intr && !status)
2785 status = e;
2786 } else {
2787 if (!out)
2788 out = e;
2789 }
2790 }
2791 if (in && out)
2792 break;
2793 }
2794 if (!alt || !in || !out)
2795 return -EINVAL;
2796
2797 dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2798 in->desc.bEndpointAddress &
2799 USB_ENDPOINT_NUMBER_MASK);
2800 dev->pipe_out = usb_sndbulkpipe(dev->udev,
2801 out->desc.bEndpointAddress &
2802 USB_ENDPOINT_NUMBER_MASK);
2803 dev->ep_intr = status;
2804
2805 return 0;
2806}
2807
2808static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2809{
2810 struct lan78xx_priv *pdata = NULL;
2811 int ret;
2812 int i;
2813
2814 ret = lan78xx_get_endpoints(dev, intf);
2815
2816 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2817
2818 pdata = (struct lan78xx_priv *)(dev->data[0]);
2819 if (!pdata) {
2820 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2821 return -ENOMEM;
2822 }
2823
2824 pdata->dev = dev;
2825
2826 spin_lock_init(&pdata->rfe_ctl_lock);
2827 mutex_init(&pdata->dataport_mutex);
2828
2829 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2830
2831 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2832 pdata->vlan_table[i] = 0;
2833
2834 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2835
2836 dev->net->features = 0;
2837
2838 if (DEFAULT_TX_CSUM_ENABLE)
2839 dev->net->features |= NETIF_F_HW_CSUM;
2840
2841 if (DEFAULT_RX_CSUM_ENABLE)
2842 dev->net->features |= NETIF_F_RXCSUM;
2843
2844 if (DEFAULT_TSO_CSUM_ENABLE)
2845 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2846
2847 dev->net->hw_features = dev->net->features;
2848
cc89c323
WH
2849 ret = lan78xx_setup_irq_domain(dev);
2850 if (ret < 0) {
2851 netdev_warn(dev->net,
2852 "lan78xx_setup_irq_domain() failed : %d", ret);
2853 kfree(pdata);
2854 return ret;
2855 }
2856
55d7de9d
WH
2857 /* Init all registers */
2858 ret = lan78xx_reset(dev);
2859
ce85e13a
WH
2860 lan78xx_mdio_init(dev);
2861
55d7de9d
WH
2862 dev->net->flags |= IFF_MULTICAST;
2863
2864 pdata->wol = WAKE_MAGIC;
2865
2866 return 0;
2867}
2868
2869static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2870{
2871 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2872
cc89c323
WH
2873 lan78xx_remove_irq_domain(dev);
2874
ce85e13a
WH
2875 lan78xx_remove_mdio(dev);
2876
55d7de9d
WH
2877 if (pdata) {
2878 netif_dbg(dev, ifdown, dev->net, "free pdata");
2879 kfree(pdata);
2880 pdata = NULL;
2881 dev->data[0] = 0;
2882 }
2883}
2884
2885static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2886 struct sk_buff *skb,
2887 u32 rx_cmd_a, u32 rx_cmd_b)
2888{
2889 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2890 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2891 skb->ip_summed = CHECKSUM_NONE;
2892 } else {
2893 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2894 skb->ip_summed = CHECKSUM_COMPLETE;
2895 }
2896}
2897
e0c79ff6 2898static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
55d7de9d
WH
2899{
2900 int status;
2901
2902 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2903 skb_queue_tail(&dev->rxq_pause, skb);
2904 return;
2905 }
2906
55d7de9d
WH
2907 dev->net->stats.rx_packets++;
2908 dev->net->stats.rx_bytes += skb->len;
2909
74d79a2e
WH
2910 skb->protocol = eth_type_trans(skb, dev->net);
2911
55d7de9d
WH
2912 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2913 skb->len + sizeof(struct ethhdr), skb->protocol);
2914 memset(skb->cb, 0, sizeof(struct skb_data));
2915
2916 if (skb_defer_rx_timestamp(skb))
2917 return;
2918
2919 status = netif_rx(skb);
2920 if (status != NET_RX_SUCCESS)
2921 netif_dbg(dev, rx_err, dev->net,
2922 "netif_rx status %d\n", status);
2923}
2924
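/* Walk one bulk-in buffer, which may hold several frames. Each frame
 * is preceded by the little-endian command words rx_cmd_a/b/c and is
 * padded so the next frame starts on a 4-byte boundary (RXW_PADDING).
 * All but the last frame are cloned and passed up individually.
 */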
2925static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2926{
2927 if (skb->len < dev->net->hard_header_len)
2928 return 0;
2929
2930 while (skb->len > 0) {
2931 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2932 u16 rx_cmd_c;
2933 struct sk_buff *skb2;
2934 unsigned char *packet;
2935
2936 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2937 le32_to_cpus(&rx_cmd_a);
2938 skb_pull(skb, sizeof(rx_cmd_a));
2939
2940 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2941 le32_to_cpus(&rx_cmd_b);
2942 skb_pull(skb, sizeof(rx_cmd_b));
2943
2944 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2945 le16_to_cpus(&rx_cmd_c);
2946 skb_pull(skb, sizeof(rx_cmd_c));
2947
2948 packet = skb->data;
2949
2950 /* get the packet length */
2951 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2952 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2953
2954 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2955 netif_dbg(dev, rx_err, dev->net,
2956 "Error rx_cmd_a=0x%08x", rx_cmd_a);
2957 } else {
2958 /* last frame in this batch */
2959 if (skb->len == size) {
2960 lan78xx_rx_csum_offload(dev, skb,
2961 rx_cmd_a, rx_cmd_b);
2962
2963 skb_trim(skb, skb->len - 4); /* remove fcs */
2964 skb->truesize = size + sizeof(struct sk_buff);
2965
2966 return 1;
2967 }
2968
2969 skb2 = skb_clone(skb, GFP_ATOMIC);
2970 if (unlikely(!skb2)) {
2971 netdev_warn(dev->net, "Error allocating skb");
2972 return 0;
2973 }
2974
2975 skb2->len = size;
2976 skb2->data = packet;
2977 skb_set_tail_pointer(skb2, size);
2978
2979 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2980
2981 skb_trim(skb2, skb2->len - 4); /* remove fcs */
2982 skb2->truesize = size + sizeof(struct sk_buff);
2983
2984 lan78xx_skb_return(dev, skb2);
2985 }
2986
2987 skb_pull(skb, size);
2988
2989 /* padding bytes before the next frame starts */
2990 if (skb->len)
2991 skb_pull(skb, align_count);
2992 }
2993
55d7de9d
WH
2994 return 1;
2995}
2996
2997static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2998{
2999 if (!lan78xx_rx(dev, skb)) {
3000 dev->net->stats.rx_errors++;
3001 goto done;
3002 }
3003
3004 if (skb->len) {
3005 lan78xx_skb_return(dev, skb);
3006 return;
3007 }
3008
3009 netif_dbg(dev, rx_err, dev->net, "drop\n");
3010 dev->net->stats.rx_errors++;
3011done:
3012 skb_queue_tail(&dev->done, skb);
3013}
3014
3015static void rx_complete(struct urb *urb);
3016
3017static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3018{
3019 struct sk_buff *skb;
3020 struct skb_data *entry;
3021 unsigned long lockflags;
3022 size_t size = dev->rx_urb_size;
3023 int ret = 0;
3024
3025 skb = netdev_alloc_skb_ip_align(dev->net, size);
3026 if (!skb) {
3027 usb_free_urb(urb);
3028 return -ENOMEM;
3029 }
3030
3031 entry = (struct skb_data *)skb->cb;
3032 entry->urb = urb;
3033 entry->dev = dev;
3034 entry->length = 0;
3035
3036 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3037 skb->data, size, rx_complete, skb);
3038
3039 spin_lock_irqsave(&dev->rxq.lock, lockflags);
3040
3041 if (netif_device_present(dev->net) &&
3042 netif_running(dev->net) &&
3043 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3044 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3045 ret = usb_submit_urb(urb, GFP_ATOMIC);
3046 switch (ret) {
3047 case 0:
3048 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3049 break;
3050 case -EPIPE:
3051 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3052 break;
3053 case -ENODEV:
3054 netif_dbg(dev, ifdown, dev->net, "device gone\n");
3055 netif_device_detach(dev->net);
3056 break;
3057 case -EHOSTUNREACH:
3058 ret = -ENOLINK;
3059 break;
3060 default:
3061 netif_dbg(dev, rx_err, dev->net,
3062 "rx submit, %d\n", ret);
3063 tasklet_schedule(&dev->bh);
3064 }
3065 } else {
3066 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3067 ret = -ENOLINK;
3068 }
3069 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3070 if (ret) {
3071 dev_kfree_skb_any(skb);
3072 usb_free_urb(urb);
3073 }
3074 return ret;
3075}
3076
3077static void rx_complete(struct urb *urb)
3078{
3079 struct sk_buff *skb = (struct sk_buff *)urb->context;
3080 struct skb_data *entry = (struct skb_data *)skb->cb;
3081 struct lan78xx_net *dev = entry->dev;
3082 int urb_status = urb->status;
3083 enum skb_state state;
3084
3085 skb_put(skb, urb->actual_length);
3086 state = rx_done;
3087 entry->urb = NULL;
3088
3089 switch (urb_status) {
3090 case 0:
3091 if (skb->len < dev->net->hard_header_len) {
3092 state = rx_cleanup;
3093 dev->net->stats.rx_errors++;
3094 dev->net->stats.rx_length_errors++;
3095 netif_dbg(dev, rx_err, dev->net,
3096 "rx length %d\n", skb->len);
3097 }
3098 usb_mark_last_busy(dev->udev);
3099 break;
3100 case -EPIPE:
3101 dev->net->stats.rx_errors++;
3102 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3103 /* FALLTHROUGH */
3104 case -ECONNRESET: /* async unlink */
3105 case -ESHUTDOWN: /* hardware gone */
3106 netif_dbg(dev, ifdown, dev->net,
3107 "rx shutdown, code %d\n", urb_status);
3108 state = rx_cleanup;
3109 entry->urb = urb;
3110 urb = NULL;
3111 break;
3112 case -EPROTO:
3113 case -ETIME:
3114 case -EILSEQ:
3115 dev->net->stats.rx_errors++;
3116 state = rx_cleanup;
3117 entry->urb = urb;
3118 urb = NULL;
3119 break;
3120
3121 /* data overrun ... flush fifo? */
3122 case -EOVERFLOW:
3123 dev->net->stats.rx_over_errors++;
3124 /* FALLTHROUGH */
3125
3126 default:
3127 state = rx_cleanup;
3128 dev->net->stats.rx_errors++;
3129 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3130 break;
3131 }
3132
3133 state = defer_bh(dev, skb, &dev->rxq, state);
3134
3135 if (urb) {
3136 if (netif_running(dev->net) &&
3137 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3138 state != unlink_start) {
3139 rx_submit(dev, urb, GFP_ATOMIC);
3140 return;
3141 }
3142 usb_free_urb(urb);
3143 }
3144 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3145}
3146
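/* TX bottom half: coalesce pending skbs (up to MAX_SINGLE_PACKET_SIZE
 * in total, each padded to a 4-byte boundary) into one bulk-out URB,
 * except for GSO skbs, which are always sent on their own.
 */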
3147static void lan78xx_tx_bh(struct lan78xx_net *dev)
3148{
3149 int length;
3150 struct urb *urb = NULL;
3151 struct skb_data *entry;
3152 unsigned long flags;
3153 struct sk_buff_head *tqp = &dev->txq_pend;
3154 struct sk_buff *skb, *skb2;
3155 int ret;
3156 int count, pos;
3157 int skb_totallen, pkt_cnt;
3158
3159 skb_totallen = 0;
3160 pkt_cnt = 0;
74d79a2e
WH
3161 count = 0;
3162 length = 0;
55d7de9d
WH
3163 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3164 if (skb_is_gso(skb)) {
3165 if (pkt_cnt) {
3166 /* handle previous packets first */
3167 break;
3168 }
74d79a2e
WH
3169 count = 1;
3170 length = skb->len - TX_OVERHEAD;
55d7de9d
WH
3171 skb2 = skb_dequeue(tqp);
3172 goto gso_skb;
3173 }
3174
3175 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3176 break;
3177 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3178 pkt_cnt++;
3179 }
3180
3181 /* copy to a single skb */
3182 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3183 if (!skb)
3184 goto drop;
3185
3186 skb_put(skb, skb_totallen);
3187
3188 for (count = pos = 0; count < pkt_cnt; count++) {
3189 skb2 = skb_dequeue(tqp);
3190 if (skb2) {
74d79a2e 3191 length += (skb2->len - TX_OVERHEAD);
55d7de9d
WH
3192 memcpy(skb->data + pos, skb2->data, skb2->len);
3193 pos += roundup(skb2->len, sizeof(u32));
3194 dev_kfree_skb(skb2);
55d7de9d
WH
3195 }
3196 }
3197
55d7de9d
WH
3198gso_skb:
3199 urb = usb_alloc_urb(0, GFP_ATOMIC);
d7c4e84e 3200 if (!urb)
55d7de9d 3201 goto drop;
55d7de9d
WH
3202
3203 entry = (struct skb_data *)skb->cb;
3204 entry->urb = urb;
3205 entry->dev = dev;
3206 entry->length = length;
74d79a2e 3207 entry->num_of_packet = count;
55d7de9d
WH
3208
3209 spin_lock_irqsave(&dev->txq.lock, flags);
3210 ret = usb_autopm_get_interface_async(dev->intf);
3211 if (ret < 0) {
3212 spin_unlock_irqrestore(&dev->txq.lock, flags);
3213 goto drop;
3214 }
3215
3216 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3217 skb->data, skb->len, tx_complete, skb);
3218
3219 if (length % dev->maxpacket == 0) {
3220 /* send a zero-length packet (URB_ZERO_PACKET) */
3221 urb->transfer_flags |= URB_ZERO_PACKET;
3222 }
3223
3224#ifdef CONFIG_PM
3225 /* if this triggers, the device is still asleep */
3226 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3227 /* transmission will be done in resume */
3228 usb_anchor_urb(urb, &dev->deferred);
3229 /* no point in processing more packets */
3230 netif_stop_queue(dev->net);
3231 usb_put_urb(urb);
3232 spin_unlock_irqrestore(&dev->txq.lock, flags);
3233 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3234 return;
3235 }
3236#endif
3237
3238 ret = usb_submit_urb(urb, GFP_ATOMIC);
3239 switch (ret) {
3240 case 0:
860e9538 3241 netif_trans_update(dev->net);
55d7de9d
WH
3242 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3243 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3244 netif_stop_queue(dev->net);
3245 break;
3246 case -EPIPE:
3247 netif_stop_queue(dev->net);
3248 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3249 usb_autopm_put_interface_async(dev->intf);
3250 break;
3251 default:
3252 usb_autopm_put_interface_async(dev->intf);
3253 netif_dbg(dev, tx_err, dev->net,
3254 "tx: submit urb err %d\n", ret);
3255 break;
3256 }
3257
3258 spin_unlock_irqrestore(&dev->txq.lock, flags);
3259
3260 if (ret) {
3261 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3262drop:
3263 dev->net->stats.tx_dropped++;
3264 if (skb)
3265 dev_kfree_skb_any(skb);
3266 usb_free_urb(urb);
3267 } else
3268 netif_dbg(dev, tx_queued, dev->net,
3269 "> tx, len %d, type 0x%x\n", length, skb->protocol);
3270}
3271
3272static void lan78xx_rx_bh(struct lan78xx_net *dev)
3273{
3274 struct urb *urb;
3275 int i;
3276
3277 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3278 for (i = 0; i < 10; i++) {
3279 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3280 break;
3281 urb = usb_alloc_urb(0, GFP_ATOMIC);
3282 if (urb)
3283 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3284 return;
3285 }
3286
3287 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3288 tasklet_schedule(&dev->bh);
3289 }
3290 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3291 netif_wake_queue(dev->net);
3292}
3293
3294static void lan78xx_bh(unsigned long param)
3295{
3296 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3297 struct sk_buff *skb;
3298 struct skb_data *entry;
3299
55d7de9d
WH
3300 while ((skb = skb_dequeue(&dev->done))) {
3301 entry = (struct skb_data *)(skb->cb);
3302 switch (entry->state) {
3303 case rx_done:
3304 entry->state = rx_cleanup;
3305 rx_process(dev, skb);
3306 continue;
3307 case tx_done:
3308 usb_free_urb(entry->urb);
3309 dev_kfree_skb(skb);
3310 continue;
3311 case rx_cleanup:
3312 usb_free_urb(entry->urb);
3313 dev_kfree_skb(skb);
3314 continue;
3315 default:
3316 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3317 return;
3318 }
55d7de9d
WH
3319 }
3320
3321 if (netif_device_present(dev->net) && netif_running(dev->net)) {
20ff5565
WH
3322 /* reset update timer delta */
3323 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3324 dev->delta = 1;
3325 mod_timer(&dev->stat_monitor,
3326 jiffies + STAT_UPDATE_TIMER);
3327 }
3328
55d7de9d
WH
3329 if (!skb_queue_empty(&dev->txq_pend))
3330 lan78xx_tx_bh(dev);
3331
3332 if (!timer_pending(&dev->delay) &&
3333 !test_bit(EVENT_RX_HALT, &dev->flags))
3334 lan78xx_rx_bh(dev);
3335 }
3336}
3337
3338static void lan78xx_delayedwork(struct work_struct *work)
3339{
3340 int status;
3341 struct lan78xx_net *dev;
3342
3343 dev = container_of(work, struct lan78xx_net, wq.work);
3344
3345 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3346 unlink_urbs(dev, &dev->txq);
3347 status = usb_autopm_get_interface(dev->intf);
3348 if (status < 0)
3349 goto fail_pipe;
3350 status = usb_clear_halt(dev->udev, dev->pipe_out);
3351 usb_autopm_put_interface(dev->intf);
3352 if (status < 0 &&
3353 status != -EPIPE &&
3354 status != -ESHUTDOWN) {
3355 if (netif_msg_tx_err(dev))
3356fail_pipe:
3357 netdev_err(dev->net,
3358 "can't clear tx halt, status %d\n",
3359 status);
3360 } else {
3361 clear_bit(EVENT_TX_HALT, &dev->flags);
3362 if (status != -ESHUTDOWN)
3363 netif_wake_queue(dev->net);
3364 }
3365 }
3366 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3367 unlink_urbs(dev, &dev->rxq);
3368 status = usb_autopm_get_interface(dev->intf);
3369 if (status < 0)
3370 goto fail_halt;
3371 status = usb_clear_halt(dev->udev, dev->pipe_in);
3372 usb_autopm_put_interface(dev->intf);
3373 if (status < 0 &&
3374 status != -EPIPE &&
3375 status != -ESHUTDOWN) {
3376 if (netif_msg_rx_err(dev))
3377fail_halt:
3378 netdev_err(dev->net,
3379 "can't clear rx halt, status %d\n",
3380 status);
3381 } else {
3382 clear_bit(EVENT_RX_HALT, &dev->flags);
3383 tasklet_schedule(&dev->bh);
3384 }
3385 }
3386
3387 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3388 int ret = 0;
3389
3390 clear_bit(EVENT_LINK_RESET, &dev->flags);
3391 status = usb_autopm_get_interface(dev->intf);
3392 if (status < 0)
3393 goto skip_reset;
3394 ret = lan78xx_link_reset(dev);
	if (ret < 0) {
3395 usb_autopm_put_interface(dev->intf);
3396skip_reset:
3397 netdev_info(dev->net, "link reset failed (%d)\n",
3398 ret);
3399 } else {
3400 usb_autopm_put_interface(dev->intf);
3401 }
3402 }
20ff5565
WH
3403
3404 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3405 lan78xx_update_stats(dev);
3406
3407 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3408
3409 mod_timer(&dev->stat_monitor,
3410 jiffies + (STAT_UPDATE_TIMER * dev->delta));
3411
3412 dev->delta = min((dev->delta * 2), 50);
3413 }
55d7de9d
WH
3414}
3415
3416static void intr_complete(struct urb *urb)
3417{
3418 struct lan78xx_net *dev = urb->context;
3419 int status = urb->status;
3420
3421 switch (status) {
3422 /* success */
3423 case 0:
3424 lan78xx_status(dev, urb);
3425 break;
3426
3427 /* software-driven interface shutdown */
3428 case -ENOENT: /* urb killed */
3429 case -ESHUTDOWN: /* hardware gone */
3430 netif_dbg(dev, ifdown, dev->net,
3431 "intr shutdown, code %d\n", status);
3432 return;
3433
3434 /* NOTE: not throttling like RX/TX, since this endpoint
3435 * already polls infrequently
3436 */
3437 default:
3438 netdev_dbg(dev->net, "intr status %d\n", status);
3439 break;
3440 }
3441
3442 if (!netif_running(dev->net))
3443 return;
3444
3445 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3446 status = usb_submit_urb(urb, GFP_ATOMIC);
3447 if (status != 0)
3448 netif_err(dev, timer, dev->net,
3449 "intr resubmit --> %d\n", status);
3450}
3451
3452static void lan78xx_disconnect(struct usb_interface *intf)
3453{
3454 struct lan78xx_net *dev;
3455 struct usb_device *udev;
3456 struct net_device *net;
3457
3458 dev = usb_get_intfdata(intf);
3459 usb_set_intfdata(intf, NULL);
3460 if (!dev)
3461 return;
3462
3463 udev = interface_to_usbdev(intf);
3464
3465 net = dev->net;
3466 unregister_netdev(net);
3467
3468 cancel_delayed_work_sync(&dev->wq);
3469
3470 usb_scuttle_anchored_urbs(&dev->deferred);
3471
3472 lan78xx_unbind(dev, intf);
3473
3474 usb_kill_urb(dev->urb_intr);
3475 usb_free_urb(dev->urb_intr);
3476
3477 free_netdev(net);
3478 usb_put_dev(udev);
3479}
3480
e0c79ff6 3481static void lan78xx_tx_timeout(struct net_device *net)
55d7de9d
WH
3482{
3483 struct lan78xx_net *dev = netdev_priv(net);
3484
3485 unlink_urbs(dev, &dev->txq);
3486 tasklet_schedule(&dev->bh);
3487}
3488
3489static const struct net_device_ops lan78xx_netdev_ops = {
3490 .ndo_open = lan78xx_open,
3491 .ndo_stop = lan78xx_stop,
3492 .ndo_start_xmit = lan78xx_start_xmit,
3493 .ndo_tx_timeout = lan78xx_tx_timeout,
3494 .ndo_change_mtu = lan78xx_change_mtu,
3495 .ndo_set_mac_address = lan78xx_set_mac_addr,
3496 .ndo_validate_addr = eth_validate_addr,
3497 .ndo_do_ioctl = lan78xx_ioctl,
3498 .ndo_set_rx_mode = lan78xx_set_multicast,
3499 .ndo_set_features = lan78xx_set_features,
3500 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3501 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
3502};
3503
20ff5565
WH
3504static void lan78xx_stat_monitor(unsigned long param)
3505{
3506 struct lan78xx_net *dev;
3507
3508 dev = (struct lan78xx_net *)param;
3509
3510 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3511}
3512
55d7de9d
WH
3513static int lan78xx_probe(struct usb_interface *intf,
3514 const struct usb_device_id *id)
3515{
3516 struct lan78xx_net *dev;
3517 struct net_device *netdev;
3518 struct usb_device *udev;
3519 int ret;
3520 unsigned maxp;
3521 unsigned period;
3522 u8 *buf = NULL;
3523
3524 udev = interface_to_usbdev(intf);
3525 udev = usb_get_dev(udev);
3526
3527 ret = -ENOMEM;
3528 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3529 if (!netdev) {
3530 dev_err(&intf->dev, "Error: OOM\n");
3531 goto out1;
3532 }
3533
3534 /* netdev_printk() needs this */
3535 SET_NETDEV_DEV(netdev, &intf->dev);
3536
3537 dev = netdev_priv(netdev);
3538 dev->udev = udev;
3539 dev->intf = intf;
3540 dev->net = netdev;
3541 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3542 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3543
3544 skb_queue_head_init(&dev->rxq);
3545 skb_queue_head_init(&dev->txq);
3546 skb_queue_head_init(&dev->done);
3547 skb_queue_head_init(&dev->rxq_pause);
3548 skb_queue_head_init(&dev->txq_pend);
3549 mutex_init(&dev->phy_mutex);
3550
3551 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3552 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3553 init_usb_anchor(&dev->deferred);
3554
3555 netdev->netdev_ops = &lan78xx_netdev_ops;
3556 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3557 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3558
20ff5565
WH
3559 dev->stat_monitor.function = lan78xx_stat_monitor;
3560 dev->stat_monitor.data = (unsigned long)dev;
3561 dev->delta = 1;
3562 init_timer(&dev->stat_monitor);
3563
3564 mutex_init(&dev->stats.access_lock);
3565
55d7de9d
WH
3566 ret = lan78xx_bind(dev, intf);
3567 if (ret < 0)
3568 goto out2;
3569 strcpy(netdev->name, "eth%d");
3570
3571 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3572 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3573
f77f0aee
JW
3574 /* MTU range: 68 - 9000 */
3575 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3576
55d7de9d
WH
3577 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3578 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3579 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3580
3581 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3582 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3583
3584 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3585 dev->ep_intr->desc.bEndpointAddress &
3586 USB_ENDPOINT_NUMBER_MASK);
3587 period = dev->ep_intr->desc.bInterval;
3588
3589 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3590 buf = kmalloc(maxp, GFP_KERNEL);
3591 if (buf) {
3592 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3593 if (!dev->urb_intr) {
51920830 3594 ret = -ENOMEM;
55d7de9d
WH
3595 kfree(buf);
3596 goto out3;
3597 } else {
3598 usb_fill_int_urb(dev->urb_intr, dev->udev,
3599 dev->pipe_intr, buf, maxp,
3600 intr_complete, dev, period);
3601 }
3602 }
3603
3604 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3605
3606 /* driver requires remote-wakeup capability during autosuspend. */
3607 intf->needs_remote_wakeup = 1;
3608
3609 ret = register_netdev(netdev);
3610 if (ret != 0) {
3611 netif_err(dev, probe, netdev, "couldn't register the device\n");
3612 goto out2;
3613 }
3614
3615 usb_set_intfdata(intf, dev);
3616
3617 ret = device_set_wakeup_enable(&udev->dev, true);
3618
3619 /* the default 2 s autosuspend delay costs more in
3620  * overhead than it saves; default to 10 s instead.
3621  */
3622 pm_runtime_set_autosuspend_delay(&udev->dev,
3623 DEFAULT_AUTOSUSPEND_DELAY);
3624
3625 return 0;
3626
55d7de9d
WH
3627out3:
3628 lan78xx_unbind(dev, intf);
3629out2:
3630 free_netdev(netdev);
3631out1:
3632 usb_put_dev(udev);
3633
3634 return ret;
3635}
3636
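/* CRC-16 (polynomial 0x8005, LSB-first, initial value 0xFFFF) over a
 * byte buffer, matching what the wake-up frame filter hardware is
 * presumed to compute. e.g. lan78xx_wakeframe_crc16(arp_type, 2)
 * yields the CRC programmed into WUF_CFG for ARP wake-up below.
 */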
3637static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3638{
3639 const u16 crc16poly = 0x8005;
3640 int i;
3641 u16 bit, crc, msb;
3642 u8 data;
3643
3644 crc = 0xFFFF;
3645 for (i = 0; i < len; i++) {
3646 data = *buf++;
3647 for (bit = 0; bit < 8; bit++) {
3648 msb = crc >> 15;
3649 crc <<= 1;
3650
3651 if (msb ^ (u16)(data & 1)) {
3652 crc ^= crc16poly;
3653 crc |= (u16)0x0001U;
3654 }
3655 data >>= 1;
3656 }
3657 }
3658
3659 return crc;
3660}
3661
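/* Configure wake-on-LAN for system suspend: the MAC is quiesced, then
 * WUCSR/WUF_CFG/WUF_MASK and PMT_CTL are programmed according to the
 * requested WAKE_* flags, and the receiver is re-enabled so wake-up
 * frames can be detected while suspended.
 */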
3662static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3663{
3664 u32 buf;
3665 int ret;
3666 int mask_index;
3667 u16 crc;
3668 u32 temp_wucsr;
3669 u32 temp_pmt_ctl;
3670 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3671 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3672 const u8 arp_type[2] = { 0x08, 0x06 };
3673
3674 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3675 buf &= ~MAC_TX_TXEN_;
3676 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3677 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3678 buf &= ~MAC_RX_RXEN_;
3679 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3680
3681 ret = lan78xx_write_reg(dev, WUCSR, 0);
3682 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3683 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3684
3685 temp_wucsr = 0;
3686
3687 temp_pmt_ctl = 0;
3688 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3689 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3690 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3691
3692 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3693 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3694
3695 mask_index = 0;
3696 if (wol & WAKE_PHY) {
3697 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3698
3699 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3700 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3701 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3702 }
3703 if (wol & WAKE_MAGIC) {
3704 temp_wucsr |= WUCSR_MPEN_;
3705
3706 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3707 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3708 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3709 }
3710 if (wol & WAKE_BCAST) {
3711 temp_wucsr |= WUCSR_BCST_EN_;
3712
3713 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3714 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3715 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3716 }
3717 if (wol & WAKE_MCAST) {
3718 temp_wucsr |= WUCSR_WAKE_EN_;
3719
3720 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3721 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3722 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3723 WUF_CFGX_EN_ |
3724 WUF_CFGX_TYPE_MCAST_ |
3725 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3726 (crc & WUF_CFGX_CRC16_MASK_));
3727
3728 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3729 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3730 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3731 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3732 mask_index++;
3733
3734 /* for IPv6 Multicast */
3735 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3736 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3737 WUF_CFGX_EN_ |
3738 WUF_CFGX_TYPE_MCAST_ |
3739 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3740 (crc & WUF_CFGX_CRC16_MASK_));
3741
3742 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3743 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3744 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3745 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3746 mask_index++;
3747
3748 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3749 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3750 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3751 }
3752 if (wol & WAKE_UCAST) {
3753 temp_wucsr |= WUCSR_PFDA_EN_;
3754
3755 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3756 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3757 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3758 }
3759 if (wol & WAKE_ARP) {
3760 temp_wucsr |= WUCSR_WAKE_EN_;
3761
3762 /* set WUF_CFG & WUF_MASK
3763 * for packettype (offset 12,13) = ARP (0x0806)
3764 */
3765 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3766 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3767 WUF_CFGX_EN_ |
3768 WUF_CFGX_TYPE_ALL_ |
3769 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3770 (crc & WUF_CFGX_CRC16_MASK_));
3771
3772 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3773 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3774 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3775 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3776 mask_index++;
3777
3778 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3779 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3780 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3781 }
3782
3783 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3784
3785 /* when multiple WOL bits are set */
3786 if (hweight_long((unsigned long)wol) > 1) {
3787 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3788 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3789 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3790 }
3791 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3792
3793 /* clear WUPS */
3794 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3795 buf |= PMT_CTL_WUPS_MASK_;
3796 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3797
3798 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3799 buf |= MAC_RX_RXEN_;
3800 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3801
3802 return 0;
3803}
3804
e0c79ff6 3805static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
55d7de9d
WH
3806{
3807 struct lan78xx_net *dev = usb_get_intfdata(intf);
3808 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3809 u32 buf;
3810 int ret;
3811 int event;
3812
55d7de9d
WH
3813 event = message.event;
3814
3815 if (!dev->suspend_count++) {
3816 spin_lock_irq(&dev->txq.lock);
3817 /* don't autosuspend while transmitting */
3818 if ((skb_queue_len(&dev->txq) ||
3819 skb_queue_len(&dev->txq_pend)) &&
3820 PMSG_IS_AUTO(message)) {
3821 spin_unlock_irq(&dev->txq.lock);
3822 ret = -EBUSY;
3823 goto out;
3824 } else {
3825 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3826 spin_unlock_irq(&dev->txq.lock);
3827 }
3828
3829 /* stop TX & RX */
3830 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3831 buf &= ~MAC_TX_TXEN_;
3832 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3833 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3834 buf &= ~MAC_RX_RXEN_;
3835 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3836
3837 /* empty out the RX and TX queues */
3838 netif_device_detach(dev->net);
3839 lan78xx_terminate_urbs(dev);
3840 usb_kill_urb(dev->urb_intr);
3841
3842 /* reattach */
3843 netif_device_attach(dev->net);
3844 }
3845
3846 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
20ff5565
WH
3847 del_timer(&dev->stat_monitor);
3848
55d7de9d
WH
3849 if (PMSG_IS_AUTO(message)) {
3850 /* auto suspend (selective suspend) */
3851 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3852 buf &= ~MAC_TX_TXEN_;
3853 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3854 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3855 buf &= ~MAC_RX_RXEN_;
3856 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3857
3858 ret = lan78xx_write_reg(dev, WUCSR, 0);
3859 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3860 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3861
3862 /* set goodframe wakeup */
3863 ret = lan78xx_read_reg(dev, WUCSR, &buf);
3864
3865 buf |= WUCSR_RFE_WAKE_EN_;
3866 buf |= WUCSR_STORE_WAKE_;
3867
3868 ret = lan78xx_write_reg(dev, WUCSR, buf);
3869
3870 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3871
3872 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3873 buf |= PMT_CTL_RES_CLR_WKP_STS_;
3874
3875 buf |= PMT_CTL_PHY_WAKE_EN_;
3876 buf |= PMT_CTL_WOL_EN_;
3877 buf &= ~PMT_CTL_SUS_MODE_MASK_;
3878 buf |= PMT_CTL_SUS_MODE_3_;
3879
3880 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3881
3882 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3883
3884 buf |= PMT_CTL_WUPS_MASK_;
3885
3886 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3887
3888 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3889 buf |= MAC_RX_RXEN_;
3890 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3891 } else {
3892 lan78xx_set_suspend(dev, pdata->wol);
3893 }
3894 }
3895
49d28b56 3896 ret = 0;
55d7de9d
WH
3897out:
3898 return ret;
3899}
3900
e0c79ff6 3901static int lan78xx_resume(struct usb_interface *intf)
55d7de9d
WH
3902{
3903 struct lan78xx_net *dev = usb_get_intfdata(intf);
3904 struct sk_buff *skb;
3905 struct urb *res;
3906 int ret;
3907 u32 buf;
3908
20ff5565
WH
3909 if (!timer_pending(&dev->stat_monitor)) {
3910 dev->delta = 1;
3911 mod_timer(&dev->stat_monitor,
3912 jiffies + STAT_UPDATE_TIMER);
3913 }
3914
55d7de9d
WH
3915 if (!--dev->suspend_count) {
3916 /* resume interrupt URBs */
3917 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3918 usb_submit_urb(dev->urb_intr, GFP_NOIO);
3919
3920 spin_lock_irq(&dev->txq.lock);
3921 while ((res = usb_get_from_anchor(&dev->deferred))) {
3922 skb = (struct sk_buff *)res->context;
3923 ret = usb_submit_urb(res, GFP_ATOMIC);
3924 if (ret < 0) {
3925 dev_kfree_skb_any(skb);
3926 usb_free_urb(res);
3927 usb_autopm_put_interface_async(dev->intf);
3928 } else {
860e9538 3929 netif_trans_update(dev->net);
55d7de9d
WH
3930 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3931 }
3932 }
3933
3934 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3935 spin_unlock_irq(&dev->txq.lock);
3936
3937 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3938 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
3939 netif_start_queue(dev->net);
3940 tasklet_schedule(&dev->bh);
3941 }
3942 }
3943
3944 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3945 ret = lan78xx_write_reg(dev, WUCSR, 0);
3946 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3947
3948 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3949 WUCSR2_ARP_RCD_ |
3950 WUCSR2_IPV6_TCPSYN_RCD_ |
3951 WUCSR2_IPV4_TCPSYN_RCD_);
3952
3953 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3954 WUCSR_EEE_RX_WAKE_ |
3955 WUCSR_PFDA_FR_ |
3956 WUCSR_RFE_WAKE_FR_ |
3957 WUCSR_WUFR_ |
3958 WUCSR_MPR_ |
3959 WUCSR_BCST_FR_);
3960
3961 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3962 buf |= MAC_TX_TXEN_;
3963 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3964
3965 return 0;
3966}
3967
e0c79ff6 3968static int lan78xx_reset_resume(struct usb_interface *intf)
55d7de9d
WH
3969{
3970 struct lan78xx_net *dev = usb_get_intfdata(intf);
3971
3972 lan78xx_reset(dev);
ce85e13a
WH
3973
3974 lan78xx_phy_init(dev);
3975
55d7de9d
WH
3976 return lan78xx_resume(intf);
3977}
3978
3979static const struct usb_device_id products[] = {
3980 {
3981 /* LAN7800 USB Gigabit Ethernet Device */
3982 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
3983 },
3984 {
3985 /* LAN7850 USB Gigabit Ethernet Device */
3986 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
3987 },
02dc1f3d
WH
3988 {
3989 /* LAN7801 USB Gigabit Ethernet Device */
3990 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
3991 },
55d7de9d
WH
3992 {},
3993};
3994MODULE_DEVICE_TABLE(usb, products);
3995
3996static struct usb_driver lan78xx_driver = {
3997 .name = DRIVER_NAME,
3998 .id_table = products,
3999 .probe = lan78xx_probe,
4000 .disconnect = lan78xx_disconnect,
4001 .suspend = lan78xx_suspend,
4002 .resume = lan78xx_resume,
4003 .reset_resume = lan78xx_reset_resume,
4004 .supports_autosuspend = 1,
4005 .disable_hub_initiated_lpm = 1,
4006};
4007
4008module_usb_driver(lan78xx_driver);
4009
4010MODULE_AUTHOR(DRIVER_AUTHOR);
4011MODULE_DESCRIPTION(DRIVER_DESC);
4012MODULE_LICENSE("GPL");