/*
 * Copyright (C) 2015 Microchip Technology
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"
#define DRIVER_VERSION	"1.0.6"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)
#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define TX_OVERHEAD			(8)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)

#define MII_READ			1
#define MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (mSec)*/
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)

static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};

struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};

#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9

struct statstage {
	struct mutex access_lock;	/* for stats access */
	struct lan78xx_statstage saved;
	struct lan78xx_statstage rollover_count;
	struct lan78xx_statstage rollover_max;
	struct lan78xx_statstage64 curr_stat;
};

struct irq_domain_data {
	struct irq_domain *irqdomain;
	unsigned int phyirq;
	struct irq_chip *irqchip;
	irq_flow_handler_t irq_handler;
	u32 irqenable;
	struct mutex irq_lock;		/* for irq bus access */
};

struct lan78xx_net {
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;
	void *driver_priv;

	int rx_qlen;
	int tx_qlen;
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head done;
	struct sk_buff_head rxq_pause;
	struct sk_buff_head txq_pend;

	struct tasklet_struct bh;
	struct delayed_work wq;

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int msg_enable;

	struct urb *urb_intr;
	struct usb_anchor deferred;

	struct mutex phy_mutex; /* for phy access */
	unsigned pipe_in, pipe_out, pipe_intr;

	u32 hard_mtu;	/* count any extra framing */
	size_t rx_urb_size;	/* size for rx urbs */

	unsigned long flags;

	wait_queue_head_t *wait;
	unsigned char suspend_count;

	unsigned maxpacket;
	struct timer_list delay;
	struct timer_list stat_monitor;

	unsigned long data[5];

	int link_on;
	u8 mdix_ctrl;

	u32 chipid;
	u32 chiprev;
	struct mii_bus *mdiobus;
	phy_interface_t interface;

	int fc_autoneg;
	u8 fc_request_control;

	int delta;
	struct statstage stats;

	struct irq_domain_data domain_data;
};

/* define external phy id */
#define PHY_LAN8835			(0x0007C130)
#define PHY_KSZ9031RNX			(0x00221620)

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

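/* Register access helpers.  Device registers are reached over the USB
 * control pipe as vendor-specific requests; the 4-byte payload travels in
 * little-endian byte order on the wire.  The scratch word is kmalloc'd
 * rather than taken from the stack because usb_control_msg() needs a
 * DMA-able buffer.
 */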
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = 0x%x", ret);
	}

	kfree(stats);

	return ret;
}

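/* The hardware statistics counters are 32 bits wide and wrap.  Each fresh
 * read is compared against the previously saved snapshot: a smaller value
 * implies the counter rolled over since the last read, so a per-counter
 * rollover count is kept to extend the counters in software.
 */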
#define check_counter_rollover(struct1, dev_stats, member) {	\
	if (struct1->member < dev_stats.saved.member)		\
		dev_stats.rollover_count.member++;		\
	}

static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

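/* Fold the 32-bit hardware counters and accumulated rollover counts into
 * the 64-bit values reported to ethtool:
 *   curr_stat = hw_counter + rollover_count * (rollover_max + 1)
 * Walking the structs as flat u32/u64 arrays relies on lan78xx_statstage
 * and lan78xx_statstage64 declaring their members in exactly the same order.
 */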
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

/* Loop until the read is completed, with timeout; called with phy_mutex held */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}

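/* Compose a MII_ACC register value for an MDIO transaction: PHY address,
 * register index, read/write direction, plus the BUSY bit that kicks off
 * the transaction when the value is written.
 */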
static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}

static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		return retval;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		data[i] = val & 0xFF;
		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if ((ret == 0) && (sig == EEPROM_INDICATOR))
		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
	else
		ret = -EINVAL;

	return ret;
}

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

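/* Dataport access.  Internal RAMs such as the VLAN/multicast hash filter
 * are reached indirectly through the DP_SEL/DP_ADDR/DP_DATA/DP_CMD
 * registers, one 32-bit word per DP_CMD_WRITE_, polling DP_SEL_DPRDY_
 * between words.
 */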
static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");

	return -EIO;
}

static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}

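/* ndo_set_rx_mode callback.  It runs in atomic context, so the filter
 * tables are only staged here under the rfe_ctl spinlock; the actual USB
 * register writes are deferred to lan78xx_deferred_multicast_write() via
 * the set_multicast work item.
 */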
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] =
			pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
						(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

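/* Program MAC flow control from either the autonegotiated pause resolution
 * (lcladv/rmtadv) or the user-requested settings.  The fct_flow values are
 * per-USB-speed constants (presumably vendor-recommended FIFO thresholds)
 * and must be written before pause frames are enabled in the FLOW register.
 */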
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}

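/* Handle a PHY link change signalled through the interrupt endpoint.  On
 * link down the MAC is reset and the statistics timer stopped; on link up
 * the U1/U2 low-power states are configured for SuperSpeed hosts (U2 stays
 * disabled at 1000 Mbps), flow control is resolved from the advertisement
 * registers and the statistics timer is (re)armed.
 */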
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}
	}

	return ret;
}

/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.  hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

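/* Parse the payload of a completed interrupt-endpoint URB: the device
 * reports a 4-byte little-endian status word whose bits correspond to the
 * INT_EP_* sources defined above.  Only the PHY interrupt is acted upon
 * here; it triggers a deferred link reset and, if an IRQ mapping exists,
 * re-dispatches through the lan78xx IRQ domain.
 */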
static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	memcpy(&intdata, urb->transfer_buffer, 4);
	le32_to_cpus(&intdata);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0)
			generic_handle_irq(dev->domain_data.phyirq);
	} else
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = 0;
	if (wol->wolopts & WAKE_UCAST)
		pdata->wol |= WAKE_UCAST;
	if (wol->wolopts & WAKE_MCAST)
		pdata->wol |= WAKE_MCAST;
	if (wol->wolopts & WAKE_BCAST)
		pdata->wol |= WAKE_BCAST;
	if (wol->wolopts & WAKE_MAGIC)
		pdata->wol |= WAKE_MAGIC;
	if (wol->wolopts & WAKE_PHY)
		pdata->wol |= WAKE_PHY;
	if (wol->wolopts & WAKE_ARP)
		pdata->wol |= WAKE_ARP;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}

static u32 lan78xx_get_link(struct net_device *net)
{
	phy_read_status(net->phydev);

	return net->phydev->link;
}

static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}

static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}

static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}

static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}

static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		u32 mii_adv;
		u32 advertising;

		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, ecmd.link_modes.advertising);

		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

		ethtool_convert_legacy_u32_to_link_mode(
			ecmd.link_modes.advertising, advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}

static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
};

static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, rq, cmd);
}

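/* MAC address selection at init time, in decreasing order of preference:
 * the address already programmed into RX_ADDRL/RX_ADDRH, a valid address
 * found in EEPROM or OTP, and finally a randomly generated one.  The chosen
 * address is also written to perfect-filter slot 0 (MAF_LO/MAF_HI).
 */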
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		/* reading mac address from EEPROM or OTP */
		if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
					 addr) == 0) ||
		    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
				      addr) == 0)) {
			if (is_valid_ether_addr(addr)) {
				/* eeprom values are valid so use them */
				netif_dbg(dev, ifup, dev->net,
					  "MAC address read from EEPROM");
			} else {
				/* generate random MAC */
				random_ether_addr(addr);
				netif_dbg(dev, ifup, dev->net,
					  "MAC address set to random addr");
			}

			addr_lo = addr[0] | (addr[1] << 8) |
				  (addr[2] << 16) | (addr[3] << 24);
			addr_hi = addr[4] | (addr[5] << 8);

			ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
			ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
		} else {
			/* generate random MAC */
			random_ether_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}
	}

	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}

/* MDIO read and write wrappers for phylib */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}

static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";

	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	ret = mdiobus_register(dev->mdiobus);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}

static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}

static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}

static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}

static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}

static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};

static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}

static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are the only two callbacks executed in a non-atomic context.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}

static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};

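/* Expose the device's interrupt-endpoint status bits as a Linux IRQ domain
 * so that phylib can request the PHY interrupt (INT_EP_PHY) like any other
 * interrupt line; INT_EP_CTL serves as the mask register, updated in the
 * irq_bus_sync_unlock callback above.
 */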
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}

static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
{
	if (dev->domain_data.phyirq > 0) {
		irq_dispose_mapping(dev->domain_data.phyirq);

		if (dev->domain_data.irqdomain)
			irq_domain_remove(dev->domain_data.irqdomain);
	}
	dev->domain_data.phyirq = 0;
	dev->domain_data.irqdomain = NULL;
}

static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	int ret;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
				MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}

static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel KSZ9031RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}

2005 static int lan78xx_phy_init(struct lan78xx_net *dev)
2006 {
2007 int ret;
2008 u32 mii_adv;
2009 struct phy_device *phydev;
2010
2011 phydev = phy_find_first(dev->mdiobus);
2012 if (!phydev) {
2013 netdev_err(dev->net, "no PHY found\n");
2014 return -EIO;
2015 }
2016
2017 if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
2018 (dev->chipid == ID_REV_CHIP_ID_7850_)) {
2019 phydev->is_internal = true;
2020 dev->interface = PHY_INTERFACE_MODE_GMII;
2021
2022 } else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2023 if (!phydev->drv) {
2024 netdev_err(dev->net, "no PHY driver found\n");
2025 return -EIO;
2026 }
2027
2028 dev->interface = PHY_INTERFACE_MODE_RGMII;
2029
2030 /* external PHY fixup for KSZ9031RNX */
2031 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2032 ksz9031rnx_fixup);
2033 if (ret < 0) {
2034 netdev_err(dev->net, "failed to register fixup\n");
2035 return ret;
2036 }
2037 /* external PHY fixup for LAN8835 */
2038 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2039 lan8835_fixup);
2040 if (ret < 0) {
2041 netdev_err(dev->net, "failed to register fixup\n");
2042 return ret;
2043 }
2044 /* add more external PHY fixup here if needed */
2045
2046 phydev->is_internal = false;
2047 } else {
2048 netdev_err(dev->net, "unknown ID found\n");
2049 ret = -EIO;
2050 goto error;
2051 }
2052
2053 /* if phyirq is not set, use polling mode in phylib */
2054 if (dev->domain_data.phyirq > 0)
2055 phydev->irq = dev->domain_data.phyirq;
2056 else
2057 phydev->irq = 0;
2058 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2059
2060 /* set to AUTOMDIX */
2061 phydev->mdix = ETH_TP_MDI_AUTO;
2062
2063 ret = phy_connect_direct(dev->net, phydev,
2064 lan78xx_link_status_change,
2065 dev->interface);
2066 if (ret) {
2067 netdev_err(dev->net, "can't attach PHY to %s\n",
2068 dev->mdiobus->id);
2069 return -EIO;
2070 }
2071
2072 /* MAC doesn't support 1000T Half */
2073 phydev->supported &= ~SUPPORTED_1000baseT_Half;
2074
2075 /* support both flow controls */
2076 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2077 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2078 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2079 phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2080
2081 genphy_config_aneg(phydev);
2082
2083 dev->fc_autoneg = phydev->autoneg;
2084
2085 return 0;
2086
2087 error:
2088 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2089 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2090
2091 return ret;
2092 }
2093
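/* Worked example of the flow-control advertisement above: with
 * fc_request_control = FLOW_CTRL_RX | FLOW_CTRL_TX, mii_advertise_flowctrl()
 * returns ADVERTISE_PAUSE_CAP alone (the RX half sets the asymmetric bit and
 * the TX half toggles it off again), and mii_adv_to_ethtool_adv_t() maps
 * that to ADVERTISED_Pause, i.e. symmetric pause:
 *
 *	u32 adv = mii_adv_to_ethtool_adv_t(
 *			mii_advertise_flowctrl(FLOW_CTRL_RX | FLOW_CTRL_TX));
 *	// adv == ADVERTISED_Pause
 */
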
2094 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2095 {
2096 int ret = 0;
2097 u32 buf;
2098 bool rxenabled;
2099
2100 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2101
2102 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2103
2104 if (rxenabled) {
2105 buf &= ~MAC_RX_RXEN_;
2106 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2107 }
2108
2109 /* add 4 to size for FCS */
2110 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2111 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2112
2113 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2114
2115 if (rxenabled) {
2116 buf |= MAC_RX_RXEN_;
2117 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2118 }
2119
2120 return 0;
2121 }
2122
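/* Worked example: for the default MTU of 1500 the callers pass
 * size = 1500 + ETH_HLEN = 1514, so the MAC_RX size field is programmed to
 * 1514 + 4 (FCS) = 1518, the classic maximum Ethernet frame length. The
 * receiver is briefly disabled around the update so the size field never
 * changes while frames are flowing.
 */
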
2123 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2124 {
2125 struct sk_buff *skb;
2126 unsigned long flags;
2127 int count = 0;
2128
2129 spin_lock_irqsave(&q->lock, flags);
2130 while (!skb_queue_empty(q)) {
2131 struct skb_data *entry;
2132 struct urb *urb;
2133 int ret;
2134
2135 skb_queue_walk(q, skb) {
2136 entry = (struct skb_data *)skb->cb;
2137 if (entry->state != unlink_start)
2138 goto found;
2139 }
2140 break;
2141 found:
2142 entry->state = unlink_start;
2143 urb = entry->urb;
2144
2145 /* Take a reference on the URB so it cannot be freed
2146 * during usb_unlink_urb(); usb_unlink_urb() always races
2147 * with the .complete handler (including defer_bh), so
2148 * dropping the last reference there could trigger a
2149 * use-after-free inside usb_unlink_urb().
2150 */
2151 usb_get_urb(urb);
2152 spin_unlock_irqrestore(&q->lock, flags);
2153 /* during some PM-driven resume scenarios,
2154 * these (async) unlinks complete immediately
2155 */
2156 ret = usb_unlink_urb(urb);
2157 if (ret != -EINPROGRESS && ret != 0)
2158 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2159 else
2160 count++;
2161 usb_put_urb(urb);
2162 spin_lock_irqsave(&q->lock, flags);
2163 }
2164 spin_unlock_irqrestore(&q->lock, flags);
2165 return count;
2166 }
2167
2168 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2169 {
2170 struct lan78xx_net *dev = netdev_priv(netdev);
2171 int ll_mtu = new_mtu + netdev->hard_header_len;
2172 int old_hard_mtu = dev->hard_mtu;
2173 int old_rx_urb_size = dev->rx_urb_size;
2174 int ret;
2175
2176 /* no second zero-length packet read wanted after mtu-sized packets */
2177 if ((ll_mtu % dev->maxpacket) == 0)
2178 return -EDOM;
2179
2180 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
2181
2182 netdev->mtu = new_mtu;
2183
2184 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2185 if (dev->rx_urb_size == old_hard_mtu) {
2186 dev->rx_urb_size = dev->hard_mtu;
2187 if (dev->rx_urb_size > old_rx_urb_size) {
2188 if (netif_running(dev->net)) {
2189 unlink_urbs(dev, &dev->rxq);
2190 tasklet_schedule(&dev->bh);
2191 }
2192 }
2193 }
2194
2195 return 0;
2196 }
2197
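/* Worked example of the -EDOM check above, assuming a high-speed bulk
 * endpoint (dev->maxpacket == 512) and hard_header_len == ETH_HLEN +
 * TX_OVERHEAD == 22: requesting new_mtu = 490 gives ll_mtu = 512, an exact
 * multiple of the USB packet size. Such a transfer would need a zero-length
 * packet terminator on the wire, which this driver avoids by rejecting the
 * MTU change:
 *
 *	(490 + 22) % 512 == 0	// -> return -EDOM
 */
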
2198 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2199 {
2200 struct lan78xx_net *dev = netdev_priv(netdev);
2201 struct sockaddr *addr = p;
2202 u32 addr_lo, addr_hi;
2203 int ret;
2204
2205 if (netif_running(netdev))
2206 return -EBUSY;
2207
2208 if (!is_valid_ether_addr(addr->sa_data))
2209 return -EADDRNOTAVAIL;
2210
2211 ether_addr_copy(netdev->dev_addr, addr->sa_data);
2212
2213 addr_lo = netdev->dev_addr[0] |
2214 netdev->dev_addr[1] << 8 |
2215 netdev->dev_addr[2] << 16 |
2216 netdev->dev_addr[3] << 24;
2217 addr_hi = netdev->dev_addr[4] |
2218 netdev->dev_addr[5] << 8;
2219
2220 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2221 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2222
2223 return 0;
2224 }
2225
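/* Worked example of the register packing above: for the MAC address
 * 00:11:22:33:44:55 (dev_addr[0] == 0x00 ... dev_addr[5] == 0x55):
 *
 *	addr_lo == 0x33221100;	// RX_ADDRL: bytes 0-3, little-endian
 *	addr_hi == 0x00005544;	// RX_ADDRH: bytes 4-5 in the low half
 */
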
2226 /* Enable or disable Rx checksum offload engine and VLAN filtering */
2227 static int lan78xx_set_features(struct net_device *netdev,
2228 netdev_features_t features)
2229 {
2230 struct lan78xx_net *dev = netdev_priv(netdev);
2231 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2232 unsigned long flags;
2233 int ret;
2234
2235 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2236
2237 if (features & NETIF_F_RXCSUM) {
2238 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2239 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2240 } else {
2241 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2242 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2243 }
2244
2245 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2246 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2247 else
2248 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2249
2250 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2251
2252 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2253
2254 return 0;
2255 }
2256
2257 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2258 {
2259 struct lan78xx_priv *pdata =
2260 container_of(param, struct lan78xx_priv, set_vlan);
2261 struct lan78xx_net *dev = pdata->dev;
2262
2263 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2264 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2265 }
2266
2267 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2268 __be16 proto, u16 vid)
2269 {
2270 struct lan78xx_net *dev = netdev_priv(netdev);
2271 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2272 u16 vid_bit_index;
2273 u16 vid_dword_index;
2274
2275 vid_dword_index = (vid >> 5) & 0x7F;
2276 vid_bit_index = vid & 0x1F;
2277
2278 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2279
2280 /* defer register writes to a sleepable context */
2281 schedule_work(&pdata->set_vlan);
2282
2283 return 0;
2284 }
2285
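/* Worked example of the bitmap indexing above: VLAN ID 100 gives
 *
 *	vid_dword_index = (100 >> 5) & 0x7F = 3;
 *	vid_bit_index   =  100 & 0x1F       = 4;
 *
 * so bit 4 of vlan_table[3] stands for VID 100; 128 u32 words of 32 bits
 * each cover the full 4096-entry VLAN space.
 */
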
2286 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2287 __be16 proto, u16 vid)
2288 {
2289 struct lan78xx_net *dev = netdev_priv(netdev);
2290 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2291 u16 vid_bit_index;
2292 u16 vid_dword_index;
2293
2294 vid_dword_index = (vid >> 5) & 0x7F;
2295 vid_bit_index = vid & 0x1F;
2296
2297 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2298
2299 /* defer register writes to a sleepable context */
2300 schedule_work(&pdata->set_vlan);
2301
2302 return 0;
2303 }
2304
2305 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2306 {
2307 int ret;
2308 u32 buf;
2309 u32 regs[6] = { 0 };
2310
2311 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2312 if (buf & USB_CFG1_LTM_ENABLE_) {
2313 u8 temp[2];
2314 /* Get values from EEPROM first */
2315 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2316 if (temp[0] == 24) {
2317 ret = lan78xx_read_raw_eeprom(dev,
2318 temp[1] * 2,
2319 24,
2320 (u8 *)regs);
2321 if (ret < 0)
2322 return;
2323 }
2324 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2325 if (temp[0] == 24) {
2326 ret = lan78xx_read_raw_otp(dev,
2327 temp[1] * 2,
2328 24,
2329 (u8 *)regs);
2330 if (ret < 0)
2331 return;
2332 }
2333 }
2334 }
2335
2336 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2337 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2338 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2339 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2340 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2341 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2342 }
2343
2344 static int lan78xx_reset(struct lan78xx_net *dev)
2345 {
2346 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2347 u32 buf;
2348 int ret = 0;
2349 unsigned long timeout;
2350 u8 sig;
2351
2352 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2353 buf |= HW_CFG_LRST_;
2354 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2355
2356 timeout = jiffies + HZ;
2357 do {
2358 mdelay(1);
2359 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2360 if (time_after(jiffies, timeout)) {
2361 netdev_warn(dev->net,
2362 "timeout on completion of LiteReset");
2363 return -EIO;
2364 }
2365 } while (buf & HW_CFG_LRST_);
2366
2367 lan78xx_init_mac_address(dev);
2368
2369 /* save DEVID for later use */
2370 ret = lan78xx_read_reg(dev, ID_REV, &buf);
2371 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2372 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2373
2374 /* Respond to the IN token with a NAK */
2375 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2376 buf |= USB_CFG_BIR_;
2377 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2378
2379 /* Init LTM */
2380 lan78xx_init_ltm(dev);
2381
2382 if (dev->udev->speed == USB_SPEED_SUPER) {
2383 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2384 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2385 dev->rx_qlen = 4;
2386 dev->tx_qlen = 4;
2387 } else if (dev->udev->speed == USB_SPEED_HIGH) {
2388 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2389 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2390 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2391 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2392 } else {
2393 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2394 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2395 dev->rx_qlen = 4;
2396 dev->tx_qlen = 4;
2397 }
2398
2399 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2400 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2401
2402 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2403 buf |= HW_CFG_MEF_;
2404 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2405
2406 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2407 buf |= USB_CFG_BCE_;
2408 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2409
2410 /* set FIFO sizes */
2411 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2412 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2413
2414 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2415 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2416
2417 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2418 ret = lan78xx_write_reg(dev, FLOW, 0);
2419 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2420
2421 /* Don't need rfe_ctl_lock during initialisation */
2422 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2423 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2424 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2425
2426 /* Enable or disable checksum offload engines */
2427 lan78xx_set_features(dev->net, dev->net->features);
2428
2429 lan78xx_set_multicast(dev->net);
2430
2431 /* reset PHY */
2432 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2433 buf |= PMT_CTL_PHY_RST_;
2434 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2435
2436 timeout = jiffies + HZ;
2437 do {
2438 mdelay(1);
2439 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2440 if (time_after(jiffies, timeout)) {
2441 netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
2442 return -EIO;
2443 }
2444 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2445
2446 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2447 /* LAN7801 only has RGMII mode */
2448 if (dev->chipid == ID_REV_CHIP_ID_7801_)
2449 buf &= ~MAC_CR_GMII_EN_;
2450
2451 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2452 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2453 if (!ret && sig != EEPROM_INDICATOR) {
2454 /* Implies there is no external EEPROM. Set MAC speed. */
2455 netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2456 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2457 }
2458 }
2459 ret = lan78xx_write_reg(dev, MAC_CR, buf);
2460
2461 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2462 buf |= MAC_TX_TXEN_;
2463 ret = lan78xx_write_reg(dev, MAC_TX, buf);
2464
2465 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2466 buf |= FCT_TX_CTL_EN_;
2467 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2468
2469 ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
2470
2471 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2472 buf |= MAC_RX_RXEN_;
2473 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2474
2475 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2476 buf |= FCT_RX_CTL_EN_;
2477 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2478
2479 return 0;
2480 }
2481
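/* Worked example of the burst-cap arithmetic above, with
 * DEFAULT_BURST_CAP_SIZE = 12 KiB:
 *
 *	SuperSpeed:  12288 / 1024 = 12 packets per burst
 *	high speed:  12288 /  512 = 24 packets per burst
 *	full speed:  12288 /   64 = 192 packets per burst
 *
 * and at high speed, rx_qlen = (60 * 1518) / 12288 = 7 in-flight RX URBs.
 */
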
2482 static void lan78xx_init_stats(struct lan78xx_net *dev)
2483 {
2484 u32 *p;
2485 int i;
2486
2487 /* initialize rollover limits for the stats update;
2488 * some counters are 20 bits wide and some are 32 bits
2489 */
2490 p = (u32 *)&dev->stats.rollover_max;
2491 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2492 p[i] = 0xFFFFF;
2493
2494 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2495 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2496 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2497 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2498 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2499 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2500 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2501 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2502 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2503 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2504
2505 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
2506 }
2507
2508 static int lan78xx_open(struct net_device *net)
2509 {
2510 struct lan78xx_net *dev = netdev_priv(net);
2511 int ret;
2512
2513 ret = usb_autopm_get_interface(dev->intf);
2514 if (ret < 0)
2515 goto out;
2516
2517 ret = lan78xx_reset(dev);
2518 if (ret < 0)
2519 goto done;
2520
2521 phy_start(net->phydev);
2522
2523 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2524
2525 /* for Link Check */
2526 if (dev->urb_intr) {
2527 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2528 if (ret < 0) {
2529 netif_err(dev, ifup, dev->net,
2530 "intr submit %d\n", ret);
2531 goto done;
2532 }
2533 }
2534
2535 lan78xx_init_stats(dev);
2536
2537 set_bit(EVENT_DEV_OPEN, &dev->flags);
2538
2539 netif_start_queue(net);
2540
2541 dev->link_on = false;
2542
2543 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2544 done:
2545 usb_autopm_put_interface(dev->intf);
2546
2547 out:
2548 return ret;
2549 }
2550
2551 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2552 {
2553 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2554 DECLARE_WAITQUEUE(wait, current);
2555 int temp;
2556
2557 /* ensure there are no more active urbs */
2558 add_wait_queue(&unlink_wakeup, &wait);
2559 set_current_state(TASK_UNINTERRUPTIBLE);
2560 dev->wait = &unlink_wakeup;
2561 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2562
2563 /* maybe wait for deletions to finish. */
2564 while (!skb_queue_empty(&dev->rxq) &&
2565 !skb_queue_empty(&dev->txq) &&
2566 !skb_queue_empty(&dev->done)) {
2567 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2568 set_current_state(TASK_UNINTERRUPTIBLE);
2569 netif_dbg(dev, ifdown, dev->net,
2570 "waited for %d urb completions\n", temp);
2571 }
2572 set_current_state(TASK_RUNNING);
2573 dev->wait = NULL;
2574 remove_wait_queue(&unlink_wakeup, &wait);
2575 }
2576
2577 static int lan78xx_stop(struct net_device *net)
2578 {
2579 struct lan78xx_net *dev = netdev_priv(net);
2580
2581 if (timer_pending(&dev->stat_monitor))
2582 del_timer_sync(&dev->stat_monitor);
2583
2584 if (net->phydev)
2585 phy_stop(net->phydev);
2586
2587 clear_bit(EVENT_DEV_OPEN, &dev->flags);
2588 netif_stop_queue(net);
2589
2590 netif_info(dev, ifdown, dev->net,
2591 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2592 net->stats.rx_packets, net->stats.tx_packets,
2593 net->stats.rx_errors, net->stats.tx_errors);
2594
2595 lan78xx_terminate_urbs(dev);
2596
2597 usb_kill_urb(dev->urb_intr);
2598
2599 skb_queue_purge(&dev->rxq_pause);
2600
2601 /* deferred work (task, timer, softirq) must also stop.
2602 * can't flush_scheduled_work() until we drop rtnl (later),
2603 * else workers could deadlock; so make workers a NOP.
2604 */
2605 dev->flags = 0;
2606 cancel_delayed_work_sync(&dev->wq);
2607 tasklet_kill(&dev->bh);
2608
2609 usb_autopm_put_interface(dev->intf);
2610
2611 return 0;
2612 }
2613
2614 static int lan78xx_linearize(struct sk_buff *skb)
2615 {
2616 return skb_linearize(skb);
2617 }
2618
2619 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2620 struct sk_buff *skb, gfp_t flags)
2621 {
2622 u32 tx_cmd_a, tx_cmd_b;
2623
2624 if (skb_cow_head(skb, TX_OVERHEAD)) {
2625 dev_kfree_skb_any(skb);
2626 return NULL;
2627 }
2628
2629 if (lan78xx_linearize(skb) < 0)
2630 return NULL;
2631
2632 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2633
2634 if (skb->ip_summed == CHECKSUM_PARTIAL)
2635 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2636
2637 tx_cmd_b = 0;
2638 if (skb_is_gso(skb)) {
2639 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2640
2641 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2642
2643 tx_cmd_a |= TX_CMD_A_LSO_;
2644 }
2645
2646 if (skb_vlan_tag_present(skb)) {
2647 tx_cmd_a |= TX_CMD_A_IVTG_;
2648 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2649 }
2650
2651 skb_push(skb, 4);
2652 cpu_to_le32s(&tx_cmd_b);
2653 memcpy(skb->data, &tx_cmd_b, 4);
2654
2655 skb_push(skb, 4);
2656 cpu_to_le32s(&tx_cmd_a);
2657 memcpy(skb->data, &tx_cmd_a, 4);
2658
2659 return skb;
2660 }
2661
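/* The two skb_push() calls above prepend the 8-byte TX command header
 * (TX_OVERHEAD) in little-endian order: tx_cmd_a at offset 0, tx_cmd_b at
 * offset 4, then the frame itself. Worked example for a plain 60-byte frame
 * with no offloads:
 *
 *	tx_cmd_a = 60 | TX_CMD_A_FCS_;	// length plus "append FCS" flag
 *	tx_cmd_b = 0;			// no MSS, no VLAN tag
 *	// resulting URB payload: 8 + 60 = 68 bytes
 */
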
2662 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2663 struct sk_buff_head *list, enum skb_state state)
2664 {
2665 unsigned long flags;
2666 enum skb_state old_state;
2667 struct skb_data *entry = (struct skb_data *)skb->cb;
2668
2669 spin_lock_irqsave(&list->lock, flags);
2670 old_state = entry->state;
2671 entry->state = state;
2672
2673 __skb_unlink(skb, list);
2674 spin_unlock(&list->lock);
2675 spin_lock(&dev->done.lock);
2676
2677 __skb_queue_tail(&dev->done, skb);
2678 if (skb_queue_len(&dev->done) == 1)
2679 tasklet_schedule(&dev->bh);
2680 spin_unlock_irqrestore(&dev->done.lock, flags);
2681
2682 return old_state;
2683 }
2684
2685 static void tx_complete(struct urb *urb)
2686 {
2687 struct sk_buff *skb = (struct sk_buff *)urb->context;
2688 struct skb_data *entry = (struct skb_data *)skb->cb;
2689 struct lan78xx_net *dev = entry->dev;
2690
2691 if (urb->status == 0) {
2692 dev->net->stats.tx_packets += entry->num_of_packet;
2693 dev->net->stats.tx_bytes += entry->length;
2694 } else {
2695 dev->net->stats.tx_errors++;
2696
2697 switch (urb->status) {
2698 case -EPIPE:
2699 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2700 break;
2701
2702 /* software-driven interface shutdown */
2703 case -ECONNRESET:
2704 case -ESHUTDOWN:
2705 break;
2706
2707 case -EPROTO:
2708 case -ETIME:
2709 case -EILSEQ:
2710 netif_stop_queue(dev->net);
2711 break;
2712 default:
2713 netif_dbg(dev, tx_err, dev->net,
2714 "tx err %d\n", entry->urb->status);
2715 break;
2716 }
2717 }
2718
2719 usb_autopm_put_interface_async(dev->intf);
2720
2721 defer_bh(dev, skb, &dev->txq, tx_done);
2722 }
2723
2724 static void lan78xx_queue_skb(struct sk_buff_head *list,
2725 struct sk_buff *newsk, enum skb_state state)
2726 {
2727 struct skb_data *entry = (struct skb_data *)newsk->cb;
2728
2729 __skb_queue_tail(list, newsk);
2730 entry->state = state;
2731 }
2732
2733 static netdev_tx_t
2734 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2735 {
2736 struct lan78xx_net *dev = netdev_priv(net);
2737 struct sk_buff *skb2 = NULL;
2738
2739 if (skb) {
2740 skb_tx_timestamp(skb);
2741 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2742 }
2743
2744 if (skb2) {
2745 skb_queue_tail(&dev->txq_pend, skb2);
2746
2747 /* throttle the TX path at speeds slower than SuperSpeed USB */
2748 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2749 (skb_queue_len(&dev->txq_pend) > 10))
2750 netif_stop_queue(net);
2751 } else {
2752 netif_dbg(dev, tx_err, dev->net,
2753 "lan78xx_tx_prep return NULL\n");
2754 dev->net->stats.tx_errors++;
2755 dev->net->stats.tx_dropped++;
2756 }
2757
2758 tasklet_schedule(&dev->bh);
2759
2760 return NETDEV_TX_OK;
2761 }
2762
2763 static int
2764 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2765 {
2766 int tmp;
2767 struct usb_host_interface *alt = NULL;
2768 struct usb_host_endpoint *in = NULL, *out = NULL;
2769 struct usb_host_endpoint *status = NULL;
2770
2771 for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2772 unsigned ep;
2773
2774 in = NULL;
2775 out = NULL;
2776 status = NULL;
2777 alt = intf->altsetting + tmp;
2778
2779 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2780 struct usb_host_endpoint *e;
2781 int intr = 0;
2782
2783 e = alt->endpoint + ep;
2784 switch (e->desc.bmAttributes) {
2785 case USB_ENDPOINT_XFER_INT:
2786 if (!usb_endpoint_dir_in(&e->desc))
2787 continue;
2788 intr = 1;
2789 /* FALLTHROUGH */
2790 case USB_ENDPOINT_XFER_BULK:
2791 break;
2792 default:
2793 continue;
2794 }
2795 if (usb_endpoint_dir_in(&e->desc)) {
2796 if (!intr && !in)
2797 in = e;
2798 else if (intr && !status)
2799 status = e;
2800 } else {
2801 if (!out)
2802 out = e;
2803 }
2804 }
2805 if (in && out)
2806 break;
2807 }
2808 if (!alt || !in || !out)
2809 return -EINVAL;
2810
2811 dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2812 in->desc.bEndpointAddress &
2813 USB_ENDPOINT_NUMBER_MASK);
2814 dev->pipe_out = usb_sndbulkpipe(dev->udev,
2815 out->desc.bEndpointAddress &
2816 USB_ENDPOINT_NUMBER_MASK);
2817 dev->ep_intr = status;
2818
2819 return 0;
2820 }
2821
2822 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2823 {
2824 struct lan78xx_priv *pdata = NULL;
2825 int ret;
2826 int i;
2827
2828 ret = lan78xx_get_endpoints(dev, intf);
2829
2830 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2831
2832 pdata = (struct lan78xx_priv *)(dev->data[0]);
2833 if (!pdata) {
2834 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2835 return -ENOMEM;
2836 }
2837
2838 pdata->dev = dev;
2839
2840 spin_lock_init(&pdata->rfe_ctl_lock);
2841 mutex_init(&pdata->dataport_mutex);
2842
2843 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2844
2845 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2846 pdata->vlan_table[i] = 0;
2847
2848 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2849
2850 dev->net->features = 0;
2851
2852 if (DEFAULT_TX_CSUM_ENABLE)
2853 dev->net->features |= NETIF_F_HW_CSUM;
2854
2855 if (DEFAULT_RX_CSUM_ENABLE)
2856 dev->net->features |= NETIF_F_RXCSUM;
2857
2858 if (DEFAULT_TSO_CSUM_ENABLE)
2859 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2860
2861 dev->net->hw_features = dev->net->features;
2862
2863 ret = lan78xx_setup_irq_domain(dev);
2864 if (ret < 0) {
2865 netdev_warn(dev->net,
2866 "lan78xx_setup_irq_domain() failed : %d", ret);
2867 goto out1;
2868 }
2869
2870 dev->net->hard_header_len += TX_OVERHEAD;
2871 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2872
2873 /* Init all registers */
2874 ret = lan78xx_reset(dev);
2875 if (ret) {
2876 netdev_warn(dev->net, "Registers INIT FAILED....");
2877 goto out2;
2878 }
2879
2880 ret = lan78xx_mdio_init(dev);
2881 if (ret) {
2882 netdev_warn(dev->net, "MDIO INIT FAILED.....");
2883 goto out2;
2884 }
2885
2886 dev->net->flags |= IFF_MULTICAST;
2887
2888 pdata->wol = WAKE_MAGIC;
2889
2890 return ret;
2891
2892 out2:
2893 lan78xx_remove_irq_domain(dev);
2894
2895 out1:
2896 netdev_warn(dev->net, "Bind routine FAILED");
2897 cancel_work_sync(&pdata->set_multicast);
2898 cancel_work_sync(&pdata->set_vlan);
2899 kfree(pdata);
2900 return ret;
2901 }
2902
2903 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2904 {
2905 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2906
2907 lan78xx_remove_irq_domain(dev);
2908
2909 lan78xx_remove_mdio(dev);
2910
2911 if (pdata) {
2912 cancel_work_sync(&pdata->set_multicast);
2913 cancel_work_sync(&pdata->set_vlan);
2914 netif_dbg(dev, ifdown, dev->net, "free pdata");
2915 kfree(pdata);
2916 pdata = NULL;
2917 dev->data[0] = 0;
2918 }
2919 }
2920
2921 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2922 struct sk_buff *skb,
2923 u32 rx_cmd_a, u32 rx_cmd_b)
2924 {
2925 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2926 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2927 skb->ip_summed = CHECKSUM_NONE;
2928 } else {
2929 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2930 skb->ip_summed = CHECKSUM_COMPLETE;
2931 }
2932 }
2933
2934 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2935 {
2936 int status;
2937
2938 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2939 skb_queue_tail(&dev->rxq_pause, skb);
2940 return;
2941 }
2942
2943 dev->net->stats.rx_packets++;
2944 dev->net->stats.rx_bytes += skb->len;
2945
2946 skb->protocol = eth_type_trans(skb, dev->net);
2947
2948 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2949 skb->len + sizeof(struct ethhdr), skb->protocol);
2950 memset(skb->cb, 0, sizeof(struct skb_data));
2951
2952 if (skb_defer_rx_timestamp(skb))
2953 return;
2954
2955 status = netif_rx(skb);
2956 if (status != NET_RX_SUCCESS)
2957 netif_dbg(dev, rx_err, dev->net,
2958 "netif_rx status %d\n", status);
2959 }
2960
2961 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2962 {
2963 if (skb->len < dev->net->hard_header_len)
2964 return 0;
2965
2966 while (skb->len > 0) {
2967 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2968 u16 rx_cmd_c;
2969 struct sk_buff *skb2;
2970 unsigned char *packet;
2971
2972 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2973 le32_to_cpus(&rx_cmd_a);
2974 skb_pull(skb, sizeof(rx_cmd_a));
2975
2976 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2977 le32_to_cpus(&rx_cmd_b);
2978 skb_pull(skb, sizeof(rx_cmd_b));
2979
2980 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2981 le16_to_cpus(&rx_cmd_c);
2982 skb_pull(skb, sizeof(rx_cmd_c));
2983
2984 packet = skb->data;
2985
2986 /* get the packet length */
2987 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2988 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2989
2990 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2991 netif_dbg(dev, rx_err, dev->net,
2992 "Error rx_cmd_a=0x%08x", rx_cmd_a);
2993 } else {
2994 /* last frame in this batch */
2995 if (skb->len == size) {
2996 lan78xx_rx_csum_offload(dev, skb,
2997 rx_cmd_a, rx_cmd_b);
2998
2999 skb_trim(skb, skb->len - 4); /* remove fcs */
3000 skb->truesize = size + sizeof(struct sk_buff);
3001
3002 return 1;
3003 }
3004
3005 skb2 = skb_clone(skb, GFP_ATOMIC);
3006 if (unlikely(!skb2)) {
3007 netdev_warn(dev->net, "Error allocating skb");
3008 return 0;
3009 }
3010
3011 skb2->len = size;
3012 skb2->data = packet;
3013 skb_set_tail_pointer(skb2, size);
3014
3015 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3016
3017 skb_trim(skb2, skb2->len - 4); /* remove fcs */
3018 skb2->truesize = size + sizeof(struct sk_buff);
3019
3020 lan78xx_skb_return(dev, skb2);
3021 }
3022
3023 skb_pull(skb, size);
3024
3025 /* padding bytes before the next frame starts */
3026 if (skb->len)
3027 skb_pull(skb, align_count);
3028 }
3029
3030 return 1;
3031 }
3032
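/* Worked example of the alignment math above: a 60-byte frame with
 * RXW_PADDING = 2 gives
 *
 *	align_count = (4 - ((60 + 2) % 4)) % 4 = 2;
 *
 * so two padding bytes are skipped after the frame, keeping the next
 * rx_cmd_a/rx_cmd_b/rx_cmd_c triple 4-byte aligned in the bulk-in buffer.
 */
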
3033 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3034 {
3035 if (!lan78xx_rx(dev, skb)) {
3036 dev->net->stats.rx_errors++;
3037 goto done;
3038 }
3039
3040 if (skb->len) {
3041 lan78xx_skb_return(dev, skb);
3042 return;
3043 }
3044
3045 netif_dbg(dev, rx_err, dev->net, "drop\n");
3046 dev->net->stats.rx_errors++;
3047 done:
3048 skb_queue_tail(&dev->done, skb);
3049 }
3050
3051 static void rx_complete(struct urb *urb);
3052
3053 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3054 {
3055 struct sk_buff *skb;
3056 struct skb_data *entry;
3057 unsigned long lockflags;
3058 size_t size = dev->rx_urb_size;
3059 int ret = 0;
3060
3061 skb = netdev_alloc_skb_ip_align(dev->net, size);
3062 if (!skb) {
3063 usb_free_urb(urb);
3064 return -ENOMEM;
3065 }
3066
3067 entry = (struct skb_data *)skb->cb;
3068 entry->urb = urb;
3069 entry->dev = dev;
3070 entry->length = 0;
3071
3072 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3073 skb->data, size, rx_complete, skb);
3074
3075 spin_lock_irqsave(&dev->rxq.lock, lockflags);
3076
3077 if (netif_device_present(dev->net) &&
3078 netif_running(dev->net) &&
3079 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3080 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3081 ret = usb_submit_urb(urb, GFP_ATOMIC);
3082 switch (ret) {
3083 case 0:
3084 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3085 break;
3086 case -EPIPE:
3087 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3088 break;
3089 case -ENODEV:
3090 netif_dbg(dev, ifdown, dev->net, "device gone\n");
3091 netif_device_detach(dev->net);
3092 break;
3093 case -EHOSTUNREACH:
3094 ret = -ENOLINK;
3095 break;
3096 default:
3097 netif_dbg(dev, rx_err, dev->net,
3098 "rx submit, %d\n", ret);
3099 tasklet_schedule(&dev->bh);
3100 }
3101 } else {
3102 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3103 ret = -ENOLINK;
3104 }
3105 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3106 if (ret) {
3107 dev_kfree_skb_any(skb);
3108 usb_free_urb(urb);
3109 }
3110 return ret;
3111 }
3112
3113 static void rx_complete(struct urb *urb)
3114 {
3115 struct sk_buff *skb = (struct sk_buff *)urb->context;
3116 struct skb_data *entry = (struct skb_data *)skb->cb;
3117 struct lan78xx_net *dev = entry->dev;
3118 int urb_status = urb->status;
3119 enum skb_state state;
3120
3121 skb_put(skb, urb->actual_length);
3122 state = rx_done;
3123 entry->urb = NULL;
3124
3125 switch (urb_status) {
3126 case 0:
3127 if (skb->len < dev->net->hard_header_len) {
3128 state = rx_cleanup;
3129 dev->net->stats.rx_errors++;
3130 dev->net->stats.rx_length_errors++;
3131 netif_dbg(dev, rx_err, dev->net,
3132 "rx length %d\n", skb->len);
3133 }
3134 usb_mark_last_busy(dev->udev);
3135 break;
3136 case -EPIPE:
3137 dev->net->stats.rx_errors++;
3138 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3139 /* FALLTHROUGH */
3140 case -ECONNRESET: /* async unlink */
3141 case -ESHUTDOWN: /* hardware gone */
3142 netif_dbg(dev, ifdown, dev->net,
3143 "rx shutdown, code %d\n", urb_status);
3144 state = rx_cleanup;
3145 entry->urb = urb;
3146 urb = NULL;
3147 break;
3148 case -EPROTO:
3149 case -ETIME:
3150 case -EILSEQ:
3151 dev->net->stats.rx_errors++;
3152 state = rx_cleanup;
3153 entry->urb = urb;
3154 urb = NULL;
3155 break;
3156
3157 /* data overrun ... flush fifo? */
3158 case -EOVERFLOW:
3159 dev->net->stats.rx_over_errors++;
3160 /* FALLTHROUGH */
3161
3162 default:
3163 state = rx_cleanup;
3164 dev->net->stats.rx_errors++;
3165 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3166 break;
3167 }
3168
3169 state = defer_bh(dev, skb, &dev->rxq, state);
3170
3171 if (urb) {
3172 if (netif_running(dev->net) &&
3173 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3174 state != unlink_start) {
3175 rx_submit(dev, urb, GFP_ATOMIC);
3176 return;
3177 }
3178 usb_free_urb(urb);
3179 }
3180 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3181 }
3182
3183 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3184 {
3185 int length;
3186 struct urb *urb = NULL;
3187 struct skb_data *entry;
3188 unsigned long flags;
3189 struct sk_buff_head *tqp = &dev->txq_pend;
3190 struct sk_buff *skb, *skb2;
3191 int ret;
3192 int count, pos;
3193 int skb_totallen, pkt_cnt;
3194
3195 skb_totallen = 0;
3196 pkt_cnt = 0;
3197 count = 0;
3198 length = 0;
3199 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3200 if (skb_is_gso(skb)) {
3201 if (pkt_cnt) {
3202 /* handle previous packets first */
3203 break;
3204 }
3205 count = 1;
3206 length = skb->len - TX_OVERHEAD;
3207 skb2 = skb_dequeue(tqp);
3208 goto gso_skb;
3209 }
3210
3211 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3212 break;
3213 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3214 pkt_cnt++;
3215 }
3216
3217 /* copy to a single skb */
3218 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3219 if (!skb)
3220 goto drop;
3221
3222 skb_put(skb, skb_totallen);
3223
3224 for (count = pos = 0; count < pkt_cnt; count++) {
3225 skb2 = skb_dequeue(tqp);
3226 if (skb2) {
3227 length += (skb2->len - TX_OVERHEAD);
3228 memcpy(skb->data + pos, skb2->data, skb2->len);
3229 pos += roundup(skb2->len, sizeof(u32));
3230 dev_kfree_skb(skb2);
3231 }
3232 }
3233
3234 gso_skb:
3235 urb = usb_alloc_urb(0, GFP_ATOMIC);
3236 if (!urb)
3237 goto drop;
3238
3239 entry = (struct skb_data *)skb->cb;
3240 entry->urb = urb;
3241 entry->dev = dev;
3242 entry->length = length;
3243 entry->num_of_packet = count;
3244
3245 spin_lock_irqsave(&dev->txq.lock, flags);
3246 ret = usb_autopm_get_interface_async(dev->intf);
3247 if (ret < 0) {
3248 spin_unlock_irqrestore(&dev->txq.lock, flags);
3249 goto drop;
3250 }
3251
3252 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3253 skb->data, skb->len, tx_complete, skb);
3254
3255 if (length % dev->maxpacket == 0) {
3256 /* send USB_ZERO_PACKET */
3257 urb->transfer_flags |= URB_ZERO_PACKET;
3258 }
3259
3260 #ifdef CONFIG_PM
3261 /* if this triggers, the device is still asleep */
3262 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3263 /* transmission will be done in resume */
3264 usb_anchor_urb(urb, &dev->deferred);
3265 /* no point in processing more packets */
3266 netif_stop_queue(dev->net);
3267 usb_put_urb(urb);
3268 spin_unlock_irqrestore(&dev->txq.lock, flags);
3269 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3270 return;
3271 }
3272 #endif
3273
3274 ret = usb_submit_urb(urb, GFP_ATOMIC);
3275 switch (ret) {
3276 case 0:
3277 netif_trans_update(dev->net);
3278 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3279 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3280 netif_stop_queue(dev->net);
3281 break;
3282 case -EPIPE:
3283 netif_stop_queue(dev->net);
3284 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3285 usb_autopm_put_interface_async(dev->intf);
3286 break;
3287 default:
3288 usb_autopm_put_interface_async(dev->intf);
3289 netif_dbg(dev, tx_err, dev->net,
3290 "tx: submit urb err %d\n", ret);
3291 break;
3292 }
3293
3294 spin_unlock_irqrestore(&dev->txq.lock, flags);
3295
3296 if (ret) {
3297 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3298 drop:
3299 dev->net->stats.tx_dropped++;
3300 if (skb)
3301 dev_kfree_skb_any(skb);
3302 usb_free_urb(urb);
3303 } else
3304 netif_dbg(dev, tx_queued, dev->net,
3305 "> tx, len %d, type 0x%x\n", length, skb->protocol);
3306 }
3307
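/* Worked example of the aggregation loop above: two pending skbs of 64 and
 * 100 bytes (each already carrying its own 8-byte TX command header from
 * lan78xx_tx_prep()) are copied back to back at 4-byte-aligned offsets:
 *
 *	pos = 0  -> first skb,  64 bytes; pos += roundup(64, 4) = 64
 *	pos = 64 -> second skb, 100 bytes
 *	skb_totallen = 100 + roundup(64, 4) = 164 bytes in one bulk-out URB
 *
 * The device re-splits the transfer using the per-frame command headers.
 */
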
3308 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3309 {
3310 struct urb *urb;
3311 int i;
3312
3313 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3314 for (i = 0; i < 10; i++) {
3315 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3316 break;
3317 urb = usb_alloc_urb(0, GFP_ATOMIC);
3318 if (urb)
3319 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3320 return;
3321 }
3322
3323 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3324 tasklet_schedule(&dev->bh);
3325 }
3326 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3327 netif_wake_queue(dev->net);
3328 }
3329
3330 static void lan78xx_bh(unsigned long param)
3331 {
3332 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3333 struct sk_buff *skb;
3334 struct skb_data *entry;
3335
3336 while ((skb = skb_dequeue(&dev->done))) {
3337 entry = (struct skb_data *)(skb->cb);
3338 switch (entry->state) {
3339 case rx_done:
3340 entry->state = rx_cleanup;
3341 rx_process(dev, skb);
3342 continue;
3343 case tx_done:
3344 usb_free_urb(entry->urb);
3345 dev_kfree_skb(skb);
3346 continue;
3347 case rx_cleanup:
3348 usb_free_urb(entry->urb);
3349 dev_kfree_skb(skb);
3350 continue;
3351 default:
3352 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3353 return;
3354 }
3355 }
3356
3357 if (netif_device_present(dev->net) && netif_running(dev->net)) {
3358 /* reset update timer delta */
3359 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3360 dev->delta = 1;
3361 mod_timer(&dev->stat_monitor,
3362 jiffies + STAT_UPDATE_TIMER);
3363 }
3364
3365 if (!skb_queue_empty(&dev->txq_pend))
3366 lan78xx_tx_bh(dev);
3367
3368 if (!timer_pending(&dev->delay) &&
3369 !test_bit(EVENT_RX_HALT, &dev->flags))
3370 lan78xx_rx_bh(dev);
3371 }
3372 }
3373
3374 static void lan78xx_delayedwork(struct work_struct *work)
3375 {
3376 int status;
3377 struct lan78xx_net *dev;
3378
3379 dev = container_of(work, struct lan78xx_net, wq.work);
3380
3381 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3382 unlink_urbs(dev, &dev->txq);
3383 status = usb_autopm_get_interface(dev->intf);
3384 if (status < 0)
3385 goto fail_pipe;
3386 status = usb_clear_halt(dev->udev, dev->pipe_out);
3387 usb_autopm_put_interface(dev->intf);
3388 if (status < 0 &&
3389 status != -EPIPE &&
3390 status != -ESHUTDOWN) {
3391 if (netif_msg_tx_err(dev))
3392 fail_pipe:
3393 netdev_err(dev->net,
3394 "can't clear tx halt, status %d\n",
3395 status);
3396 } else {
3397 clear_bit(EVENT_TX_HALT, &dev->flags);
3398 if (status != -ESHUTDOWN)
3399 netif_wake_queue(dev->net);
3400 }
3401 }
3402 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3403 unlink_urbs(dev, &dev->rxq);
3404 status = usb_autopm_get_interface(dev->intf);
3405 if (status < 0)
3406 goto fail_halt;
3407 status = usb_clear_halt(dev->udev, dev->pipe_in);
3408 usb_autopm_put_interface(dev->intf);
3409 if (status < 0 &&
3410 status != -EPIPE &&
3411 status != -ESHUTDOWN) {
3412 if (netif_msg_rx_err(dev))
3413 fail_halt:
3414 netdev_err(dev->net,
3415 "can't clear rx halt, status %d\n",
3416 status);
3417 } else {
3418 clear_bit(EVENT_RX_HALT, &dev->flags);
3419 tasklet_schedule(&dev->bh);
3420 }
3421 }
3422
3423 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3424 int ret = 0;
3425
3426 clear_bit(EVENT_LINK_RESET, &dev->flags);
3427 status = usb_autopm_get_interface(dev->intf);
3428 if (status < 0)
3429 goto skip_reset;
3430 if (lan78xx_link_reset(dev) < 0) {
3431 usb_autopm_put_interface(dev->intf);
3432 skip_reset:
3433 netdev_info(dev->net, "link reset failed (%d)\n",
3434 ret);
3435 } else {
3436 usb_autopm_put_interface(dev->intf);
3437 }
3438 }
3439
3440 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3441 lan78xx_update_stats(dev);
3442
3443 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3444
3445 mod_timer(&dev->stat_monitor,
3446 jiffies + (STAT_UPDATE_TIMER * dev->delta));
3447
3448 dev->delta = min((dev->delta * 2), 50);
3449 }
3450 }
3451
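/* The stats path above backs off exponentially while idle: with
 * STAT_UPDATE_TIMER = 1000 ms and dev->delta doubling up to its cap of 50,
 * successive refreshes land roughly 2 s, 4 s, 8 s, 16 s, 32 s and then 50 s
 * apart. lan78xx_bh() resets delta to 1 as soon as traffic flows again,
 * restoring the 1 s update rate.
 */
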
3452 static void intr_complete(struct urb *urb)
3453 {
3454 struct lan78xx_net *dev = urb->context;
3455 int status = urb->status;
3456
3457 switch (status) {
3458 /* success */
3459 case 0:
3460 lan78xx_status(dev, urb);
3461 break;
3462
3463 /* software-driven interface shutdown */
3464 case -ENOENT: /* urb killed */
3465 case -ESHUTDOWN: /* hardware gone */
3466 netif_dbg(dev, ifdown, dev->net,
3467 "intr shutdown, code %d\n", status);
3468 return;
3469
3470 /* NOTE: not throttling like RX/TX, since this endpoint
3471 * already polls infrequently
3472 */
3473 default:
3474 netdev_dbg(dev->net, "intr status %d\n", status);
3475 break;
3476 }
3477
3478 if (!netif_running(dev->net))
3479 return;
3480
3481 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3482 status = usb_submit_urb(urb, GFP_ATOMIC);
3483 if (status != 0)
3484 netif_err(dev, timer, dev->net,
3485 "intr resubmit --> %d\n", status);
3486 }
3487
3488 static void lan78xx_disconnect(struct usb_interface *intf)
3489 {
3490 struct lan78xx_net *dev;
3491 struct usb_device *udev;
3492 struct net_device *net;
3493
3494 dev = usb_get_intfdata(intf);
3495 usb_set_intfdata(intf, NULL);
3496 if (!dev)
3497 return;
3498
3499 udev = interface_to_usbdev(intf);
3500 net = dev->net;
3501
3502 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3503 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3504
3505 phy_disconnect(net->phydev);
3506
3507 unregister_netdev(net);
3508
3509 cancel_delayed_work_sync(&dev->wq);
3510
3511 usb_scuttle_anchored_urbs(&dev->deferred);
3512
3513 lan78xx_unbind(dev, intf);
3514
3515 usb_kill_urb(dev->urb_intr);
3516 usb_free_urb(dev->urb_intr);
3517
3518 free_netdev(net);
3519 usb_put_dev(udev);
3520 }
3521
3522 static void lan78xx_tx_timeout(struct net_device *net)
3523 {
3524 struct lan78xx_net *dev = netdev_priv(net);
3525
3526 unlink_urbs(dev, &dev->txq);
3527 tasklet_schedule(&dev->bh);
3528 }
3529
3530 static const struct net_device_ops lan78xx_netdev_ops = {
3531 .ndo_open = lan78xx_open,
3532 .ndo_stop = lan78xx_stop,
3533 .ndo_start_xmit = lan78xx_start_xmit,
3534 .ndo_tx_timeout = lan78xx_tx_timeout,
3535 .ndo_change_mtu = lan78xx_change_mtu,
3536 .ndo_set_mac_address = lan78xx_set_mac_addr,
3537 .ndo_validate_addr = eth_validate_addr,
3538 .ndo_do_ioctl = lan78xx_ioctl,
3539 .ndo_set_rx_mode = lan78xx_set_multicast,
3540 .ndo_set_features = lan78xx_set_features,
3541 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3542 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
3543 };
3544
3545 static void lan78xx_stat_monitor(struct timer_list *t)
3546 {
3547 struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3548
3549 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3550 }
3551
3552 static int lan78xx_probe(struct usb_interface *intf,
3553 const struct usb_device_id *id)
3554 {
3555 struct lan78xx_net *dev;
3556 struct net_device *netdev;
3557 struct usb_device *udev;
3558 int ret;
3559 unsigned maxp;
3560 unsigned period;
3561 u8 *buf = NULL;
3562
3563 udev = interface_to_usbdev(intf);
3564 udev = usb_get_dev(udev);
3565
3566 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3567 if (!netdev) {
3568 dev_err(&intf->dev, "Error: OOM\n");
3569 ret = -ENOMEM;
3570 goto out1;
3571 }
3572
3573 /* netdev_printk() needs this */
3574 SET_NETDEV_DEV(netdev, &intf->dev);
3575
3576 dev = netdev_priv(netdev);
3577 dev->udev = udev;
3578 dev->intf = intf;
3579 dev->net = netdev;
3580 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3581 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3582
3583 skb_queue_head_init(&dev->rxq);
3584 skb_queue_head_init(&dev->txq);
3585 skb_queue_head_init(&dev->done);
3586 skb_queue_head_init(&dev->rxq_pause);
3587 skb_queue_head_init(&dev->txq_pend);
3588 mutex_init(&dev->phy_mutex);
3589
3590 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3591 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3592 init_usb_anchor(&dev->deferred);
3593
3594 netdev->netdev_ops = &lan78xx_netdev_ops;
3595 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3596 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3597
3598 dev->delta = 1;
3599 timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3600
3601 mutex_init(&dev->stats.access_lock);
3602
3603 ret = lan78xx_bind(dev, intf);
3604 if (ret < 0)
3605 goto out2;
3606 strcpy(netdev->name, "eth%d");
3607
3608 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3609 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3610
3611 /* MTU range: 68 - 9000 */
3612 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3613
3614 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3615 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3616 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3617
3618 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3619 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3620
3621 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3622 dev->ep_intr->desc.bEndpointAddress &
3623 USB_ENDPOINT_NUMBER_MASK);
3624 period = dev->ep_intr->desc.bInterval;
3625
3626 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3627 buf = kmalloc(maxp, GFP_KERNEL);
3628 if (buf) {
3629 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3630 if (!dev->urb_intr) {
3631 ret = -ENOMEM;
3632 kfree(buf);
3633 goto out3;
3634 } else {
3635 usb_fill_int_urb(dev->urb_intr, dev->udev,
3636 dev->pipe_intr, buf, maxp,
3637 intr_complete, dev, period);
3638 }
3639 }
3640
3641 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3642
3643 /* driver requires remote-wakeup capability during autosuspend. */
3644 intf->needs_remote_wakeup = 1;
3645
3646 ret = register_netdev(netdev);
3647 if (ret != 0) {
3648 netif_err(dev, probe, netdev, "couldn't register the device\n");
3649 goto out3;
3650 }
3651
3652 usb_set_intfdata(intf, dev);
3653
3654 ret = device_set_wakeup_enable(&udev->dev, true);
3655
3656 /* The default autosuspend delay of 2 sec has more overhead than benefit.
3657 * Set it to 10 sec instead.
3658 */
3659 pm_runtime_set_autosuspend_delay(&udev->dev,
3660 DEFAULT_AUTOSUSPEND_DELAY);
3661
3662 ret = lan78xx_phy_init(dev);
3663 if (ret < 0)
3664 goto out4;
3665
3666 return 0;
3667
3668 out4:
3669 unregister_netdev(netdev);
3670 out3:
3671 lan78xx_unbind(dev, intf);
3672 out2:
3673 free_netdev(netdev);
3674 out1:
3675 usb_put_dev(udev);
3676
3677 return ret;
3678 }
3679
3680 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3681 {
3682 const u16 crc16poly = 0x8005;
3683 int i;
3684 u16 bit, crc, msb;
3685 u8 data;
3686
3687 crc = 0xFFFF;
3688 for (i = 0; i < len; i++) {
3689 data = *buf++;
3690 for (bit = 0; bit < 8; bit++) {
3691 msb = crc >> 15;
3692 crc <<= 1;
3693
3694 if (msb ^ (u16)(data & 1)) {
3695 crc ^= crc16poly;
3696 crc |= (u16)0x0001U;
3697 }
3698 data >>= 1;
3699 }
3700 }
3701
3702 return crc;
3703 }
3704
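/* Usage sketch for the helper above, as used by lan78xx_set_suspend()
 * below: hash the bytes of a wake-up pattern and program the 16-bit result
 * into the wakeup-filter CRC field:
 *
 *	static const u8 arp_type[2] = { 0x08, 0x06 };	// EtherType: ARP
 *	u16 crc = lan78xx_wakeframe_crc16(arp_type, 2);
 *	// crc & WUF_CFGX_CRC16_MASK_ goes into WUF_CFG(mask_index)
 */
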
3705 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3706 {
3707 u32 buf;
3708 int ret;
3709 int mask_index;
3710 u16 crc;
3711 u32 temp_wucsr;
3712 u32 temp_pmt_ctl;
3713 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3714 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3715 const u8 arp_type[2] = { 0x08, 0x06 };
3716
3717 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3718 buf &= ~MAC_TX_TXEN_;
3719 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3720 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3721 buf &= ~MAC_RX_RXEN_;
3722 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3723
3724 ret = lan78xx_write_reg(dev, WUCSR, 0);
3725 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3726 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3727
3728 temp_wucsr = 0;
3729
3730 temp_pmt_ctl = 0;
3731 ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3732 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3733 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3734
3735 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3736 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3737
3738 mask_index = 0;
3739 if (wol & WAKE_PHY) {
3740 temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3741
3742 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3743 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3744 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3745 }
3746 if (wol & WAKE_MAGIC) {
3747 temp_wucsr |= WUCSR_MPEN_;
3748
3749 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3750 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3751 temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3752 }
3753 if (wol & WAKE_BCAST) {
3754 temp_wucsr |= WUCSR_BCST_EN_;
3755
3756 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3757 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3758 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3759 }
3760 if (wol & WAKE_MCAST) {
3761 temp_wucsr |= WUCSR_WAKE_EN_;
3762
3763 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3764 crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3765 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3766 WUF_CFGX_EN_ |
3767 WUF_CFGX_TYPE_MCAST_ |
3768 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3769 (crc & WUF_CFGX_CRC16_MASK_));
3770
3771 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3772 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3773 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3774 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3775 mask_index++;
3776
3777 /* for IPv6 Multicast */
3778 crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3779 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3780 WUF_CFGX_EN_ |
3781 WUF_CFGX_TYPE_MCAST_ |
3782 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3783 (crc & WUF_CFGX_CRC16_MASK_));
3784
3785 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3786 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3787 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3788 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3789 mask_index++;
3790
3791 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3792 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3793 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3794 }
3795 if (wol & WAKE_UCAST) {
3796 temp_wucsr |= WUCSR_PFDA_EN_;
3797
3798 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3799 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3800 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3801 }
3802 if (wol & WAKE_ARP) {
3803 temp_wucsr |= WUCSR_WAKE_EN_;
3804
3805 /* set WUF_CFG & WUF_MASK
3806 * for packettype (offset 12,13) = ARP (0x0806)
3807 */
3808 crc = lan78xx_wakeframe_crc16(arp_type, 2);
3809 ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3810 WUF_CFGX_EN_ |
3811 WUF_CFGX_TYPE_ALL_ |
3812 (0 << WUF_CFGX_OFFSET_SHIFT_) |
3813 (crc & WUF_CFGX_CRC16_MASK_));
3814
3815 ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3816 ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3817 ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3818 ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3819 mask_index++;
3820
3821 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3822 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3823 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3824 }
3825
3826 ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3827
3828 /* when multiple WOL bits are set */
3829 if (hweight_long((unsigned long)wol) > 1) {
3830 temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3831 temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3832 temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3833 }
3834 ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3835
3836 /* clear WUPS */
3837 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3838 buf |= PMT_CTL_WUPS_MASK_;
3839 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3840
3841 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3842 buf |= MAC_RX_RXEN_;
3843 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3844
3845 return 0;
3846 }
3847
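/* The WUF_MASKn registers above are byte-select bitmaps: bit N includes
 * frame byte N in the CRC16 comparison. Worked examples from the code:
 *
 *	WUF_MASK0 = 7      -> bytes 0-2   (01:00:5E IPv4 multicast prefix)
 *	WUF_MASK0 = 3      -> bytes 0-1   (33:33 IPv6 multicast prefix)
 *	WUF_MASK0 = 0x3000 -> bytes 12-13 (EtherType field, 0x0806 = ARP)
 */
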
3848 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3849 {
3850 struct lan78xx_net *dev = usb_get_intfdata(intf);
3851 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3852 u32 buf;
3853 int ret;
3854 int event;
3855
3856 event = message.event;
3857
3858 if (!dev->suspend_count++) {
3859 spin_lock_irq(&dev->txq.lock);
3860 /* don't autosuspend while transmitting */
3861 if ((skb_queue_len(&dev->txq) ||
3862 skb_queue_len(&dev->txq_pend)) &&
3863 PMSG_IS_AUTO(message)) {
3864 spin_unlock_irq(&dev->txq.lock);
3865 ret = -EBUSY;
3866 goto out;
3867 } else {
3868 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3869 spin_unlock_irq(&dev->txq.lock);
3870 }
3871
3872 /* stop TX & RX */
3873 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3874 buf &= ~MAC_TX_TXEN_;
3875 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3876 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3877 buf &= ~MAC_RX_RXEN_;
3878 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3879
3880 /* empty out the rx and tx queues */
3881 netif_device_detach(dev->net);
3882 lan78xx_terminate_urbs(dev);
3883 usb_kill_urb(dev->urb_intr);
3884
3885 /* reattach */
3886 netif_device_attach(dev->net);
3887 }
3888
3889 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3890 del_timer(&dev->stat_monitor);
3891
3892 if (PMSG_IS_AUTO(message)) {
3893 /* auto suspend (selective suspend) */
3894 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3895 buf &= ~MAC_TX_TXEN_;
3896 ret = lan78xx_write_reg(dev, MAC_TX, buf);
3897 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3898 buf &= ~MAC_RX_RXEN_;
3899 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3900
3901 ret = lan78xx_write_reg(dev, WUCSR, 0);
3902 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3903 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3904
3905 /* set goodframe wakeup */
3906 ret = lan78xx_read_reg(dev, WUCSR, &buf);
3907
3908 buf |= WUCSR_RFE_WAKE_EN_;
3909 buf |= WUCSR_STORE_WAKE_;
3910
3911 ret = lan78xx_write_reg(dev, WUCSR, buf);
3912
3913 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3914
3915 buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3916 buf |= PMT_CTL_RES_CLR_WKP_STS_;
3917
3918 buf |= PMT_CTL_PHY_WAKE_EN_;
3919 buf |= PMT_CTL_WOL_EN_;
3920 buf &= ~PMT_CTL_SUS_MODE_MASK_;
3921 buf |= PMT_CTL_SUS_MODE_3_;
3922
3923 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3924
3925 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3926
3927 buf |= PMT_CTL_WUPS_MASK_;
3928
3929 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3930
3931 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3932 buf |= MAC_RX_RXEN_;
3933 ret = lan78xx_write_reg(dev, MAC_RX, buf);
3934 } else {
3935 lan78xx_set_suspend(dev, pdata->wol);
3936 }
3937 }
3938
3939 ret = 0;
3940 out:
3941 return ret;
3942 }
3943
3944 static int lan78xx_resume(struct usb_interface *intf)
3945 {
3946 struct lan78xx_net *dev = usb_get_intfdata(intf);
3947 struct sk_buff *skb;
3948 struct urb *res;
3949 int ret;
3950 u32 buf;
3951
3952 if (!timer_pending(&dev->stat_monitor)) {
3953 dev->delta = 1;
3954 mod_timer(&dev->stat_monitor,
3955 jiffies + STAT_UPDATE_TIMER);
3956 }
3957
3958 if (!--dev->suspend_count) {
3959 /* resume interrupt URBs */
3960 if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3961 usb_submit_urb(dev->urb_intr, GFP_NOIO);
3962
3963 spin_lock_irq(&dev->txq.lock);
3964 while ((res = usb_get_from_anchor(&dev->deferred))) {
3965 skb = (struct sk_buff *)res->context;
3966 ret = usb_submit_urb(res, GFP_ATOMIC);
3967 if (ret < 0) {
3968 dev_kfree_skb_any(skb);
3969 usb_free_urb(res);
3970 usb_autopm_put_interface_async(dev->intf);
3971 } else {
3972 netif_trans_update(dev->net);
3973 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3974 }
3975 }
3976
3977 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3978 spin_unlock_irq(&dev->txq.lock);
3979
3980 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3981 if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
3982 netif_start_queue(dev->net);
3983 tasklet_schedule(&dev->bh);
3984 }
3985 }
3986
3987 ret = lan78xx_write_reg(dev, WUCSR2, 0);
3988 ret = lan78xx_write_reg(dev, WUCSR, 0);
3989 ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3990
3991 ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3992 WUCSR2_ARP_RCD_ |
3993 WUCSR2_IPV6_TCPSYN_RCD_ |
3994 WUCSR2_IPV4_TCPSYN_RCD_);
3995
3996 ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3997 WUCSR_EEE_RX_WAKE_ |
3998 WUCSR_PFDA_FR_ |
3999 WUCSR_RFE_WAKE_FR_ |
4000 WUCSR_WUFR_ |
4001 WUCSR_MPR_ |
4002 WUCSR_BCST_FR_);
4003
4004 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4005 buf |= MAC_TX_TXEN_;
4006 ret = lan78xx_write_reg(dev, MAC_TX, buf);
4007
4008 return 0;
4009 }
4010
4011 static int lan78xx_reset_resume(struct usb_interface *intf)
4012 {
4013 struct lan78xx_net *dev = usb_get_intfdata(intf);
4014
4015 lan78xx_reset(dev);
4016
4017 phy_start(dev->net->phydev);
4018
4019 return lan78xx_resume(intf);
4020 }
4021
4022 static const struct usb_device_id products[] = {
4023 {
4024 /* LAN7800 USB Gigabit Ethernet Device */
4025 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4026 },
4027 {
4028 /* LAN7850 USB Gigabit Ethernet Device */
4029 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4030 },
4031 {
4032 /* LAN7801 USB Gigabit Ethernet Device */
4033 USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4034 },
4035 {},
4036 };
4037 MODULE_DEVICE_TABLE(usb, products);
4038
4039 static struct usb_driver lan78xx_driver = {
4040 .name = DRIVER_NAME,
4041 .id_table = products,
4042 .probe = lan78xx_probe,
4043 .disconnect = lan78xx_disconnect,
4044 .suspend = lan78xx_suspend,
4045 .resume = lan78xx_resume,
4046 .reset_resume = lan78xx_reset_resume,
4047 .supports_autosuspend = 1,
4048 .disable_hub_initiated_lpm = 1,
4049 };
4050
4051 module_usb_driver(lan78xx_driver);
4052
4053 MODULE_AUTHOR(DRIVER_AUTHOR);
4054 MODULE_DESCRIPTION(DRIVER_DESC);
4055 MODULE_LICENSE("GPL");