drivers/net/phy/phy.c (mirror_ubuntu-bionic-kernel.git)
1 /* Framework for configuring and reading PHY devices
2 * Based on code in sungem_phy.c and gianfar_phy.c
3 *
4 * Author: Andy Fleming
5 *
6 * Copyright (c) 2004 Freescale Semiconductor, Inc.
7 * Copyright (c) 2006, 2007 Maciej W. Rozycki
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 */
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18 #include <linux/kernel.h>
19 #include <linux/string.h>
20 #include <linux/errno.h>
21 #include <linux/unistd.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/skbuff.h>
27 #include <linux/mm.h>
28 #include <linux/module.h>
29 #include <linux/mii.h>
30 #include <linux/ethtool.h>
31 #include <linux/phy.h>
32 #include <linux/timer.h>
33 #include <linux/workqueue.h>
34 #include <linux/mdio.h>
35 #include <linux/io.h>
36 #include <linux/uaccess.h>
37 #include <linux/atomic.h>
38
39 #include <asm/irq.h>
40
41 static const char *phy_speed_to_str(int speed)
42 {
43 switch (speed) {
44 case SPEED_10:
45 return "10Mbps";
46 case SPEED_100:
47 return "100Mbps";
48 case SPEED_1000:
49 return "1Gbps";
50 case SPEED_2500:
51 return "2.5Gbps";
52 case SPEED_10000:
53 return "10Gbps";
54 case SPEED_UNKNOWN:
55 return "Unknown";
56 default:
57 return "Unsupported (update phy.c)";
58 }
59 }
60
61 /**
62 * phy_print_status - Convenience function to print out the current phy status
63 * @phydev: the phy_device struct
64 */
65 void phy_print_status(struct phy_device *phydev)
66 {
67 if (phydev->link) {
68 netdev_info(phydev->attached_dev,
69 "Link is Up - %s/%s - flow control %s\n",
70 phy_speed_to_str(phydev->speed),
71 DUPLEX_FULL == phydev->duplex ? "Full" : "Half",
72 phydev->pause ? "rx/tx" : "off");
73 } else {
74 netdev_info(phydev->attached_dev, "Link is Down\n");
75 }
76 }
77 EXPORT_SYMBOL(phy_print_status);
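/* Usage sketch (not part of this file): phy_print_status() is normally
 * called from a MAC driver's adjust_link callback once the link state has
 * actually changed.  Everything named foo_* below, including the cached
 * old_* fields in the hypothetical driver-private struct foo_priv, is
 * invented purely to illustrate the calling pattern.
 */
static void foo_adjust_link(struct net_device *ndev)
{
	struct foo_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = priv->phydev;

	if (phydev->link != priv->old_link ||
	    phydev->speed != priv->old_speed ||
	    phydev->duplex != priv->old_duplex) {
		/* reprogram the MAC for the new speed/duplex here */
		priv->old_link = phydev->link;
		priv->old_speed = phydev->speed;
		priv->old_duplex = phydev->duplex;
		phy_print_status(phydev);
	}
}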
78
79 /**
80 * phy_clear_interrupt - Ack the phy device's interrupt
81 * @phydev: the phy_device struct
82 *
83 * If the @phydev driver has an ack_interrupt function, call it to
84 * ack and clear the phy device's interrupt.
85 *
86 * Returns 0 on success or < 0 on error.
87 */
88 static int phy_clear_interrupt(struct phy_device *phydev)
89 {
90 if (phydev->drv->ack_interrupt)
91 return phydev->drv->ack_interrupt(phydev);
92
93 return 0;
94 }
95
96 /**
97 * phy_config_interrupt - configure the PHY device for the requested interrupts
98 * @phydev: the phy_device struct
99 * @interrupts: interrupt flags to configure for this @phydev
100 *
101 * Returns 0 on success or < 0 on error.
102 */
103 static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
104 {
105 phydev->interrupts = interrupts;
106 if (phydev->drv->config_intr)
107 return phydev->drv->config_intr(phydev);
108
109 return 0;
110 }
111
112
113 /**
114 * phy_aneg_done - return auto-negotiation status
115 * @phydev: target phy_device struct
116 *
117 * Description: Return the auto-negotiation status from this @phydev.
118 * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
119 * is still pending.
120 */
121 static inline int phy_aneg_done(struct phy_device *phydev)
122 {
123 if (phydev->drv->aneg_done)
124 return phydev->drv->aneg_done(phydev);
125
126 return genphy_aneg_done(phydev);
127 }
128
129 /* A structure for mapping a particular speed and duplex
130 * combination to a particular SUPPORTED and ADVERTISED value
131 */
132 struct phy_setting {
133 int speed;
134 int duplex;
135 u32 setting;
136 };
137
138 /* A mapping of all SUPPORTED settings to speed/duplex */
139 static const struct phy_setting settings[] = {
140 {
141 .speed = SPEED_10000,
142 .duplex = DUPLEX_FULL,
143 .setting = SUPPORTED_10000baseKR_Full,
144 },
145 {
146 .speed = SPEED_10000,
147 .duplex = DUPLEX_FULL,
148 .setting = SUPPORTED_10000baseKX4_Full,
149 },
150 {
151 .speed = SPEED_10000,
152 .duplex = DUPLEX_FULL,
153 .setting = SUPPORTED_10000baseT_Full,
154 },
155 {
156 .speed = SPEED_2500,
157 .duplex = DUPLEX_FULL,
158 .setting = SUPPORTED_2500baseX_Full,
159 },
160 {
161 .speed = SPEED_1000,
162 .duplex = DUPLEX_FULL,
163 .setting = SUPPORTED_1000baseKX_Full,
164 },
165 {
166 .speed = SPEED_1000,
167 .duplex = DUPLEX_FULL,
168 .setting = SUPPORTED_1000baseT_Full,
169 },
170 {
171 .speed = SPEED_1000,
172 .duplex = DUPLEX_HALF,
173 .setting = SUPPORTED_1000baseT_Half,
174 },
175 {
176 .speed = SPEED_100,
177 .duplex = DUPLEX_FULL,
178 .setting = SUPPORTED_100baseT_Full,
179 },
180 {
181 .speed = SPEED_100,
182 .duplex = DUPLEX_HALF,
183 .setting = SUPPORTED_100baseT_Half,
184 },
185 {
186 .speed = SPEED_10,
187 .duplex = DUPLEX_FULL,
188 .setting = SUPPORTED_10baseT_Full,
189 },
190 {
191 .speed = SPEED_10,
192 .duplex = DUPLEX_HALF,
193 .setting = SUPPORTED_10baseT_Half,
194 },
195 };
196
197 #define MAX_NUM_SETTINGS ARRAY_SIZE(settings)
198
199 /**
200 * phy_find_setting - find a PHY settings array entry that matches speed & duplex
201 * @speed: speed to match
202 * @duplex: duplex to match
203 *
204 * Description: Searches the settings array for the setting which
205 * matches the desired speed and duplex, and returns the index
206 * of that setting. Returns the index of the last setting if
207 * none of the others match.
208 */
209 static inline unsigned int phy_find_setting(int speed, int duplex)
210 {
211 unsigned int idx = 0;
212
213 while (idx < ARRAY_SIZE(settings) &&
214 (settings[idx].speed != speed || settings[idx].duplex != duplex))
215 idx++;
216
217 return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
218 }
219
220 /**
221 * phy_find_valid - find a PHY setting that matches the requested features mask
222 * @idx: The first index in settings[] to search
223 * @features: A mask of the valid settings
224 *
225 * Description: Returns the index of the first valid setting less
226 * than or equal to the one pointed to by idx, as determined by
227 * the mask in features. Returns the index of the last setting
228 * if nothing else matches.
229 */
230 static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
231 {
232 while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
233 idx++;
234
235 return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
236 }
237
238 /**
239 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
240 * @phydev: the target phy_device struct
241 *
242 * Description: Make sure the PHY is set to supported speeds and
243 * duplexes. Drop down by one in this order: 1000/FULL,
244 * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
245 */
246 static void phy_sanitize_settings(struct phy_device *phydev)
247 {
248 u32 features = phydev->supported;
249 unsigned int idx;
250
251 /* Sanitize settings based on PHY capabilities */
252 if ((features & SUPPORTED_Autoneg) == 0)
253 phydev->autoneg = AUTONEG_DISABLE;
254
255 idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
256 features);
257
258 phydev->speed = settings[idx].speed;
259 phydev->duplex = settings[idx].duplex;
260 }
261
262 /**
263 * phy_ethtool_sset - generic ethtool sset function, handles all the details
264 * @phydev: target phy_device struct
265 * @cmd: ethtool_cmd
266 *
267 * A few notes about parameter checking:
268 * - We don't set port or transceiver, so we don't care what they
269 * were set to.
270 * - phy_start_aneg() will make sure forced settings are sane, and
271 * choose the next best ones from the ones selected, so we don't
272 * care if ethtool tries to give us bad values.
273 */
274 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
275 {
276 u32 speed = ethtool_cmd_speed(cmd);
277
278 if (cmd->phy_address != phydev->addr)
279 return -EINVAL;
280
281 /* We make sure that we don't pass unsupported values into the PHY */
282 cmd->advertising &= phydev->supported;
283
284 /* Verify the settings we care about. */
285 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
286 return -EINVAL;
287
288 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
289 return -EINVAL;
290
291 if (cmd->autoneg == AUTONEG_DISABLE &&
292 ((speed != SPEED_1000 &&
293 speed != SPEED_100 &&
294 speed != SPEED_10) ||
295 (cmd->duplex != DUPLEX_HALF &&
296 cmd->duplex != DUPLEX_FULL)))
297 return -EINVAL;
298
299 phydev->autoneg = cmd->autoneg;
300
301 phydev->speed = speed;
302
303 phydev->advertising = cmd->advertising;
304
305 if (AUTONEG_ENABLE == cmd->autoneg)
306 phydev->advertising |= ADVERTISED_Autoneg;
307 else
308 phydev->advertising &= ~ADVERTISED_Autoneg;
309
310 phydev->duplex = cmd->duplex;
311
312 /* Restart the PHY */
313 phy_start_aneg(phydev);
314
315 return 0;
316 }
317 EXPORT_SYMBOL(phy_ethtool_sset);
318
319 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
320 {
321 cmd->supported = phydev->supported;
322
323 cmd->advertising = phydev->advertising;
324 cmd->lp_advertising = phydev->lp_advertising;
325
326 ethtool_cmd_speed_set(cmd, phydev->speed);
327 cmd->duplex = phydev->duplex;
328 if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
329 cmd->port = PORT_BNC;
330 else
331 cmd->port = PORT_MII;
332 cmd->phy_address = phydev->addr;
333 cmd->transceiver = phy_is_internal(phydev) ?
334 XCVR_INTERNAL : XCVR_EXTERNAL;
335 cmd->autoneg = phydev->autoneg;
336
337 return 0;
338 }
339 EXPORT_SYMBOL(phy_ethtool_gset);
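/* Usage sketch: MAC drivers typically forward their ethtool
 * {get,set}_settings operations straight to these helpers.  foo_priv is a
 * hypothetical driver-private struct caching the phy_device pointer obtained
 * from phy_connect(); all foo_* names are invented for illustration.
 */
static int foo_ethtool_get_settings(struct net_device *ndev,
				    struct ethtool_cmd *cmd)
{
	struct foo_priv *priv = netdev_priv(ndev);

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int foo_ethtool_set_settings(struct net_device *ndev,
				    struct ethtool_cmd *cmd)
{
	struct foo_priv *priv = netdev_priv(ndev);

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}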
340
341 /**
342 * phy_mii_ioctl - generic PHY MII ioctl interface
343 * @phydev: the phy_device struct
344 * @ifr: &struct ifreq for socket ioctl's
345 * @cmd: ioctl cmd to execute
346 *
347 * Note that this function is currently incompatible with the
348 * PHYCONTROL layer. It changes registers without regard to
349 * current state. Use at own risk.
350 */
351 int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
352 {
353 struct mii_ioctl_data *mii_data = if_mii(ifr);
354 u16 val = mii_data->val_in;
355
356 switch (cmd) {
357 case SIOCGMIIPHY:
358 mii_data->phy_id = phydev->addr;
359 /* fall through */
360
361 case SIOCGMIIREG:
362 mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
363 mii_data->reg_num);
364 return 0;
365
366 case SIOCSMIIREG:
367 if (mii_data->phy_id == phydev->addr) {
368 switch (mii_data->reg_num) {
369 case MII_BMCR:
370 if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0)
371 phydev->autoneg = AUTONEG_DISABLE;
372 else
373 phydev->autoneg = AUTONEG_ENABLE;
374 if (!phydev->autoneg && (val & BMCR_FULLDPLX))
375 phydev->duplex = DUPLEX_FULL;
376 else
377 phydev->duplex = DUPLEX_HALF;
378 if (!phydev->autoneg && (val & BMCR_SPEED1000))
379 phydev->speed = SPEED_1000;
380 else if (!phydev->autoneg &&
381 (val & BMCR_SPEED100))
382 phydev->speed = SPEED_100;
383 break;
384 case MII_ADVERTISE:
385 phydev->advertising = val;
386 break;
387 default:
388 /* do nothing */
389 break;
390 }
391 }
392
393 mdiobus_write(phydev->bus, mii_data->phy_id,
394 mii_data->reg_num, val);
395
396 if (mii_data->reg_num == MII_BMCR &&
397 val & BMCR_RESET)
398 return phy_init_hw(phydev);
399 return 0;
400
401 case SIOCSHWTSTAMP:
402 if (phydev->drv->hwtstamp)
403 return phydev->drv->hwtstamp(phydev, ifr);
404 /* fall through */
405
406 default:
407 return -EOPNOTSUPP;
408 }
409 }
410 EXPORT_SYMBOL(phy_mii_ioctl);
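/* Usage sketch: a driver's ndo_do_ioctl hook can delegate the MII ioctls
 * (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG/SIOCSHWTSTAMP) to phy_mii_ioctl().
 * foo_ioctl and foo_priv are hypothetical names.
 */
static int foo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	struct foo_priv *priv = netdev_priv(ndev);

	if (!netif_running(ndev) || !priv->phydev)
		return -EINVAL;

	return phy_mii_ioctl(priv->phydev, ifr, cmd);
}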
411
412 /**
413 * phy_start_aneg - start auto-negotiation for this PHY device
414 * @phydev: the phy_device struct
415 *
416 * Description: Sanitizes the settings (if we're not autonegotiating
417 * them), and then calls the driver's config_aneg function.
418 * If the PHYCONTROL Layer is operating, we change the state to
419 * reflect the beginning of Auto-negotiation or forcing.
420 */
421 int phy_start_aneg(struct phy_device *phydev)
422 {
423 int err;
424
425 mutex_lock(&phydev->lock);
426
427 if (AUTONEG_DISABLE == phydev->autoneg)
428 phy_sanitize_settings(phydev);
429
430 err = phydev->drv->config_aneg(phydev);
431 if (err < 0)
432 goto out_unlock;
433
434 if (phydev->state != PHY_HALTED) {
435 if (AUTONEG_ENABLE == phydev->autoneg) {
436 phydev->state = PHY_AN;
437 phydev->link_timeout = PHY_AN_TIMEOUT;
438 } else {
439 phydev->state = PHY_FORCING;
440 phydev->link_timeout = PHY_FORCE_TIMEOUT;
441 }
442 }
443
444 out_unlock:
445 mutex_unlock(&phydev->lock);
446 return err;
447 }
448 EXPORT_SYMBOL(phy_start_aneg);
449
450 /**
451 * phy_start_machine - start PHY state machine tracking
452 * @phydev: the phy_device struct
453 *
454 * Description: The PHY infrastructure can run a state machine
455 * which tracks whether the PHY is starting up, negotiating,
456 * etc. This function starts the timer which tracks the state
457 * of the PHY. If you want to maintain your own state machine,
458 * do not call this function.
459 */
460 void phy_start_machine(struct phy_device *phydev)
461 {
462 queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
463 }
464
465 /**
466 * phy_stop_machine - stop the PHY state machine tracking
467 * @phydev: target phy_device struct
468 *
469 * Description: Stops the state machine timer, sets the state to UP
470 * (unless it wasn't up yet). This function must be called BEFORE
471 * phy_detach.
472 */
473 void phy_stop_machine(struct phy_device *phydev)
474 {
475 cancel_delayed_work_sync(&phydev->state_queue);
476
477 mutex_lock(&phydev->lock);
478 if (phydev->state > PHY_UP)
479 phydev->state = PHY_UP;
480 mutex_unlock(&phydev->lock);
481 }
482
483 /**
484 * phy_error - enter HALTED state for this PHY device
485 * @phydev: target phy_device struct
486 *
487 * Moves the PHY to the HALTED state in response to a read
488 * or write error, and tells the controller the link is down.
489 * Must not be called from interrupt context, or while the
490 * phydev->lock is held.
491 */
492 static void phy_error(struct phy_device *phydev)
493 {
494 mutex_lock(&phydev->lock);
495 phydev->state = PHY_HALTED;
496 mutex_unlock(&phydev->lock);
497 }
498
499 /**
500 * phy_interrupt - PHY interrupt handler
501 * @irq: interrupt line
502 * @phy_dat: phy_device pointer
503 *
504 * Description: When a PHY interrupt occurs, the handler disables
505 * interrupts, and schedules a work task to clear the interrupt.
506 */
507 static irqreturn_t phy_interrupt(int irq, void *phy_dat)
508 {
509 struct phy_device *phydev = phy_dat;
510
511 if (PHY_HALTED == phydev->state)
512 return IRQ_NONE; /* It can't be ours. */
513
514 /* The MDIO bus is not allowed to be written in interrupt
515 * context, so we need to disable the irq here. A work
516 * queue will write the PHY to disable and clear the
517 * interrupt, and then reenable the irq line.
518 */
519 disable_irq_nosync(irq);
520 atomic_inc(&phydev->irq_disable);
521
522 queue_work(system_power_efficient_wq, &phydev->phy_queue);
523
524 return IRQ_HANDLED;
525 }
526
527 /**
528 * phy_enable_interrupts - Enable the interrupts from the PHY side
529 * @phydev: target phy_device struct
530 */
531 static int phy_enable_interrupts(struct phy_device *phydev)
532 {
533 int err = phy_clear_interrupt(phydev);
534
535 if (err < 0)
536 return err;
537
538 return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
539 }
540
541 /**
542 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
543 * @phydev: target phy_device struct
544 */
545 static int phy_disable_interrupts(struct phy_device *phydev)
546 {
547 int err;
548
549 /* Disable PHY interrupts */
550 err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
551 if (err)
552 goto phy_err;
553
554 /* Clear the interrupt */
555 err = phy_clear_interrupt(phydev);
556 if (err)
557 goto phy_err;
558
559 return 0;
560
561 phy_err:
562 phy_error(phydev);
563
564 return err;
565 }
566
567 /**
568 * phy_start_interrupts - request and enable interrupts for a PHY device
569 * @phydev: target phy_device struct
570 *
571 * Description: Request the interrupt for the given PHY.
572 * If this fails, then we set irq to PHY_POLL.
573 * Otherwise, we enable the interrupts in the PHY.
574 * This should only be called with a valid IRQ number.
575 * Returns 0 on success or < 0 on error.
576 */
577 int phy_start_interrupts(struct phy_device *phydev)
578 {
579 atomic_set(&phydev->irq_disable, 0);
580 if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
581 phydev) < 0) {
582 pr_warn("%s: Can't get IRQ %d (PHY)\n",
583 phydev->bus->name, phydev->irq);
584 phydev->irq = PHY_POLL;
585 return 0;
586 }
587
588 return phy_enable_interrupts(phydev);
589 }
590 EXPORT_SYMBOL(phy_start_interrupts);
591
592 /**
593 * phy_stop_interrupts - disable interrupts from a PHY device
594 * @phydev: target phy_device struct
595 */
596 int phy_stop_interrupts(struct phy_device *phydev)
597 {
598 int err = phy_disable_interrupts(phydev);
599
600 if (err)
601 phy_error(phydev);
602
603 free_irq(phydev->irq, phydev);
604
605 /* Cannot call flush_scheduled_work() here as desired because
606 * of rtnl_lock(), but we do not really care about what would
607 * be done, except from enable_irq(), so cancel any work
608 * possibly pending and take care of the matter below.
609 */
610 cancel_work_sync(&phydev->phy_queue);
611 /* If work indeed has been cancelled, disable_irq() will have
612 * been left unbalanced from phy_interrupt() and enable_irq()
613 * has to be called so that other devices on the line work.
614 */
615 while (atomic_dec_return(&phydev->irq_disable) >= 0)
616 enable_irq(phydev->irq);
617
618 return err;
619 }
620 EXPORT_SYMBOL(phy_stop_interrupts);
621
622 /**
623 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
624 * @work: work_struct that describes the work to be done
625 */
626 void phy_change(struct work_struct *work)
627 {
628 struct phy_device *phydev =
629 container_of(work, struct phy_device, phy_queue);
630
631 if (phydev->drv->did_interrupt &&
632 !phydev->drv->did_interrupt(phydev))
633 goto ignore;
634
635 if (phy_disable_interrupts(phydev))
636 goto phy_err;
637
638 mutex_lock(&phydev->lock);
639 if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
640 phydev->state = PHY_CHANGELINK;
641 mutex_unlock(&phydev->lock);
642
643 atomic_dec(&phydev->irq_disable);
644 enable_irq(phydev->irq);
645
646 /* Reenable interrupts */
647 if (PHY_HALTED != phydev->state &&
648 phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
649 goto irq_enable_err;
650
651 /* reschedule state queue work to run as soon as possible */
652 cancel_delayed_work_sync(&phydev->state_queue);
653 queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
654 return;
655
656 ignore:
657 atomic_dec(&phydev->irq_disable);
658 enable_irq(phydev->irq);
659 return;
660
661 irq_enable_err:
662 disable_irq(phydev->irq);
663 atomic_inc(&phydev->irq_disable);
664 phy_err:
665 phy_error(phydev);
666 }
667
668 /**
669 * phy_stop - Bring down the PHY link, and stop checking the status
670 * @phydev: target phy_device struct
671 */
672 void phy_stop(struct phy_device *phydev)
673 {
674 mutex_lock(&phydev->lock);
675
676 if (PHY_HALTED == phydev->state)
677 goto out_unlock;
678
679 if (phy_interrupt_is_valid(phydev)) {
680 /* Disable PHY Interrupts */
681 phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
682
683 /* Clear any pending interrupts */
684 phy_clear_interrupt(phydev);
685 }
686
687 phydev->state = PHY_HALTED;
688
689 out_unlock:
690 mutex_unlock(&phydev->lock);
691
692 /* Cannot call flush_scheduled_work() here as desired because
693 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
694 * will not reenable interrupts.
695 */
696 }
697 EXPORT_SYMBOL(phy_stop);
698
699 /**
700 * phy_start - start or restart a PHY device
701 * @phydev: target phy_device struct
702 *
703 * Description: Indicates the attached device's readiness to
704 * handle PHY-related work. Used during startup to start the
705 * PHY, and after a call to phy_stop() to resume operation.
706 * Also used to indicate the MDIO bus has cleared an error
707 * condition.
708 */
709 void phy_start(struct phy_device *phydev)
710 {
711 mutex_lock(&phydev->lock);
712
713 switch (phydev->state) {
714 case PHY_STARTING:
715 phydev->state = PHY_PENDING;
716 break;
717 case PHY_READY:
718 phydev->state = PHY_UP;
719 break;
720 case PHY_HALTED:
721 phydev->state = PHY_RESUMING;
722 default:
723 break;
724 }
725 mutex_unlock(&phydev->lock);
726 }
727 EXPORT_SYMBOL(phy_start);
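/* Usage sketch: the usual open/stop pairing in a MAC driver, showing where
 * phy_start() and phy_stop() fit.  The foo_* names are invented and error
 * handling is trimmed to the calls relevant to phylib; phydev is assumed to
 * have been obtained earlier, e.g. via phy_connect() at probe time.
 */
static int foo_open(struct net_device *ndev)
{
	struct foo_priv *priv = netdev_priv(ndev);

	phy_start(priv->phydev);
	netif_start_queue(ndev);

	return 0;
}

static int foo_stop(struct net_device *ndev)
{
	struct foo_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	phy_stop(priv->phydev);

	return 0;
}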
728
729 /**
730 * phy_state_machine - Handle the state machine
731 * @work: work_struct that describes the work to be done
732 */
733 void phy_state_machine(struct work_struct *work)
734 {
735 struct delayed_work *dwork = to_delayed_work(work);
736 struct phy_device *phydev =
737 container_of(dwork, struct phy_device, state_queue);
738 bool needs_aneg = false, do_suspend = false, do_resume = false;
739 int err = 0;
740
741 mutex_lock(&phydev->lock);
742
743 if (phydev->drv->link_change_notify)
744 phydev->drv->link_change_notify(phydev);
745
746 switch (phydev->state) {
747 case PHY_DOWN:
748 case PHY_STARTING:
749 case PHY_READY:
750 case PHY_PENDING:
751 break;
752 case PHY_UP:
753 needs_aneg = true;
754
755 phydev->link_timeout = PHY_AN_TIMEOUT;
756
757 break;
758 case PHY_AN:
759 err = phy_read_status(phydev);
760 if (err < 0)
761 break;
762
763 /* If the link is down, give up on negotiation for now */
764 if (!phydev->link) {
765 phydev->state = PHY_NOLINK;
766 netif_carrier_off(phydev->attached_dev);
767 phydev->adjust_link(phydev->attached_dev);
768 break;
769 }
770
771 /* Check if negotiation is done. Break if there's an error */
772 err = phy_aneg_done(phydev);
773 if (err < 0)
774 break;
775
776 /* If AN is done, we're running */
777 if (err > 0) {
778 phydev->state = PHY_RUNNING;
779 netif_carrier_on(phydev->attached_dev);
780 phydev->adjust_link(phydev->attached_dev);
781
782 } else if (0 == phydev->link_timeout--)
783 needs_aneg = true;
784 break;
785 case PHY_NOLINK:
786 err = phy_read_status(phydev);
787 if (err)
788 break;
789
790 if (phydev->link) {
791 if (AUTONEG_ENABLE == phydev->autoneg) {
792 err = phy_aneg_done(phydev);
793 if (err < 0)
794 break;
795
796 if (!err) {
797 phydev->state = PHY_AN;
798 phydev->link_timeout = PHY_AN_TIMEOUT;
799 break;
800 }
801 }
802 phydev->state = PHY_RUNNING;
803 netif_carrier_on(phydev->attached_dev);
804 phydev->adjust_link(phydev->attached_dev);
805 }
806 break;
807 case PHY_FORCING:
808 err = genphy_update_link(phydev);
809 if (err)
810 break;
811
812 if (phydev->link) {
813 phydev->state = PHY_RUNNING;
814 netif_carrier_on(phydev->attached_dev);
815 } else {
816 if (0 == phydev->link_timeout--)
817 needs_aneg = true;
818 }
819
820 phydev->adjust_link(phydev->attached_dev);
821 break;
822 case PHY_RUNNING:
823 /* Only register a CHANGE if we are
824 * polling or ignoring interrupts
825 */
826 if (!phy_interrupt_is_valid(phydev))
827 phydev->state = PHY_CHANGELINK;
828 break;
829 case PHY_CHANGELINK:
830 err = phy_read_status(phydev);
831 if (err)
832 break;
833
834 if (phydev->link) {
835 phydev->state = PHY_RUNNING;
836 netif_carrier_on(phydev->attached_dev);
837 } else {
838 phydev->state = PHY_NOLINK;
839 netif_carrier_off(phydev->attached_dev);
840 }
841
842 phydev->adjust_link(phydev->attached_dev);
843
844 if (phy_interrupt_is_valid(phydev))
845 err = phy_config_interrupt(phydev,
846 PHY_INTERRUPT_ENABLED);
847 break;
848 case PHY_HALTED:
849 if (phydev->link) {
850 phydev->link = 0;
851 netif_carrier_off(phydev->attached_dev);
852 phydev->adjust_link(phydev->attached_dev);
853 do_suspend = true;
854 }
855 break;
856 case PHY_RESUMING:
857 err = phy_clear_interrupt(phydev);
858 if (err)
859 break;
860
861 err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
862 if (err)
863 break;
864
865 if (AUTONEG_ENABLE == phydev->autoneg) {
866 err = phy_aneg_done(phydev);
867 if (err < 0)
868 break;
869
870 /* err > 0 if AN is done.
871 * Otherwise, it's 0, and we're still waiting for AN
872 */
873 if (err > 0) {
874 err = phy_read_status(phydev);
875 if (err)
876 break;
877
878 if (phydev->link) {
879 phydev->state = PHY_RUNNING;
880 netif_carrier_on(phydev->attached_dev);
881 } else {
882 phydev->state = PHY_NOLINK;
883 }
884 phydev->adjust_link(phydev->attached_dev);
885 } else {
886 phydev->state = PHY_AN;
887 phydev->link_timeout = PHY_AN_TIMEOUT;
888 }
889 } else {
890 err = phy_read_status(phydev);
891 if (err)
892 break;
893
894 if (phydev->link) {
895 phydev->state = PHY_RUNNING;
896 netif_carrier_on(phydev->attached_dev);
897 } else {
898 phydev->state = PHY_NOLINK;
899 }
900 phydev->adjust_link(phydev->attached_dev);
901 }
902 do_resume = true;
903 break;
904 }
905
906 mutex_unlock(&phydev->lock);
907
908 if (needs_aneg)
909 err = phy_start_aneg(phydev);
910 else if (do_suspend)
911 phy_suspend(phydev);
912 else if (do_resume)
913 phy_resume(phydev);
914
915 if (err < 0)
916 phy_error(phydev);
917
918 queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
919 PHY_STATE_TIME * HZ);
920 }
921
922 void phy_mac_interrupt(struct phy_device *phydev, int new_link)
923 {
924 cancel_work_sync(&phydev->phy_queue);
925 phydev->link = new_link;
926 schedule_work(&phydev->phy_queue);
927 }
928 EXPORT_SYMBOL(phy_mac_interrupt);
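/* Usage sketch: a MAC whose hardware latches PHY link events in its own
 * interrupt status register can feed them to phylib here instead of relying
 * on polling (the PHY is then typically attached with irq set to
 * PHY_IGNORE_INTERRUPT).  All foo_* names, fields and register bits below
 * are invented for illustration only.
 */
static irqreturn_t foo_link_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct foo_priv *priv = netdev_priv(ndev);
	u32 status = readl(priv->regs + FOO_IRQ_STATUS);

	if (status & FOO_IRQ_LINK_EVENT)
		phy_mac_interrupt(priv->phydev,
				  !!(status & FOO_IRQ_LINK_UP));

	return IRQ_HANDLED;
}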
929
930 static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
931 int addr)
932 {
933 /* Write the desired MMD Devad */
934 bus->write(bus, addr, MII_MMD_CTRL, devad);
935
936 /* Write the desired MMD register address */
937 bus->write(bus, addr, MII_MMD_DATA, prtad);
938
939 /* Select the function: DATA with no post-increment */
940 bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
941 }
942
943 /**
944 * phy_read_mmd_indirect - reads data from the MMD registers
945 * @phydev: The PHY device
946 * @prtad: MMD Address
947 * @devad: MMD DEVAD
948 * @addr: PHY address on the MII bus
949 *
950 * Description: reads data from the MMD registers (clause 22 access to the
951 * clause 45 registers) of the specified PHY address.
952 * To read these registers the sequence is:
953 * 1) Write reg 13 // DEVAD
954 * 2) Write reg 14 // MMD register address
955 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
956 * 4) Read reg 14 // Read MMD data
957 */
958 int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
959 int devad, int addr)
960 {
961 struct phy_driver *phydrv = phydev->drv;
962 int value = -1;
963
964 if (phydrv->read_mmd_indirect == NULL) {
965 mmd_phy_indirect(phydev->bus, prtad, devad, addr);
966
967 /* Read the content of the MMD's selected register */
968 value = phydev->bus->read(phydev->bus, addr, MII_MMD_DATA);
969 } else {
970 value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr);
971 }
972 return value;
973 }
974 EXPORT_SYMBOL(phy_read_mmd_indirect);
975
976 /**
977 * phy_write_mmd_indirect - writes data to the MMD registers
978 * @phydev: The PHY device
979 * @prtad: MMD Address
980 * @devad: MMD DEVAD
981 * @addr: PHY address on the MII bus
982 * @data: data to write in the MMD register
983 *
984 * Description: writes data to the MMD registers (clause 22 access to the
985 * clause 45 registers) of the specified PHY address.
986 * To write these registers the sequence is:
987 * 1) Write reg 13 // DEVAD
988 * 2) Write reg 14 // MMD register address
989 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
990 * 4) Write reg 14 // Write MMD data
991 */
992 void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
993 int devad, int addr, u32 data)
994 {
995 struct phy_driver *phydrv = phydev->drv;
996
997 if (phydrv->write_mmd_indirect == NULL) {
998 mmd_phy_indirect(phydev->bus, prtad, devad, addr);
999
1000 /* Write the data into MMD's selected register */
1001 phydev->bus->write(phydev->bus, addr, MII_MMD_DATA, data);
1002 } else {
1003 phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
1004 }
1005 }
1006 EXPORT_SYMBOL(phy_write_mmd_indirect);
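/* Usage sketch: with these helpers exported, a PHY or MAC driver can
 * read-modify-write a clause 45 register through the clause 22 indirect
 * window, e.g. a register in the PCS MMD (device address 3).  FOO_PCS_REG
 * and FOO_PCS_ENABLE are made-up register/bit names.
 */
static int foo_enable_pcs_feature(struct phy_device *phydev)
{
	int val;

	val = phy_read_mmd_indirect(phydev, FOO_PCS_REG, MDIO_MMD_PCS,
				    phydev->addr);
	if (val < 0)
		return val;

	phy_write_mmd_indirect(phydev, FOO_PCS_REG, MDIO_MMD_PCS,
			       phydev->addr, val | FOO_PCS_ENABLE);

	return 0;
}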
1007
1008 /**
1009 * phy_init_eee - init and check the EEE feature
1010 * @phydev: target phy_device struct
1011 * @clk_stop_enable: PHY may stop the clock during LPI
1012 *
1013 * Description: checks whether Energy-Efficient Ethernet (EEE) is
1014 * supported by looking at MMD registers 3.20 and 7.60/61, and
1015 * programs MMD register 3.0 to set the "Clock stop enable"
1016 * bit if required.
1017 */
1018 int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1019 {
1020 /* According to 802.3az, EEE is supported only in full-duplex mode.
1021 * The EEE feature is also only active when the core is operating
1022 * with MII, GMII or RGMII.
1023 */
1024 if ((phydev->duplex == DUPLEX_FULL) &&
1025 ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
1026 (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
1027 (phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
1028 int eee_lp, eee_cap, eee_adv;
1029 u32 lp, cap, adv;
1030 int status;
1031 unsigned int idx;
1032
1033 /* Read phy status to properly get the right settings */
1034 status = phy_read_status(phydev);
1035 if (status)
1036 return status;
1037
1038 /* First check if the EEE ability is supported */
1039 eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
1040 MDIO_MMD_PCS, phydev->addr);
1041 if (eee_cap < 0)
1042 return eee_cap;
1043
1044 cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
1045 if (!cap)
1046 return -EPROTONOSUPPORT;
1047
1048 /* Check which link settings were negotiated and verify them
1049 * against the EEE advertising registers.
1050 */
1051 eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
1052 MDIO_MMD_AN, phydev->addr);
1053 if (eee_lp < 0)
1054 return eee_lp;
1055
1056 eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
1057 MDIO_MMD_AN, phydev->addr);
1058 if (eee_adv < 0)
1059 return eee_adv;
1060
1061 adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
1062 lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
1063 idx = phy_find_setting(phydev->speed, phydev->duplex);
1064 if (!(lp & adv & settings[idx].setting))
1065 return -EPROTONOSUPPORT;
1066
1067 if (clk_stop_enable) {
1068 /* Configure the PHY to stop receiving xMII
1069 * clock while it is signaling LPI.
1070 */
1071 int val = phy_read_mmd_indirect(phydev, MDIO_CTRL1,
1072 MDIO_MMD_PCS,
1073 phydev->addr);
1074 if (val < 0)
1075 return val;
1076
1077 val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
1078 phy_write_mmd_indirect(phydev, MDIO_CTRL1,
1079 MDIO_MMD_PCS, phydev->addr,
1080 val);
1081 }
1082
1083 return 0; /* EEE supported */
1084 }
1085
1086 return -EPROTONOSUPPORT;
1087 }
1088 EXPORT_SYMBOL(phy_init_eee);
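/* Usage sketch: a MAC driver typically calls phy_init_eee() once the link
 * is up and only enables its own LPI logic when the call succeeds.  The
 * second argument asks the PHY to stop the xMII clock during LPI.
 * foo_priv and its eee_enabled flag are hypothetical.
 */
static void foo_eee_setup(struct foo_priv *priv)
{
	priv->eee_enabled = phy_init_eee(priv->phydev, true) == 0;
}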
1089
1090 /**
1091 * phy_get_eee_err - report the EEE wake error count
1092 * @phydev: target phy_device struct
1093 *
1094 * Description: reports the number of times the PHY failed to
1095 * complete its normal wake sequence.
1096 */
1097 int phy_get_eee_err(struct phy_device *phydev)
1098 {
1099 return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR,
1100 MDIO_MMD_PCS, phydev->addr);
1101 }
1102 EXPORT_SYMBOL(phy_get_eee_err);
1103
1104 /**
1105 * phy_ethtool_get_eee - get EEE supported and status
1106 * @phydev: target phy_device struct
1107 * @data: ethtool_eee data
1108 *
1109 * Description: reports the Supported/Advertised/LP Advertised EEE
1110 * capabilities.
1111 */
1112 int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
1113 {
1114 int val;
1115
1116 /* Get Supported EEE */
1117 val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
1118 MDIO_MMD_PCS, phydev->addr);
1119 if (val < 0)
1120 return val;
1121 data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
1122
1123 /* Get advertisement EEE */
1124 val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
1125 MDIO_MMD_AN, phydev->addr);
1126 if (val < 0)
1127 return val;
1128 data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
1129
1130 /* Get LP advertisement EEE */
1131 val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
1132 MDIO_MMD_AN, phydev->addr);
1133 if (val < 0)
1134 return val;
1135 data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
1136
1137 return 0;
1138 }
1139 EXPORT_SYMBOL(phy_ethtool_get_eee);
1140
1141 /**
1142 * phy_ethtool_set_eee - set EEE supported and status
1143 * @phydev: target phy_device struct
1144 * @data: ethtool_eee data
1145 *
1146 * Description: programs the EEE Advertisement register.
1147 */
1148 int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
1149 {
1150 int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
1151
1152 phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
1153 phydev->addr, val);
1154
1155 return 0;
1156 }
1157 EXPORT_SYMBOL(phy_ethtool_set_eee);
1158
1159 int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
1160 {
1161 if (phydev->drv->set_wol)
1162 return phydev->drv->set_wol(phydev, wol);
1163
1164 return -EOPNOTSUPP;
1165 }
1166 EXPORT_SYMBOL(phy_ethtool_set_wol);
1167
1168 void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
1169 {
1170 if (phydev->drv->get_wol)
1171 phydev->drv->get_wol(phydev, wol);
1172 }
1173 EXPORT_SYMBOL(phy_ethtool_get_wol);
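/* Usage sketch: Wake-on-LAN ethtool hooks in a MAC driver can simply defer
 * to the PHY when the PHY driver implements get_wol/set_wol.  The foo_*
 * names are invented for illustration.
 */
static void foo_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct foo_priv *priv = netdev_priv(ndev);

	wol->supported = 0;
	wol->wolopts = 0;
	phy_ethtool_get_wol(priv->phydev, wol);
}

static int foo_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct foo_priv *priv = netdev_priv(ndev);

	return phy_ethtool_set_wol(priv->phydev, wol);
}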