/* Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <asm/irq.h>

#define PHY_STATE_STR(_state)                   \
        case PHY_##_state:                      \
                return __stringify(_state);     \

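/*
 * For example, PHY_STATE_STR(RUNNING) expands to
 *
 *      case PHY_RUNNING:
 *              return __stringify(RUNNING);
 *
 * which maps the PHY_RUNNING state to the string "RUNNING".
 */
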
static const char *phy_state_to_str(enum phy_state st)
{
        switch (st) {
        PHY_STATE_STR(DOWN)
        PHY_STATE_STR(STARTING)
        PHY_STATE_STR(READY)
        PHY_STATE_STR(PENDING)
        PHY_STATE_STR(UP)
        PHY_STATE_STR(AN)
        PHY_STATE_STR(RUNNING)
        PHY_STATE_STR(NOLINK)
        PHY_STATE_STR(FORCING)
        PHY_STATE_STR(CHANGELINK)
        PHY_STATE_STR(HALTED)
        PHY_STATE_STR(RESUMING)
        }

        return NULL;
}

/**
 * phy_print_status - Convenience function to print out the current phy status
 * @phydev: the phy_device struct
 */
void phy_print_status(struct phy_device *phydev)
{
        if (phydev->link) {
                netdev_info(phydev->attached_dev,
                            "Link is Up - %s/%s - flow control %s\n",
                            phy_speed_to_str(phydev->speed),
                            phy_duplex_to_str(phydev->duplex),
                            phydev->pause ? "rx/tx" : "off");
        } else {
                netdev_info(phydev->attached_dev, "Link is Down\n");
        }
}
EXPORT_SYMBOL(phy_print_status);
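
/* The above produces a log line along the lines of (assuming a device named
 * eth0 with a 1Gbps full-duplex link and symmetric pause negotiated):
 *
 *      eth0: Link is Up - 1Gbps/Full - flow control rx/tx
 */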

/**
 * phy_clear_interrupt - Ack the phy device's interrupt
 * @phydev: the phy_device struct
 *
 * If the @phydev driver has an ack_interrupt function, call it to
 * ack and clear the phy device's interrupt.
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_clear_interrupt(struct phy_device *phydev)
{
        if (phydev->drv->ack_interrupt)
                return phydev->drv->ack_interrupt(phydev);

        return 0;
}

/**
 * phy_config_interrupt - configure the PHY device for the requested interrupts
 * @phydev: the phy_device struct
 * @interrupts: interrupt flags to configure for this @phydev
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
        phydev->interrupts = interrupts;
        if (phydev->drv->config_intr)
                return phydev->drv->config_intr(phydev);

        return 0;
}

/**
 * phy_restart_aneg - restart auto-negotiation
 * @phydev: target phy_device struct
 *
 * Restart the autonegotiation on @phydev. Returns >= 0 on success or
 * negative errno on error.
 */
int phy_restart_aneg(struct phy_device *phydev)
{
        int ret;

        if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
                ret = genphy_c45_restart_aneg(phydev);
        else
                ret = genphy_restart_aneg(phydev);

        return ret;
}
EXPORT_SYMBOL_GPL(phy_restart_aneg);

/**
 * phy_aneg_done - return auto-negotiation status
 * @phydev: target phy_device struct
 *
 * Description: Return the auto-negotiation status from this @phydev.
 * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
 * is still pending.
 */
int phy_aneg_done(struct phy_device *phydev)
{
        if (phydev->drv && phydev->drv->aneg_done)
                return phydev->drv->aneg_done(phydev);

        /* Avoid genphy_aneg_done() if the Clause 45 PHY does not
         * implement Clause 22 registers
         */
        if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
                return -EINVAL;

        return genphy_aneg_done(phydev);
}
EXPORT_SYMBOL(phy_aneg_done);

/**
 * phy_find_valid - find a PHY setting that matches the requested parameters
 * @speed: desired speed
 * @duplex: desired duplex
 * @supported: mask of supported link modes
 *
 * Locate a supported phy setting that is, in priority order:
 * - an exact match for the specified speed and duplex mode
 * - a match for the specified speed, or slower speed
 * - the slowest supported speed
 * Returns the matched phy_setting entry, or %NULL if no supported phy
 * settings were found.
 */
static const struct phy_setting *
phy_find_valid(int speed, int duplex, u32 supported)
{
        unsigned long mask = supported;

        return phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, false);
}

/**
 * phy_supported_speeds - return all speeds currently supported by a phy device
 * @phy: The phy device to return supported speeds of.
 * @speeds: buffer to store supported speeds in.
 * @size: size of speeds buffer.
 *
 * Description: Returns the number of supported speeds, and fills the speeds
 * buffer with the supported speeds. If speeds buffer is too small to contain
 * all currently supported speeds, will return as many speeds as can fit.
 */
unsigned int phy_supported_speeds(struct phy_device *phy,
                                  unsigned int *speeds,
                                  unsigned int size)
{
        unsigned long supported = phy->supported;

        return phy_speeds(speeds, size, &supported, BITS_PER_LONG);
}

/**
 * phy_check_valid - check if there is a valid PHY setting which matches
 *                   speed, duplex, and feature mask
 * @speed: speed to match
 * @duplex: duplex to match
 * @features: A mask of the valid settings
 *
 * Description: Returns true if there is a valid setting, false otherwise.
 */
static inline bool phy_check_valid(int speed, int duplex, u32 features)
{
        unsigned long mask = features;

        return !!phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, true);
}

/**
 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
 * @phydev: the target phy_device struct
 *
 * Description: Make sure the PHY is set to supported speeds and
 * duplexes. Drop down by one in this order: 1000/FULL,
 * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
static void phy_sanitize_settings(struct phy_device *phydev)
{
        const struct phy_setting *setting;
        u32 features = phydev->supported;

        /* Sanitize settings based on PHY capabilities */
        if ((features & SUPPORTED_Autoneg) == 0)
                phydev->autoneg = AUTONEG_DISABLE;

        setting = phy_find_valid(phydev->speed, phydev->duplex, features);
        if (setting) {
                phydev->speed = setting->speed;
                phydev->duplex = setting->duplex;
        } else {
                /* We failed to find anything (no supported speeds?) */
                phydev->speed = SPEED_UNKNOWN;
                phydev->duplex = DUPLEX_UNKNOWN;
        }
}

/**
 * phy_ethtool_sset - generic ethtool sset function, handles all the details
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd
 *
 * A few notes about parameter checking:
 *
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values.
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
        u32 speed = ethtool_cmd_speed(cmd);

        if (cmd->phy_address != phydev->mdio.addr)
                return -EINVAL;

        /* We make sure that we don't pass unsupported values in to the PHY */
        cmd->advertising &= phydev->supported;

        /* Verify the settings we care about. */
        if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
                return -EINVAL;

        if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
                return -EINVAL;

        if (cmd->autoneg == AUTONEG_DISABLE &&
            ((speed != SPEED_1000 &&
              speed != SPEED_100 &&
              speed != SPEED_10) ||
             (cmd->duplex != DUPLEX_HALF &&
              cmd->duplex != DUPLEX_FULL)))
                return -EINVAL;

        phydev->autoneg = cmd->autoneg;

        phydev->speed = speed;

        phydev->advertising = cmd->advertising;

        if (AUTONEG_ENABLE == cmd->autoneg)
                phydev->advertising |= ADVERTISED_Autoneg;
        else
                phydev->advertising &= ~ADVERTISED_Autoneg;

        phydev->duplex = cmd->duplex;

        phydev->mdix_ctrl = cmd->eth_tp_mdix_ctrl;

        /* Restart the PHY */
        phy_start_aneg(phydev);

        return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);

int phy_ethtool_ksettings_set(struct phy_device *phydev,
                              const struct ethtool_link_ksettings *cmd)
{
        u8 autoneg = cmd->base.autoneg;
        u8 duplex = cmd->base.duplex;
        u32 speed = cmd->base.speed;
        u32 advertising;

        if (cmd->base.phy_address != phydev->mdio.addr)
                return -EINVAL;

        ethtool_convert_link_mode_to_legacy_u32(&advertising,
                                                cmd->link_modes.advertising);

        /* We make sure that we don't pass unsupported values in to the PHY */
        advertising &= phydev->supported;

        /* Verify the settings we care about. */
        if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
                return -EINVAL;

        if (autoneg == AUTONEG_ENABLE && advertising == 0)
                return -EINVAL;

        if (autoneg == AUTONEG_DISABLE &&
            ((speed != SPEED_1000 &&
              speed != SPEED_100 &&
              speed != SPEED_10) ||
             (duplex != DUPLEX_HALF &&
              duplex != DUPLEX_FULL)))
                return -EINVAL;

        phydev->autoneg = autoneg;

        phydev->speed = speed;

        phydev->advertising = advertising;

        if (autoneg == AUTONEG_ENABLE)
                phydev->advertising |= ADVERTISED_Autoneg;
        else
                phydev->advertising &= ~ADVERTISED_Autoneg;

        phydev->duplex = duplex;

        phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;

        /* Restart the PHY */
        phy_start_aneg(phydev);

        return 0;
}
EXPORT_SYMBOL(phy_ethtool_ksettings_set);

void phy_ethtool_ksettings_get(struct phy_device *phydev,
                               struct ethtool_link_ksettings *cmd)
{
        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                phydev->supported);

        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
                                                phydev->advertising);

        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
                                                phydev->lp_advertising);

        cmd->base.speed = phydev->speed;
        cmd->base.duplex = phydev->duplex;
        if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
                cmd->base.port = PORT_BNC;
        else
                cmd->base.port = PORT_MII;
        cmd->base.transceiver = phy_is_internal(phydev) ?
                                XCVR_INTERNAL : XCVR_EXTERNAL;
        cmd->base.phy_address = phydev->mdio.addr;
        cmd->base.autoneg = phydev->autoneg;
        cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
        cmd->base.eth_tp_mdix = phydev->mdix;
}
EXPORT_SYMBOL(phy_ethtool_ksettings_get);

/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @ifr: &struct ifreq for socket ioctl's
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer. It changes registers without regard to
 * current state. Use at own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *mii_data = if_mii(ifr);
        u16 val = mii_data->val_in;
        bool change_autoneg = false;

        switch (cmd) {
        case SIOCGMIIPHY:
                mii_data->phy_id = phydev->mdio.addr;
                /* fall through */

        case SIOCGMIIREG:
                mii_data->val_out = mdiobus_read(phydev->mdio.bus,
                                                 mii_data->phy_id,
                                                 mii_data->reg_num);
                return 0;

        case SIOCSMIIREG:
                if (mii_data->phy_id == phydev->mdio.addr) {
                        switch (mii_data->reg_num) {
                        case MII_BMCR:
                                if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
                                        if (phydev->autoneg == AUTONEG_ENABLE)
                                                change_autoneg = true;
                                        phydev->autoneg = AUTONEG_DISABLE;
                                        if (val & BMCR_FULLDPLX)
                                                phydev->duplex = DUPLEX_FULL;
                                        else
                                                phydev->duplex = DUPLEX_HALF;
                                        if (val & BMCR_SPEED1000)
                                                phydev->speed = SPEED_1000;
                                        else if (val & BMCR_SPEED100)
                                                phydev->speed = SPEED_100;
                                        else
                                                phydev->speed = SPEED_10;
                                } else {
                                        if (phydev->autoneg == AUTONEG_DISABLE)
                                                change_autoneg = true;
                                        phydev->autoneg = AUTONEG_ENABLE;
                                }
                                break;
                        case MII_ADVERTISE:
                                phydev->advertising = mii_adv_to_ethtool_adv_t(val);
                                change_autoneg = true;
                                break;
                        default:
                                /* do nothing */
                                break;
                        }
                }

                mdiobus_write(phydev->mdio.bus, mii_data->phy_id,
                              mii_data->reg_num, val);

                if (mii_data->phy_id == phydev->mdio.addr &&
                    mii_data->reg_num == MII_BMCR &&
                    val & BMCR_RESET)
                        return phy_init_hw(phydev);

                if (change_autoneg)
                        return phy_start_aneg(phydev);

                return 0;

        case SIOCSHWTSTAMP:
                if (phydev->drv && phydev->drv->hwtstamp)
                        return phydev->drv->hwtstamp(phydev, ifr);
                /* fall through */

        default:
                return -EOPNOTSUPP;
        }
}
EXPORT_SYMBOL(phy_mii_ioctl);
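
/* Rough userspace usage sketch for SIOCGMIIREG ("eth0" and "sock" are only
 * placeholders for an interface name and an AF_INET socket fd):
 *
 *      struct ifreq ifr = { 0 };
 *      struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *      ioctl(sock, SIOCGMIIPHY, &ifr);         fills mii->phy_id
 *      mii->reg_num = MII_BMSR;
 *      ioctl(sock, SIOCGMIIREG, &ifr);         BMSR value lands in mii->val_out
 */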

/**
 * phy_start_aneg_priv - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 * @sync: indicate whether we should wait for the workqueue cancelation
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 *   them), and then calls the driver's config_aneg function.
 *   If the PHYCONTROL Layer is operating, we change the state to
 *   reflect the beginning of Auto-negotiation or forcing.
 */
static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
{
        bool trigger = false;
        int err;

        if (!phydev->drv)
                return -EIO;

        mutex_lock(&phydev->lock);

        if (AUTONEG_DISABLE == phydev->autoneg)
                phy_sanitize_settings(phydev);

        /* Invalidate LP advertising flags */
        phydev->lp_advertising = 0;

        if (phydev->drv->config_aneg)
                err = phydev->drv->config_aneg(phydev);
        else
                err = genphy_config_aneg(phydev);
        if (err < 0)
                goto out_unlock;

        if (phydev->state != PHY_HALTED) {
                if (AUTONEG_ENABLE == phydev->autoneg) {
                        phydev->state = PHY_AN;
                        phydev->link_timeout = PHY_AN_TIMEOUT;
                } else {
                        phydev->state = PHY_FORCING;
                        phydev->link_timeout = PHY_FORCE_TIMEOUT;
                }
        }

        /* Re-schedule a PHY state machine to check PHY status because
         * negotiation may already be done and aneg interrupt may not be
         * generated.
         */
        if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
                err = phy_aneg_done(phydev);
                if (err > 0) {
                        trigger = true;
                        err = 0;
                }
        }

out_unlock:
        mutex_unlock(&phydev->lock);

        if (trigger)
                phy_trigger_machine(phydev, sync);

        return err;
}

/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 *   them), and then calls the driver's config_aneg function.
 *   If the PHYCONTROL Layer is operating, we change the state to
 *   reflect the beginning of Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
        return phy_start_aneg_priv(phydev, true);
}
EXPORT_SYMBOL(phy_start_aneg);

/**
 * phy_start_machine - start PHY state machine tracking
 * @phydev: the phy_device struct
 *
 * Description: The PHY infrastructure can run a state machine
 *   which tracks whether the PHY is starting up, negotiating,
 *   etc. This function starts the delayed workqueue which tracks
 *   the state of the PHY. If you want to maintain your own state machine,
 *   do not call this function.
 */
void phy_start_machine(struct phy_device *phydev)
{
        queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
}
EXPORT_SYMBOL_GPL(phy_start_machine);

/**
 * phy_trigger_machine - trigger the state machine to run
 *
 * @phydev: the phy_device struct
 * @sync: indicate whether we should wait for the workqueue cancelation
 *
 * Description: There has been a change in state which requires that the
 *   state machine runs.
 */

void phy_trigger_machine(struct phy_device *phydev, bool sync)
{
        if (sync)
                cancel_delayed_work_sync(&phydev->state_queue);
        else
                cancel_delayed_work(&phydev->state_queue);
        queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
}

/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine delayed workqueue, sets the
 *   state to UP (unless it wasn't up yet). This function must be
 *   called BEFORE phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
        cancel_delayed_work_sync(&phydev->state_queue);

        mutex_lock(&phydev->lock);
        if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
                phydev->state = PHY_UP;
        mutex_unlock(&phydev->lock);
}

/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
static void phy_error(struct phy_device *phydev)
{
        mutex_lock(&phydev->lock);
        phydev->state = PHY_HALTED;
        mutex_unlock(&phydev->lock);

        phy_trigger_machine(phydev, false);
}

/**
 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_disable_interrupts(struct phy_device *phydev)
{
        int err;

        /* Disable PHY interrupts */
        err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
        if (err)
                goto phy_err;

        /* Clear the interrupt */
        err = phy_clear_interrupt(phydev);
        if (err)
                goto phy_err;

        return 0;

phy_err:
        phy_error(phydev);

        return err;
}

/**
 * phy_change - Called by the phy_interrupt to handle PHY changes
 * @phydev: phy_device struct that interrupted
 */
static irqreturn_t phy_change(struct phy_device *phydev)
{
        if (phy_interrupt_is_valid(phydev)) {
                if (phydev->drv->did_interrupt &&
                    !phydev->drv->did_interrupt(phydev))
                        return IRQ_NONE;

                if (phydev->state == PHY_HALTED)
                        if (phy_disable_interrupts(phydev))
                                goto phy_err;
        }

        mutex_lock(&phydev->lock);
        if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
                phydev->state = PHY_CHANGELINK;
        mutex_unlock(&phydev->lock);

        /* reschedule state queue work to run as soon as possible */
        phy_trigger_machine(phydev, true);

        if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
                goto phy_err;
        return IRQ_HANDLED;

phy_err:
        phy_error(phydev);
        return IRQ_NONE;
}

/**
 * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
void phy_change_work(struct work_struct *work)
{
        struct phy_device *phydev =
                container_of(work, struct phy_device, phy_queue);

        phy_change(phydev);
}

/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: When a PHY interrupt occurs, the handler disables
 * interrupts, and uses phy_change to handle the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
        struct phy_device *phydev = phy_dat;

        if (PHY_HALTED == phydev->state)
                return IRQ_NONE;                /* It can't be ours. */

        return phy_change(phydev);
}

/**
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_enable_interrupts(struct phy_device *phydev)
{
        int err = phy_clear_interrupt(phydev);

        if (err < 0)
                return err;

        return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
}

/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Request the interrupt for the given PHY.
 *   If this fails, then we set irq to PHY_POLL.
 *   Otherwise, we enable the interrupts in the PHY.
 *   This should only be called with a valid IRQ number.
 *   Returns 0 on success or < 0 on error.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
        if (request_threaded_irq(phydev->irq, NULL, phy_interrupt,
                                 IRQF_ONESHOT | IRQF_SHARED,
                                 phydev_name(phydev), phydev) < 0) {
                pr_warn("%s: Can't get IRQ %d (PHY)\n",
                        phydev->mdio.bus->name, phydev->irq);
                phydev->irq = PHY_POLL;
                return 0;
        }

        return phy_enable_interrupts(phydev);
}
EXPORT_SYMBOL(phy_start_interrupts);

/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
        int err = phy_disable_interrupts(phydev);

        if (err)
                phy_error(phydev);

        free_irq(phydev->irq, phydev);

        return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);

/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
        mutex_lock(&phydev->lock);

        if (PHY_HALTED == phydev->state)
                goto out_unlock;

        if (phy_interrupt_is_valid(phydev)) {
                /* Disable PHY Interrupts */
                phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

                /* Clear any pending interrupts */
                phy_clear_interrupt(phydev);
        }

        phydev->state = PHY_HALTED;

out_unlock:
        mutex_unlock(&phydev->lock);

        /* Cannot call flush_scheduled_work() here as desired because
         * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
         * will not reenable interrupts.
         */
}
EXPORT_SYMBOL(phy_stop);

/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 *   handle PHY-related work. Used during startup to start the
 *   PHY, and after a call to phy_stop() to resume operation.
 *   Also used to indicate the MDIO bus has cleared an error
 *   condition.
 */
void phy_start(struct phy_device *phydev)
{
        int err = 0;

        mutex_lock(&phydev->lock);

        switch (phydev->state) {
        case PHY_STARTING:
                phydev->state = PHY_PENDING;
                break;
        case PHY_READY:
                phydev->state = PHY_UP;
                break;
        case PHY_HALTED:
                /* if phy was suspended, bring the physical link up again */
                __phy_resume(phydev);

                /* make sure interrupts are re-enabled for the PHY */
                if (phy_interrupt_is_valid(phydev)) {
                        err = phy_enable_interrupts(phydev);
                        if (err < 0)
                                break;
                }

                phydev->state = PHY_RESUMING;
                break;
        default:
                break;
        }
        mutex_unlock(&phydev->lock);

        phy_trigger_machine(phydev, true);
}
EXPORT_SYMBOL(phy_start);
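
/* Typical call sequence from a MAC driver (sketch; "ndev", "phy_name" and
 * "foo_adjust_link" are placeholders, not part of this API): the PHY is
 * attached with phy_connect() or of_phy_connect(), started from ndo_open()
 * and stopped from ndo_stop(), e.g.
 *
 *      phydev = phy_connect(ndev, phy_name, &foo_adjust_link, interface);
 *      ...
 *      phy_start(ndev->phydev);        in ndo_open()
 *      phy_stop(ndev->phydev);         in ndo_stop()
 */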

static void phy_link_up(struct phy_device *phydev)
{
        phydev->phy_link_change(phydev, true, true);
        phy_led_trigger_change_speed(phydev);
}

static void phy_link_down(struct phy_device *phydev, bool do_carrier)
{
        phydev->phy_link_change(phydev, false, do_carrier);
        phy_led_trigger_change_speed(phydev);
}

/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 */
void phy_state_machine(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct phy_device *phydev =
                        container_of(dwork, struct phy_device, state_queue);
        bool needs_aneg = false, do_suspend = false;
        enum phy_state old_state;
        int err = 0;
        int old_link;

        mutex_lock(&phydev->lock);

        old_state = phydev->state;

        if (phydev->drv && phydev->drv->link_change_notify)
                phydev->drv->link_change_notify(phydev);

        switch (phydev->state) {
        case PHY_DOWN:
        case PHY_STARTING:
        case PHY_READY:
        case PHY_PENDING:
                break;
        case PHY_UP:
                needs_aneg = true;

                phydev->link_timeout = PHY_AN_TIMEOUT;

                break;
        case PHY_AN:
                err = phy_read_status(phydev);
                if (err < 0)
                        break;

                /* If the link is down, give up on negotiation for now */
                if (!phydev->link) {
                        phydev->state = PHY_NOLINK;
                        phy_link_down(phydev, true);
                        break;
                }

                /* Check if negotiation is done. Break if there's an error */
                err = phy_aneg_done(phydev);
                if (err < 0)
                        break;

                /* If AN is done, we're running */
                if (err > 0) {
                        phydev->state = PHY_RUNNING;
                        phy_link_up(phydev);
                } else if (0 == phydev->link_timeout--)
                        needs_aneg = true;
                break;
        case PHY_NOLINK:
                if (phy_interrupt_is_valid(phydev))
                        break;

                err = phy_read_status(phydev);
                if (err)
                        break;

                if (phydev->link) {
                        if (AUTONEG_ENABLE == phydev->autoneg) {
                                err = phy_aneg_done(phydev);
                                if (err < 0)
                                        break;

                                if (!err) {
                                        phydev->state = PHY_AN;
                                        phydev->link_timeout = PHY_AN_TIMEOUT;
                                        break;
                                }
                        }
                        phydev->state = PHY_RUNNING;
                        phy_link_up(phydev);
                }
                break;
        case PHY_FORCING:
                err = genphy_update_link(phydev);
                if (err)
                        break;

                if (phydev->link) {
                        phydev->state = PHY_RUNNING;
                        phy_link_up(phydev);
                } else {
                        if (0 == phydev->link_timeout--)
                                needs_aneg = true;
                        phy_link_down(phydev, false);
                }
                break;
        case PHY_RUNNING:
                /* Only register a CHANGE if we are polling and link changed
                 * since latest checking.
                 */
                if (phydev->irq == PHY_POLL) {
                        old_link = phydev->link;
                        err = phy_read_status(phydev);
                        if (err)
                                break;

                        if (old_link != phydev->link)
                                phydev->state = PHY_CHANGELINK;
                }
                /*
                 * Failsafe: check that nobody set phydev->link=0 between two
                 * poll cycles, otherwise we won't leave RUNNING state as long
                 * as link remains down.
                 */
                if (!phydev->link && phydev->state == PHY_RUNNING) {
                        phydev->state = PHY_CHANGELINK;
                        phydev_err(phydev, "no link in PHY_RUNNING\n");
                }
                break;
        case PHY_CHANGELINK:
                err = phy_read_status(phydev);
                if (err)
                        break;

                if (phydev->link) {
                        phydev->state = PHY_RUNNING;
                        phy_link_up(phydev);
                } else {
                        phydev->state = PHY_NOLINK;
                        phy_link_down(phydev, true);
                }
                break;
        case PHY_HALTED:
                if (phydev->link) {
                        phydev->link = 0;
                        phy_link_down(phydev, true);
                        do_suspend = true;
                }
                break;
        case PHY_RESUMING:
                if (AUTONEG_ENABLE == phydev->autoneg) {
                        err = phy_aneg_done(phydev);
                        if (err < 0)
                                break;

                        /* err > 0 if AN is done.
                         * Otherwise, it's 0, and we're still waiting for AN
                         */
                        if (err > 0) {
                                err = phy_read_status(phydev);
                                if (err)
                                        break;

                                if (phydev->link) {
                                        phydev->state = PHY_RUNNING;
                                        phy_link_up(phydev);
                                } else {
                                        phydev->state = PHY_NOLINK;
                                        phy_link_down(phydev, false);
                                }
                        } else {
                                phydev->state = PHY_AN;
                                phydev->link_timeout = PHY_AN_TIMEOUT;
                        }
                } else {
                        err = phy_read_status(phydev);
                        if (err)
                                break;

                        if (phydev->link) {
                                phydev->state = PHY_RUNNING;
                                phy_link_up(phydev);
                        } else {
                                phydev->state = PHY_NOLINK;
                                phy_link_down(phydev, false);
                        }
                }
                break;
        }

        mutex_unlock(&phydev->lock);

        if (needs_aneg)
                err = phy_start_aneg_priv(phydev, false);
        else if (do_suspend)
                phy_suspend(phydev);

        if (err < 0)
                phy_error(phydev);

        if (old_state != phydev->state)
                phydev_dbg(phydev, "PHY state change %s -> %s\n",
                           phy_state_to_str(old_state),
                           phy_state_to_str(phydev->state));

        /* Only re-schedule the state machine if we are polling the PHY.
         * If PHY_IGNORE_INTERRUPT is set, we will instead be moved between
         * states from phy_mac_interrupt().
         */
        if (phydev->irq == PHY_POLL)
                queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
                                   PHY_STATE_TIME * HZ);
}

/**
 * phy_mac_interrupt - MAC says the link has changed
 * @phydev: phy_device struct with changed link
 *
 * The MAC layer is able to indicate there has been a change in the PHY link
 * status. Trigger the state machine by scheduling the PHY change work.
 */
void phy_mac_interrupt(struct phy_device *phydev)
{
        /* Trigger a state machine change */
        queue_work(system_power_efficient_wq, &phydev->phy_queue);
}
EXPORT_SYMBOL(phy_mac_interrupt);

/**
 * phy_init_eee - init and check the EEE feature
 * @phydev: target phy_device struct
 * @clk_stop_enable: PHY may stop the clock during LPI
 *
 * Description: checks whether Energy-Efficient Ethernet (EEE) is supported
 * by looking at MMD registers 3.20 and 7.60/61, and programs MMD register
 * 3.0 to set the "Clock stop enable" bit if required.
 */
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
        if (!phydev->drv)
                return -EIO;

        /* According to 802.3az, EEE is supported only in full-duplex mode. */
        if (phydev->duplex == DUPLEX_FULL) {
                int eee_lp, eee_cap, eee_adv;
                u32 lp, cap, adv;
                int status;

                /* Read phy status to properly get the right settings */
                status = phy_read_status(phydev);
                if (status)
                        return status;

                /* First check if the EEE ability is supported */
                eee_cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
                if (eee_cap <= 0)
                        goto eee_exit_err;

                cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
                if (!cap)
                        goto eee_exit_err;

                /* Check which link settings negotiated and verify it in
                 * the EEE advertising registers.
                 */
                eee_lp = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
                if (eee_lp <= 0)
                        goto eee_exit_err;

                eee_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
                if (eee_adv <= 0)
                        goto eee_exit_err;

                adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
                lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
                if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
                        goto eee_exit_err;

                if (clk_stop_enable) {
                        /* Configure the PHY to stop receiving xMII
                         * clock while it is signaling LPI.
                         */
                        int val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);

                        if (val < 0)
                                return val;

                        val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
                        phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, val);
                }

                return 0; /* EEE supported */
        }
eee_exit_err:
        return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL(phy_init_eee);
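
/* Typical use from an EEE-capable MAC driver (sketch): a zero return means
 * EEE can be used on the current link, e.g.
 *
 *      eee_active = phy_init_eee(ndev->phydev, true) == 0;
 *
 * where "eee_active" is a driver-private flag, not part of this API.
 */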

/**
 * phy_get_eee_err - report the EEE wake error count
 * @phydev: target phy_device struct
 *
 * Description: reports the number of times the PHY failed to complete
 * its normal wake sequence.
 */
int phy_get_eee_err(struct phy_device *phydev)
{
        if (!phydev->drv)
                return -EIO;

        return phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_WK_ERR);
}
EXPORT_SYMBOL(phy_get_eee_err);

/**
 * phy_ethtool_get_eee - get EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: reports the Supported/Advertisement/LP Advertisement
 * EEE capabilities.
 */
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
        int val;

        if (!phydev->drv)
                return -EIO;

        /* Get Supported EEE */
        val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
        if (val < 0)
                return val;
        data->supported = mmd_eee_cap_to_ethtool_sup_t(val);

        /* Get advertisement EEE */
        val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
        if (val < 0)
                return val;
        data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

        /* Get LP advertisement EEE */
        val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
        if (val < 0)
                return val;
        data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

        return 0;
}
EXPORT_SYMBOL(phy_ethtool_get_eee);

/**
 * phy_ethtool_set_eee - set EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: programs the EEE Advertisement register.
 */
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
        int cap, old_adv, adv, ret;

        if (!phydev->drv)
                return -EIO;

        /* Get Supported EEE */
        cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
        if (cap < 0)
                return cap;

        old_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
        if (old_adv < 0)
                return old_adv;

        adv = ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap;

        /* Mask prohibited EEE modes */
        adv &= ~phydev->eee_broken_modes;

        if (old_adv != adv) {
                ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
                if (ret < 0)
                        return ret;

                /* Restart autonegotiation so the new modes get sent to the
                 * link partner.
                 */
                ret = phy_restart_aneg(phydev);
                if (ret < 0)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);

int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
        if (phydev->drv && phydev->drv->set_wol)
                return phydev->drv->set_wol(phydev, wol);

        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(phy_ethtool_set_wol);

void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
        if (phydev->drv && phydev->drv->get_wol)
                phydev->drv->get_wol(phydev, wol);
}
EXPORT_SYMBOL(phy_ethtool_get_wol);

int phy_ethtool_get_link_ksettings(struct net_device *ndev,
                                   struct ethtool_link_ksettings *cmd)
{
        struct phy_device *phydev = ndev->phydev;

        if (!phydev)
                return -ENODEV;

        phy_ethtool_ksettings_get(phydev, cmd);

        return 0;
}
EXPORT_SYMBOL(phy_ethtool_get_link_ksettings);

int phy_ethtool_set_link_ksettings(struct net_device *ndev,
                                   const struct ethtool_link_ksettings *cmd)
{
        struct phy_device *phydev = ndev->phydev;

        if (!phydev)
                return -ENODEV;

        return phy_ethtool_ksettings_set(phydev, cmd);
}
EXPORT_SYMBOL(phy_ethtool_set_link_ksettings);

int phy_ethtool_nway_reset(struct net_device *ndev)
{
        struct phy_device *phydev = ndev->phydev;

        if (!phydev)
                return -ENODEV;

        if (!phydev->drv)
                return -EIO;

        return phy_restart_aneg(phydev);
}
EXPORT_SYMBOL(phy_ethtool_nway_reset);
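
/* These wrappers are meant to be plugged directly into a driver's
 * ethtool_ops, e.g. (sketch; "foo" is a placeholder driver name):
 *
 *      static const struct ethtool_ops foo_ethtool_ops = {
 *              .get_link_ksettings     = phy_ethtool_get_link_ksettings,
 *              .set_link_ksettings     = phy_ethtool_set_link_ksettings,
 *              .nway_reset             = phy_ethtool_nway_reset,
 *      };
 */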