1 /* Framework for configuring and reading PHY devices
2 * Based on code in sungem_phy.c and gianfar_phy.c
3 *
4 * Author: Andy Fleming
5 *
6 * Copyright (c) 2004 Freescale Semiconductor, Inc.
7 * Copyright (c) 2006, 2007 Maciej W. Rozycki
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 */
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18 #include <linux/kernel.h>
19 #include <linux/string.h>
20 #include <linux/errno.h>
21 #include <linux/unistd.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/skbuff.h>
27 #include <linux/mm.h>
28 #include <linux/module.h>
29 #include <linux/mii.h>
30 #include <linux/ethtool.h>
31 #include <linux/phy.h>
32 #include <linux/phy_led_triggers.h>
33 #include <linux/timer.h>
34 #include <linux/workqueue.h>
35 #include <linux/mdio.h>
36 #include <linux/io.h>
37 #include <linux/uaccess.h>
38 #include <linux/atomic.h>
39
40 #include <asm/irq.h>
41
42 static const char *phy_speed_to_str(int speed)
43 {
44 switch (speed) {
45 case SPEED_10:
46 return "10Mbps";
47 case SPEED_100:
48 return "100Mbps";
49 case SPEED_1000:
50 return "1Gbps";
51 case SPEED_2500:
52 return "2.5Gbps";
53 case SPEED_5000:
54 return "5Gbps";
55 case SPEED_10000:
56 return "10Gbps";
57 case SPEED_14000:
58 return "14Gbps";
59 case SPEED_20000:
60 return "20Gbps";
61 case SPEED_25000:
62 return "25Gbps";
63 case SPEED_40000:
64 return "40Gbps";
65 case SPEED_50000:
66 return "50Gbps";
67 case SPEED_56000:
68 return "56Gbps";
69 case SPEED_100000:
70 return "100Gbps";
71 case SPEED_UNKNOWN:
72 return "Unknown";
73 default:
74 return "Unsupported (update phy.c)";
75 }
76 }
77
78 #define PHY_STATE_STR(_state) \
79 case PHY_##_state: \
80 return __stringify(_state); \
81
82 static const char *phy_state_to_str(enum phy_state st)
83 {
84 switch (st) {
85 PHY_STATE_STR(DOWN)
86 PHY_STATE_STR(STARTING)
87 PHY_STATE_STR(READY)
88 PHY_STATE_STR(PENDING)
89 PHY_STATE_STR(UP)
90 PHY_STATE_STR(AN)
91 PHY_STATE_STR(RUNNING)
92 PHY_STATE_STR(NOLINK)
93 PHY_STATE_STR(FORCING)
94 PHY_STATE_STR(CHANGELINK)
95 PHY_STATE_STR(HALTED)
96 PHY_STATE_STR(RESUMING)
97 }
98
99 return NULL;
100 }
101
102
103 /**
104 * phy_print_status - Convenience function to print out the current phy status
105 * @phydev: the phy_device struct
106 */
107 void phy_print_status(struct phy_device *phydev)
108 {
109 if (phydev->link) {
110 netdev_info(phydev->attached_dev,
111 "Link is Up - %s/%s - flow control %s\n",
112 phy_speed_to_str(phydev->speed),
113 DUPLEX_FULL == phydev->duplex ? "Full" : "Half",
114 phydev->pause ? "rx/tx" : "off");
115 } else {
116 netdev_info(phydev->attached_dev, "Link is Down\n");
117 }
118 }
119 EXPORT_SYMBOL(phy_print_status);
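/* Usage sketch (illustrative only, not part of this file): a MAC driver's
 * adjust_link callback, as registered through phy_connect(), typically calls
 * phy_print_status() once it has reprogrammed the MAC for the new link
 * parameters. "foo_adjust_link" is a hypothetical name.
 *
 *	static void foo_adjust_link(struct net_device *ndev)
 *	{
 *		struct phy_device *phydev = ndev->phydev;
 *
 *		... reprogram the MAC for phydev->speed / phydev->duplex ...
 *
 *		phy_print_status(phydev);
 *	}
 */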
120
121 /**
122 * phy_clear_interrupt - Ack the phy device's interrupt
123 * @phydev: the phy_device struct
124 *
125 * If the @phydev driver has an ack_interrupt function, call it to
126 * ack and clear the phy device's interrupt.
127 *
128 * Returns 0 on success or < 0 on error.
129 */
130 static int phy_clear_interrupt(struct phy_device *phydev)
131 {
132 if (phydev->drv->ack_interrupt)
133 return phydev->drv->ack_interrupt(phydev);
134
135 return 0;
136 }
137
138 /**
139 * phy_config_interrupt - configure the PHY device for the requested interrupts
140 * @phydev: the phy_device struct
141 * @interrupts: interrupt flags to configure for this @phydev
142 *
143 * Returns 0 on success or < 0 on error.
144 */
145 static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
146 {
147 phydev->interrupts = interrupts;
148 if (phydev->drv->config_intr)
149 return phydev->drv->config_intr(phydev);
150
151 return 0;
152 }
153
154 /**
155 * phy_restart_aneg - restart auto-negotiation
156 * @phydev: target phy_device struct
157 *
158 * Restart the autonegotiation on @phydev. Returns >= 0 on success or
159 * negative errno on error.
160 */
161 int phy_restart_aneg(struct phy_device *phydev)
162 {
163 int ret;
164
165 if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
166 ret = genphy_c45_restart_aneg(phydev);
167 else
168 ret = genphy_restart_aneg(phydev);
169
170 return ret;
171 }
172 EXPORT_SYMBOL_GPL(phy_restart_aneg);
173
174 /**
175 * phy_aneg_done - return auto-negotiation status
176 * @phydev: target phy_device struct
177 *
178 * Description: Return the auto-negotiation status from this @phydev.
179 * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
180 * is still pending.
181 */
182 int phy_aneg_done(struct phy_device *phydev)
183 {
184 if (phydev->drv && phydev->drv->aneg_done)
185 return phydev->drv->aneg_done(phydev);
186
187 /* Avoid genphy_aneg_done() if the Clause 45 PHY does not
188 * implement Clause 22 registers
189 */
190 if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
191 return -EINVAL;
192
193 return genphy_aneg_done(phydev);
194 }
195 EXPORT_SYMBOL(phy_aneg_done);
196
197 /* A structure for mapping a particular speed and duplex
198 * combination to a particular SUPPORTED and ADVERTISED value
199 */
200 struct phy_setting {
201 int speed;
202 int duplex;
203 u32 setting;
204 };
205
206 /* A mapping of all SUPPORTED settings to speed/duplex. This table
207 * must be grouped by speed and sorted in descending match priority
208 * - iow, descending speed. */
209 static const struct phy_setting settings[] = {
210 {
211 .speed = SPEED_10000,
212 .duplex = DUPLEX_FULL,
213 .setting = SUPPORTED_10000baseKR_Full,
214 },
215 {
216 .speed = SPEED_10000,
217 .duplex = DUPLEX_FULL,
218 .setting = SUPPORTED_10000baseKX4_Full,
219 },
220 {
221 .speed = SPEED_10000,
222 .duplex = DUPLEX_FULL,
223 .setting = SUPPORTED_10000baseT_Full,
224 },
225 {
226 .speed = SPEED_2500,
227 .duplex = DUPLEX_FULL,
228 .setting = SUPPORTED_2500baseX_Full,
229 },
230 {
231 .speed = SPEED_1000,
232 .duplex = DUPLEX_FULL,
233 .setting = SUPPORTED_1000baseKX_Full,
234 },
235 {
236 .speed = SPEED_1000,
237 .duplex = DUPLEX_FULL,
238 .setting = SUPPORTED_1000baseT_Full,
239 },
240 {
241 .speed = SPEED_1000,
242 .duplex = DUPLEX_HALF,
243 .setting = SUPPORTED_1000baseT_Half,
244 },
245 {
246 .speed = SPEED_100,
247 .duplex = DUPLEX_FULL,
248 .setting = SUPPORTED_100baseT_Full,
249 },
250 {
251 .speed = SPEED_100,
252 .duplex = DUPLEX_HALF,
253 .setting = SUPPORTED_100baseT_Half,
254 },
255 {
256 .speed = SPEED_10,
257 .duplex = DUPLEX_FULL,
258 .setting = SUPPORTED_10baseT_Full,
259 },
260 {
261 .speed = SPEED_10,
262 .duplex = DUPLEX_HALF,
263 .setting = SUPPORTED_10baseT_Half,
264 },
265 };
266
267 /**
268 * phy_lookup_setting - lookup a PHY setting
269 * @speed: speed to match
270 * @duplex: duplex to match
271 * @features: allowed link modes
272 * @exact: an exact match is required
273 *
274 * Search the settings array for a setting that matches the speed and
275 * duplex, and which is supported.
276 *
277 * If @exact is set, either an exact match or %NULL for no match will
278 * be returned.
279 *
280 * If @exact is unset, an exact match, the fastest supported setting at
281 * or below the specified speed, the slowest supported setting, or if
282 * they all fail, %NULL will be returned.
283 */
284 static const struct phy_setting *
285 phy_lookup_setting(int speed, int duplex, u32 features, bool exact)
286 {
287 const struct phy_setting *p, *match = NULL, *last = NULL;
288 int i;
289
290 for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) {
291 if (p->setting & features) {
292 last = p;
293 if (p->speed == speed && p->duplex == duplex) {
294 /* Exact match for speed and duplex */
295 match = p;
296 break;
297 } else if (!exact) {
298 if (!match && p->speed <= speed)
299 /* Candidate */
300 match = p;
301
302 if (p->speed < speed)
303 break;
304 }
305 }
306 }
307
308 if (!match && !exact)
309 match = last;
310
311 return match;
312 }
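/* Worked example (illustrative): with @features containing only
 * SUPPORTED_100baseT_Full and SUPPORTED_10baseT_Half, a request for
 * SPEED_1000/DUPLEX_FULL behaves as follows:
 *
 *	phy_lookup_setting(SPEED_1000, DUPLEX_FULL, features, true)
 *		returns NULL (no exact match);
 *	phy_lookup_setting(SPEED_1000, DUPLEX_FULL, features, false)
 *		returns the 100/Full entry, the fastest supported setting
 *		at or below the requested speed.
 */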
313
314 /**
315 * phy_find_valid - find a PHY setting that matches the requested parameters
316 * @speed: desired speed
317 * @duplex: desired duplex
318 * @supported: mask of supported link modes
319 *
320 * Locate a supported phy setting that is, in priority order:
321 * - an exact match for the specified speed and duplex mode
322 * - a match for the specified speed, or slower speed
323 * - the slowest supported speed
324 * Returns the matched phy_setting entry, or %NULL if no supported phy
325 * settings were found.
326 */
327 static const struct phy_setting *
328 phy_find_valid(int speed, int duplex, u32 supported)
329 {
330 return phy_lookup_setting(speed, duplex, supported, false);
331 }
332
333 /**
334 * phy_supported_speeds - return all speeds currently supported by a phy device
335 * @phy: The phy device to return supported speeds of.
336 * @speeds: buffer to store supported speeds in.
337 * @size: size of speeds buffer.
338 *
339 * Description: Returns the number of supported speeds, and fills the speeds
340 * buffer with the supported speeds. If speeds buffer is too small to contain
341 * all currently supported speeds, will return as many speeds as can fit.
342 */
343 unsigned int phy_supported_speeds(struct phy_device *phy,
344 unsigned int *speeds,
345 unsigned int size)
346 {
347 unsigned int count = 0;
348 unsigned int idx = 0;
349
350 for (idx = 0; idx < ARRAY_SIZE(settings) && count < size; idx++)
351 /* Assumes settings are grouped by speed */
352 if ((settings[idx].setting & phy->supported) &&
353 (count == 0 || speeds[count - 1] != settings[idx].speed))
354 speeds[count++] = settings[idx].speed;
355
356 return count;
357 }
358
359 /**
360 * phy_check_valid - check if there is a valid PHY setting which matches
361 * speed, duplex, and feature mask
362 * @speed: speed to match
363 * @duplex: duplex to match
364 * @features: A mask of the valid settings
365 *
366 * Description: Returns true if there is a valid setting, false otherwise.
367 */
368 static inline bool phy_check_valid(int speed, int duplex, u32 features)
369 {
370 return !!phy_lookup_setting(speed, duplex, features, true);
371 }
372
373 /**
374 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
375 * @phydev: the target phy_device struct
376 *
377 * Description: Make sure the PHY is set to supported speeds and
378 * duplexes. Drop down by one in this order: 1000/FULL,
379 * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
380 */
381 static void phy_sanitize_settings(struct phy_device *phydev)
382 {
383 const struct phy_setting *setting;
384 u32 features = phydev->supported;
385
386 /* Sanitize settings based on PHY capabilities */
387 if ((features & SUPPORTED_Autoneg) == 0)
388 phydev->autoneg = AUTONEG_DISABLE;
389
390 setting = phy_find_valid(phydev->speed, phydev->duplex, features);
391 if (setting) {
392 phydev->speed = setting->speed;
393 phydev->duplex = setting->duplex;
394 } else {
395 /* We failed to find anything (no supported speeds?) */
396 phydev->speed = SPEED_UNKNOWN;
397 phydev->duplex = DUPLEX_UNKNOWN;
398 }
399 }
400
401 /**
402 * phy_ethtool_sset - generic ethtool sset function, handles all the details
403 * @phydev: target phy_device struct
404 * @cmd: ethtool_cmd
405 *
406 * A few notes about parameter checking:
407 *
408 * - We don't set port or transceiver, so we don't care what they
409 * were set to.
410 * - phy_start_aneg() will make sure forced settings are sane, and
411 * choose the next best ones from the ones selected, so we don't
412 * care if ethtool tries to give us bad values.
413 */
414 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
415 {
416 u32 speed = ethtool_cmd_speed(cmd);
417
418 if (cmd->phy_address != phydev->mdio.addr)
419 return -EINVAL;
420
421 /* We make sure that we don't pass unsupported values in to the PHY */
422 cmd->advertising &= phydev->supported;
423
424 /* Verify the settings we care about. */
425 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
426 return -EINVAL;
427
428 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
429 return -EINVAL;
430
431 if (cmd->autoneg == AUTONEG_DISABLE &&
432 ((speed != SPEED_1000 &&
433 speed != SPEED_100 &&
434 speed != SPEED_10) ||
435 (cmd->duplex != DUPLEX_HALF &&
436 cmd->duplex != DUPLEX_FULL)))
437 return -EINVAL;
438
439 phydev->autoneg = cmd->autoneg;
440
441 phydev->speed = speed;
442
443 phydev->advertising = cmd->advertising;
444
445 if (AUTONEG_ENABLE == cmd->autoneg)
446 phydev->advertising |= ADVERTISED_Autoneg;
447 else
448 phydev->advertising &= ~ADVERTISED_Autoneg;
449
450 phydev->duplex = cmd->duplex;
451
452 phydev->mdix_ctrl = cmd->eth_tp_mdix_ctrl;
453
454 /* Restart the PHY */
455 phy_start_aneg(phydev);
456
457 return 0;
458 }
459 EXPORT_SYMBOL(phy_ethtool_sset);
460
461 int phy_ethtool_ksettings_set(struct phy_device *phydev,
462 const struct ethtool_link_ksettings *cmd)
463 {
464 u8 autoneg = cmd->base.autoneg;
465 u8 duplex = cmd->base.duplex;
466 u32 speed = cmd->base.speed;
467 u32 advertising;
468
469 if (cmd->base.phy_address != phydev->mdio.addr)
470 return -EINVAL;
471
472 ethtool_convert_link_mode_to_legacy_u32(&advertising,
473 cmd->link_modes.advertising);
474
475 /* We make sure that we don't pass unsupported values in to the PHY */
476 advertising &= phydev->supported;
477
478 /* Verify the settings we care about. */
479 if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
480 return -EINVAL;
481
482 if (autoneg == AUTONEG_ENABLE && advertising == 0)
483 return -EINVAL;
484
485 if (autoneg == AUTONEG_DISABLE &&
486 ((speed != SPEED_1000 &&
487 speed != SPEED_100 &&
488 speed != SPEED_10) ||
489 (duplex != DUPLEX_HALF &&
490 duplex != DUPLEX_FULL)))
491 return -EINVAL;
492
493 phydev->autoneg = autoneg;
494
495 phydev->speed = speed;
496
497 phydev->advertising = advertising;
498
499 if (autoneg == AUTONEG_ENABLE)
500 phydev->advertising |= ADVERTISED_Autoneg;
501 else
502 phydev->advertising &= ~ADVERTISED_Autoneg;
503
504 phydev->duplex = duplex;
505
506 phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
507
508 /* Restart the PHY */
509 phy_start_aneg(phydev);
510
511 return 0;
512 }
513 EXPORT_SYMBOL(phy_ethtool_ksettings_set);
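/* Usage sketch (illustrative only): forcing a PHY to 100/Full through this
 * helper. The struct members below are the real ethtool_link_ksettings
 * fields; the surrounding code is a hypothetical sketch.
 *
 *	struct ethtool_link_ksettings cmd = { };
 *
 *	cmd.base.phy_address = phydev->mdio.addr;
 *	cmd.base.autoneg = AUTONEG_DISABLE;
 *	cmd.base.speed = SPEED_100;
 *	cmd.base.duplex = DUPLEX_FULL;
 *
 *	phy_ethtool_ksettings_set(phydev, &cmd);
 */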
514
515 void phy_ethtool_ksettings_get(struct phy_device *phydev,
516 struct ethtool_link_ksettings *cmd)
517 {
518 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
519 phydev->supported);
520
521 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
522 phydev->advertising);
523
524 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
525 phydev->lp_advertising);
526
527 cmd->base.speed = phydev->speed;
528 cmd->base.duplex = phydev->duplex;
529 if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
530 cmd->base.port = PORT_BNC;
531 else
532 cmd->base.port = PORT_MII;
533
534 cmd->base.phy_address = phydev->mdio.addr;
535 cmd->base.autoneg = phydev->autoneg;
536 cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
537 cmd->base.eth_tp_mdix = phydev->mdix;
538 }
539 EXPORT_SYMBOL(phy_ethtool_ksettings_get);
540
541 /**
542 * phy_mii_ioctl - generic PHY MII ioctl interface
543 * @phydev: the phy_device struct
544 * @ifr: &struct ifreq for socket ioctl's
545 * @cmd: ioctl cmd to execute
546 *
547 * Note that this function is currently incompatible with the
548 * PHYCONTROL layer. It changes registers without regard to
549 * current state. Use at own risk.
550 */
551 int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
552 {
553 struct mii_ioctl_data *mii_data = if_mii(ifr);
554 u16 val = mii_data->val_in;
555 bool change_autoneg = false;
556
557 switch (cmd) {
558 case SIOCGMIIPHY:
559 mii_data->phy_id = phydev->mdio.addr;
560 /* fall through */
561
562 case SIOCGMIIREG:
563 mii_data->val_out = mdiobus_read(phydev->mdio.bus,
564 mii_data->phy_id,
565 mii_data->reg_num);
566 return 0;
567
568 case SIOCSMIIREG:
569 if (mii_data->phy_id == phydev->mdio.addr) {
570 switch (mii_data->reg_num) {
571 case MII_BMCR:
572 if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
573 if (phydev->autoneg == AUTONEG_ENABLE)
574 change_autoneg = true;
575 phydev->autoneg = AUTONEG_DISABLE;
576 if (val & BMCR_FULLDPLX)
577 phydev->duplex = DUPLEX_FULL;
578 else
579 phydev->duplex = DUPLEX_HALF;
580 if (val & BMCR_SPEED1000)
581 phydev->speed = SPEED_1000;
582 else if (val & BMCR_SPEED100)
583 phydev->speed = SPEED_100;
584 else phydev->speed = SPEED_10;
585 }
586 else {
587 if (phydev->autoneg == AUTONEG_DISABLE)
588 change_autoneg = true;
589 phydev->autoneg = AUTONEG_ENABLE;
590 }
591 break;
592 case MII_ADVERTISE:
593 phydev->advertising = mii_adv_to_ethtool_adv_t(val);
594 change_autoneg = true;
595 break;
596 default:
597 /* do nothing */
598 break;
599 }
600 }
601
602 mdiobus_write(phydev->mdio.bus, mii_data->phy_id,
603 mii_data->reg_num, val);
604
605 if (mii_data->phy_id == phydev->mdio.addr &&
606 mii_data->reg_num == MII_BMCR &&
607 val & BMCR_RESET)
608 return phy_init_hw(phydev);
609
610 if (change_autoneg)
611 return phy_start_aneg(phydev);
612
613 return 0;
614
615 case SIOCSHWTSTAMP:
616 if (phydev->drv && phydev->drv->hwtstamp)
617 return phydev->drv->hwtstamp(phydev, ifr);
618 /* fall through */
619
620 default:
621 return -EOPNOTSUPP;
622 }
623 }
624 EXPORT_SYMBOL(phy_mii_ioctl);
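/* Usage sketch (illustrative only): a MAC driver usually forwards the MII
 * ioctls straight to this helper from its ndo_do_ioctl hook.
 * "foo_ioctl" is a hypothetical name.
 *
 *	static int foo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
 *	{
 *		if (!netif_running(ndev) || !ndev->phydev)
 *			return -EINVAL;
 *
 *		return phy_mii_ioctl(ndev->phydev, ifr, cmd);
 *	}
 */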
625
626 /**
627 * phy_start_aneg_priv - start auto-negotiation for this PHY device
628 * @phydev: the phy_device struct
629 * @sync: indicate whether we should wait for the workqueue cancelation
630 *
631 * Description: Sanitizes the settings (if we're not autonegotiating
632 * them), and then calls the driver's config_aneg function.
633 * If the PHYCONTROL Layer is operating, we change the state to
634 * reflect the beginning of Auto-negotiation or forcing.
635 */
636 static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
637 {
638 bool trigger = false;
639 int err;
640
641 if (!phydev->drv)
642 return -EIO;
643
644 mutex_lock(&phydev->lock);
645
646 if (AUTONEG_DISABLE == phydev->autoneg)
647 phy_sanitize_settings(phydev);
648
649 /* Invalidate LP advertising flags */
650 phydev->lp_advertising = 0;
651
652 err = phydev->drv->config_aneg(phydev);
653 if (err < 0)
654 goto out_unlock;
655
656 if (phydev->state != PHY_HALTED) {
657 if (AUTONEG_ENABLE == phydev->autoneg) {
658 phydev->state = PHY_AN;
659 phydev->link_timeout = PHY_AN_TIMEOUT;
660 } else {
661 phydev->state = PHY_FORCING;
662 phydev->link_timeout = PHY_FORCE_TIMEOUT;
663 }
664 }
665
666 /* Re-schedule a PHY state machine to check PHY status because
667 * negotiation may already be done and aneg interrupt may not be
668 * generated.
669 */
670 if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
671 err = phy_aneg_done(phydev);
672 if (err > 0) {
673 trigger = true;
674 err = 0;
675 }
676 }
677
678 out_unlock:
679 mutex_unlock(&phydev->lock);
680
681 if (trigger)
682 phy_trigger_machine(phydev, sync);
683
684 return err;
685 }
686
687 /**
688 * phy_start_aneg - start auto-negotiation for this PHY device
689 * @phydev: the phy_device struct
690 *
691 * Description: Sanitizes the settings (if we're not autonegotiating
692 * them), and then calls the driver's config_aneg function.
693 * If the PHYCONTROL Layer is operating, we change the state to
694 * reflect the beginning of Auto-negotiation or forcing.
695 */
696 int phy_start_aneg(struct phy_device *phydev)
697 {
698 return phy_start_aneg_priv(phydev, true);
699 }
700 EXPORT_SYMBOL(phy_start_aneg);
701
702 /**
703 * phy_start_machine - start PHY state machine tracking
704 * @phydev: the phy_device struct
705 *
706 * Description: The PHY infrastructure can run a state machine
707 * which tracks whether the PHY is starting up, negotiating,
708 * etc. This function starts the timer which tracks the state
709 * of the PHY. If you want to maintain your own state machine,
710 * do not call this function.
711 */
712 void phy_start_machine(struct phy_device *phydev)
713 {
714 queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
715 }
716
717 /**
718 * phy_trigger_machine - trigger the state machine to run
719 *
720 * @phydev: the phy_device struct
721 * @sync: indicate whether we should wait for the workqueue cancelation
722 *
723 * Description: There has been a change in state which requires that the
724 * state machine runs.
725 */
726
727 void phy_trigger_machine(struct phy_device *phydev, bool sync)
728 {
729 if (sync)
730 cancel_delayed_work_sync(&phydev->state_queue);
731 else
732 cancel_delayed_work(&phydev->state_queue);
733 queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
734 }
735
736 /**
737 * phy_stop_machine - stop the PHY state machine tracking
738 * @phydev: target phy_device struct
739 *
740 * Description: Stops the state machine timer, sets the state to UP
741 * (unless it wasn't up yet). This function must be called BEFORE
742 * phy_detach.
743 */
744 void phy_stop_machine(struct phy_device *phydev)
745 {
746 cancel_delayed_work_sync(&phydev->state_queue);
747
748 mutex_lock(&phydev->lock);
749 if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
750 phydev->state = PHY_UP;
751 mutex_unlock(&phydev->lock);
752
753 /* Now we can run the state machine synchronously */
754 phy_state_machine(&phydev->state_queue.work);
755 }
756
757 /**
758 * phy_error - enter HALTED state for this PHY device
759 * @phydev: target phy_device struct
760 *
761 * Moves the PHY to the HALTED state in response to a read
762 * or write error, and tells the controller the link is down.
763 * Must not be called from interrupt context, or while the
764 * phydev->lock is held.
765 */
766 static void phy_error(struct phy_device *phydev)
767 {
768 mutex_lock(&phydev->lock);
769 phydev->state = PHY_HALTED;
770 mutex_unlock(&phydev->lock);
771
772 phy_trigger_machine(phydev, false);
773 }
774
775 /**
776 * phy_interrupt - PHY interrupt handler
777 * @irq: interrupt line
778 * @phy_dat: phy_device pointer
779 *
780 * Description: When a PHY interrupt occurs, the handler disables
781 * interrupts, and uses phy_change to handle the interrupt.
782 */
783 static irqreturn_t phy_interrupt(int irq, void *phy_dat)
784 {
785 struct phy_device *phydev = phy_dat;
786
787 if (PHY_HALTED == phydev->state)
788 return IRQ_NONE; /* It can't be ours. */
789
790 disable_irq_nosync(irq);
791 atomic_inc(&phydev->irq_disable);
792
793 phy_change(phydev);
794
795 return IRQ_HANDLED;
796 }
797
798 /**
799 * phy_enable_interrupts - Enable the interrupts from the PHY side
800 * @phydev: target phy_device struct
801 */
802 static int phy_enable_interrupts(struct phy_device *phydev)
803 {
804 int err = phy_clear_interrupt(phydev);
805
806 if (err < 0)
807 return err;
808
809 return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
810 }
811
812 /**
813 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
814 * @phydev: target phy_device struct
815 */
816 static int phy_disable_interrupts(struct phy_device *phydev)
817 {
818 int err;
819
820 /* Disable PHY interrupts */
821 err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
822 if (err)
823 goto phy_err;
824
825 /* Clear the interrupt */
826 err = phy_clear_interrupt(phydev);
827 if (err)
828 goto phy_err;
829
830 return 0;
831
832 phy_err:
833 phy_error(phydev);
834
835 return err;
836 }
837
838 /**
839 * phy_start_interrupts - request and enable interrupts for a PHY device
840 * @phydev: target phy_device struct
841 *
842 * Description: Request the interrupt for the given PHY.
843 * If this fails, then we set irq to PHY_POLL.
844 * Otherwise, we enable the interrupts in the PHY.
845 * This should only be called with a valid IRQ number.
846 * Returns 0 on success or < 0 on error.
847 */
848 int phy_start_interrupts(struct phy_device *phydev)
849 {
850 atomic_set(&phydev->irq_disable, 0);
851 if (request_threaded_irq(phydev->irq, NULL, phy_interrupt,
852 IRQF_ONESHOT | IRQF_SHARED,
853 phydev_name(phydev), phydev) < 0) {
854 pr_warn("%s: Can't get IRQ %d (PHY)\n",
855 phydev->mdio.bus->name, phydev->irq);
856 phydev->irq = PHY_POLL;
857 return 0;
858 }
859
860 return phy_enable_interrupts(phydev);
861 }
862 EXPORT_SYMBOL(phy_start_interrupts);
863
864 /**
865 * phy_stop_interrupts - disable interrupts from a PHY device
866 * @phydev: target phy_device struct
867 */
868 int phy_stop_interrupts(struct phy_device *phydev)
869 {
870 int err = phy_disable_interrupts(phydev);
871
872 if (err)
873 phy_error(phydev);
874
875 free_irq(phydev->irq, phydev);
876
877 /* If work indeed has been cancelled, disable_irq() will have
878 * been left unbalanced from phy_interrupt() and enable_irq()
879 * has to be called so that other devices on the line work.
880 */
881 while (atomic_dec_return(&phydev->irq_disable) >= 0)
882 enable_irq(phydev->irq);
883
884 return err;
885 }
886 EXPORT_SYMBOL(phy_stop_interrupts);
887
888 /**
889 * phy_change - Called by the phy_interrupt to handle PHY changes
890 * @phydev: phy_device struct that interrupted
891 */
892 void phy_change(struct phy_device *phydev)
893 {
894 if (phy_interrupt_is_valid(phydev)) {
895 if (phydev->drv->did_interrupt &&
896 !phydev->drv->did_interrupt(phydev))
897 goto ignore;
898
899 if (phy_disable_interrupts(phydev))
900 goto phy_err;
901 }
902
903 mutex_lock(&phydev->lock);
904 if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
905 phydev->state = PHY_CHANGELINK;
906 mutex_unlock(&phydev->lock);
907
908 if (phy_interrupt_is_valid(phydev)) {
909 atomic_dec(&phydev->irq_disable);
910 enable_irq(phydev->irq);
911
912 /* Reenable interrupts */
913 if (PHY_HALTED != phydev->state &&
914 phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
915 goto irq_enable_err;
916 }
917
918 /* reschedule state queue work to run as soon as possible */
919 phy_trigger_machine(phydev, true);
920 return;
921
922 ignore:
923 atomic_dec(&phydev->irq_disable);
924 enable_irq(phydev->irq);
925 return;
926
927 irq_enable_err:
928 disable_irq(phydev->irq);
929 atomic_inc(&phydev->irq_disable);
930 phy_err:
931 phy_error(phydev);
932 }
933
934 /**
935 * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
936 * @work: work_struct that describes the work to be done
937 */
938 void phy_change_work(struct work_struct *work)
939 {
940 struct phy_device *phydev =
941 container_of(work, struct phy_device, phy_queue);
942
943 phy_change(phydev);
944 }
945
946 /**
947 * phy_stop - Bring down the PHY link, and stop checking the status
948 * @phydev: target phy_device struct
949 */
950 void phy_stop(struct phy_device *phydev)
951 {
952 mutex_lock(&phydev->lock);
953
954 if (PHY_HALTED == phydev->state)
955 goto out_unlock;
956
957 if (phy_interrupt_is_valid(phydev)) {
958 /* Disable PHY Interrupts */
959 phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
960
961 /* Clear any pending interrupts */
962 phy_clear_interrupt(phydev);
963 }
964
965 phydev->state = PHY_HALTED;
966
967 out_unlock:
968 mutex_unlock(&phydev->lock);
969
970 /* Cannot call flush_scheduled_work() here as desired because
971 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
972 * will not reenable interrupts.
973 */
974 }
975 EXPORT_SYMBOL(phy_stop);
976
977 /**
978 * phy_start - start or restart a PHY device
979 * @phydev: target phy_device struct
980 *
981 * Description: Indicates the attached device's readiness to
982 * handle PHY-related work. Used during startup to start the
983 * PHY, and after a call to phy_stop() to resume operation.
984 * Also used to indicate the MDIO bus has cleared an error
985 * condition.
986 */
987 void phy_start(struct phy_device *phydev)
988 {
989 bool do_resume = false;
990 int err = 0;
991
992 mutex_lock(&phydev->lock);
993
994 switch (phydev->state) {
995 case PHY_STARTING:
996 phydev->state = PHY_PENDING;
997 break;
998 case PHY_READY:
999 phydev->state = PHY_UP;
1000 break;
1001 case PHY_HALTED:
1002 /* make sure interrupts are re-enabled for the PHY */
1003 if (phydev->irq != PHY_POLL) {
1004 err = phy_enable_interrupts(phydev);
1005 if (err < 0)
1006 break;
1007 }
1008
1009 phydev->state = PHY_RESUMING;
1010 do_resume = true;
1011 break;
1012 default:
1013 break;
1014 }
1015 mutex_unlock(&phydev->lock);
1016
1017 /* if phy was suspended, bring the physical link up again */
1018 if (do_resume)
1019 phy_resume(phydev);
1020
1021 phy_trigger_machine(phydev, true);
1022 }
1023 EXPORT_SYMBOL(phy_start);
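/* Usage sketch (illustrative only): the usual PHY lifecycle seen from a MAC
 * driver; the bus id, adjust_link handler and interface mode below are
 * hypothetical values.
 *
 *	ndo_open:
 *		phydev = phy_connect(ndev, bus_id, foo_adjust_link,
 *				     PHY_INTERFACE_MODE_RGMII);
 *		if (IS_ERR(phydev))
 *			return PTR_ERR(phydev);
 *		phy_start(phydev);
 *
 *	ndo_stop:
 *		phy_stop(ndev->phydev);
 *		phy_disconnect(ndev->phydev);
 */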
1024
1025 static void phy_adjust_link(struct phy_device *phydev)
1026 {
1027 phydev->adjust_link(phydev->attached_dev);
1028 phy_led_trigger_change_speed(phydev);
1029 }
1030
1031 /**
1032 * phy_state_machine - Handle the state machine
1033 * @work: work_struct that describes the work to be done
1034 */
1035 void phy_state_machine(struct work_struct *work)
1036 {
1037 struct delayed_work *dwork = to_delayed_work(work);
1038 struct phy_device *phydev =
1039 container_of(dwork, struct phy_device, state_queue);
1040 bool needs_aneg = false, do_suspend = false;
1041 enum phy_state old_state;
1042 int err = 0;
1043 int old_link;
1044
1045 mutex_lock(&phydev->lock);
1046
1047 old_state = phydev->state;
1048
1049 if (phydev->drv && phydev->drv->link_change_notify)
1050 phydev->drv->link_change_notify(phydev);
1051
1052 switch (phydev->state) {
1053 case PHY_DOWN:
1054 case PHY_STARTING:
1055 case PHY_READY:
1056 case PHY_PENDING:
1057 break;
1058 case PHY_UP:
1059 needs_aneg = true;
1060
1061 phydev->link_timeout = PHY_AN_TIMEOUT;
1062
1063 break;
1064 case PHY_AN:
1065 err = phy_read_status(phydev);
1066 if (err < 0)
1067 break;
1068
1069 /* If the link is down, give up on negotiation for now */
1070 if (!phydev->link) {
1071 phydev->state = PHY_NOLINK;
1072 netif_carrier_off(phydev->attached_dev);
1073 phy_adjust_link(phydev);
1074 break;
1075 }
1076
1077 /* Check if negotiation is done. Break if there's an error */
1078 err = phy_aneg_done(phydev);
1079 if (err < 0)
1080 break;
1081
1082 /* If AN is done, we're running */
1083 if (err > 0) {
1084 phydev->state = PHY_RUNNING;
1085 netif_carrier_on(phydev->attached_dev);
1086 phy_adjust_link(phydev);
1087
1088 } else if (0 == phydev->link_timeout--)
1089 needs_aneg = true;
1090 break;
1091 case PHY_NOLINK:
1092 if (phy_interrupt_is_valid(phydev))
1093 break;
1094
1095 err = phy_read_status(phydev);
1096 if (err)
1097 break;
1098
1099 if (phydev->link) {
1100 if (AUTONEG_ENABLE == phydev->autoneg) {
1101 err = phy_aneg_done(phydev);
1102 if (err < 0)
1103 break;
1104
1105 if (!err) {
1106 phydev->state = PHY_AN;
1107 phydev->link_timeout = PHY_AN_TIMEOUT;
1108 break;
1109 }
1110 }
1111 phydev->state = PHY_RUNNING;
1112 netif_carrier_on(phydev->attached_dev);
1113 phy_adjust_link(phydev);
1114 }
1115 break;
1116 case PHY_FORCING:
1117 err = genphy_update_link(phydev);
1118 if (err)
1119 break;
1120
1121 if (phydev->link) {
1122 phydev->state = PHY_RUNNING;
1123 netif_carrier_on(phydev->attached_dev);
1124 } else {
1125 if (0 == phydev->link_timeout--)
1126 needs_aneg = true;
1127 }
1128
1129 phy_adjust_link(phydev);
1130 break;
1131 case PHY_RUNNING:
1132 /* Only register a CHANGE if we are polling and link changed
1133 * since the last check.
1134 */
1135 if (phydev->irq == PHY_POLL) {
1136 old_link = phydev->link;
1137 err = phy_read_status(phydev);
1138 if (err)
1139 break;
1140
1141 if (old_link != phydev->link)
1142 phydev->state = PHY_CHANGELINK;
1143 }
1144 /*
1145 * Failsafe: check that nobody set phydev->link=0 between two
1146 * poll cycles, otherwise we won't leave RUNNING state as long
1147 * as link remains down.
1148 */
1149 if (!phydev->link && phydev->state == PHY_RUNNING) {
1150 phydev->state = PHY_CHANGELINK;
1151 phydev_err(phydev, "no link in PHY_RUNNING\n");
1152 }
1153 break;
1154 case PHY_CHANGELINK:
1155 err = phy_read_status(phydev);
1156 if (err)
1157 break;
1158
1159 if (phydev->link) {
1160 phydev->state = PHY_RUNNING;
1161 netif_carrier_on(phydev->attached_dev);
1162 } else {
1163 phydev->state = PHY_NOLINK;
1164 netif_carrier_off(phydev->attached_dev);
1165 }
1166
1167 phy_adjust_link(phydev);
1168
1169 if (phy_interrupt_is_valid(phydev))
1170 err = phy_config_interrupt(phydev,
1171 PHY_INTERRUPT_ENABLED);
1172 break;
1173 case PHY_HALTED:
1174 if (phydev->link) {
1175 phydev->link = 0;
1176 netif_carrier_off(phydev->attached_dev);
1177 phy_adjust_link(phydev);
1178 do_suspend = true;
1179 }
1180 break;
1181 case PHY_RESUMING:
1182 if (AUTONEG_ENABLE == phydev->autoneg) {
1183 err = phy_aneg_done(phydev);
1184 if (err < 0)
1185 break;
1186
1187 /* err > 0 if AN is done.
1188 * Otherwise, it's 0, and we're still waiting for AN
1189 */
1190 if (err > 0) {
1191 err = phy_read_status(phydev);
1192 if (err)
1193 break;
1194
1195 if (phydev->link) {
1196 phydev->state = PHY_RUNNING;
1197 netif_carrier_on(phydev->attached_dev);
1198 } else {
1199 phydev->state = PHY_NOLINK;
1200 }
1201 phy_adjust_link(phydev);
1202 } else {
1203 phydev->state = PHY_AN;
1204 phydev->link_timeout = PHY_AN_TIMEOUT;
1205 }
1206 } else {
1207 err = phy_read_status(phydev);
1208 if (err)
1209 break;
1210
1211 if (phydev->link) {
1212 phydev->state = PHY_RUNNING;
1213 netif_carrier_on(phydev->attached_dev);
1214 } else {
1215 phydev->state = PHY_NOLINK;
1216 }
1217 phy_adjust_link(phydev);
1218 }
1219 break;
1220 }
1221
1222 mutex_unlock(&phydev->lock);
1223
1224 if (needs_aneg)
1225 err = phy_start_aneg_priv(phydev, false);
1226 else if (do_suspend)
1227 phy_suspend(phydev);
1228
1229 if (err < 0)
1230 phy_error(phydev);
1231
1232 phydev_dbg(phydev, "PHY state change %s -> %s\n",
1233 phy_state_to_str(old_state),
1234 phy_state_to_str(phydev->state));
1235
1236 /* Only re-schedule a PHY state machine change if we are polling the
1237 * PHY; if PHY_IGNORE_INTERRUPT is set, then we will be moving
1238 * between states from phy_mac_interrupt()
1239 */
1240 if (phydev->irq == PHY_POLL)
1241 queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
1242 PHY_STATE_TIME * HZ);
1243 }
1244
1245 /**
1246 * phy_mac_interrupt - MAC says the link has changed
1247 * @phydev: phy_device struct with changed link
1248 * @new_link: Link is Up/Down.
1249 *
1250 * Description: The MAC layer is able to indicate there has been a change
1251 * in the PHY link status. Set the new link status, and trigger the
1252 * state machine to run via a work queue.
1253 */
1254 void phy_mac_interrupt(struct phy_device *phydev, int new_link)
1255 {
1256 phydev->link = new_link;
1257
1258 /* Trigger a state machine change */
1259 queue_work(system_power_efficient_wq, &phydev->phy_queue);
1260 }
1261 EXPORT_SYMBOL(phy_mac_interrupt);
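/* Usage sketch (illustrative only): a MAC that receives in-band link status
 * (and therefore sets phydev->irq = PHY_IGNORE_INTERRUPT) reports changes
 * from its own interrupt handler. "foo_link_isr" is a hypothetical name and
 * the register read is left as a placeholder.
 *
 *	static irqreturn_t foo_link_isr(int irq, void *dev_id)
 *	{
 *		struct net_device *ndev = dev_id;
 *		int up = ...read link state from MAC registers...;
 *
 *		phy_mac_interrupt(ndev->phydev, up);
 *		return IRQ_HANDLED;
 *	}
 */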
1262
1263 /**
1264 * phy_init_eee - init and check the EEE feature
1265 * @phydev: target phy_device struct
1266 * @clk_stop_enable: PHY may stop the clock during LPI
1267 *
1268 * Description: checks whether Energy-Efficient Ethernet (EEE) is
1269 * supported by looking at the MMD registers 3.20 and 7.60/61, and
1270 * programs MMD register 3.0, setting the "Clock stop enable" bit
1271 * if required.
1272 */
1273 int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1274 {
1275 if (!phydev->drv)
1276 return -EIO;
1277
1278 /* According to 802.3az, EEE is supported only in full-duplex mode.
1279 */
1280 if (phydev->duplex == DUPLEX_FULL) {
1281 int eee_lp, eee_cap, eee_adv;
1282 u32 lp, cap, adv;
1283 int status;
1284
1285 /* Read phy status to properly get the right settings */
1286 status = phy_read_status(phydev);
1287 if (status)
1288 return status;
1289
1290 /* First check if the EEE ability is supported */
1291 eee_cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
1292 if (eee_cap <= 0)
1293 goto eee_exit_err;
1294
1295 cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
1296 if (!cap)
1297 goto eee_exit_err;
1298
1299 /* Check which link settings were negotiated and verify them against
1300 * the EEE advertising registers.
1301 */
1302 eee_lp = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
1303 if (eee_lp <= 0)
1304 goto eee_exit_err;
1305
1306 eee_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
1307 if (eee_adv <= 0)
1308 goto eee_exit_err;
1309
1310 adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
1311 lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
1312 if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
1313 goto eee_exit_err;
1314
1315 if (clk_stop_enable) {
1316 /* Configure the PHY to stop receiving xMII
1317 * clock while it is signaling LPI.
1318 */
1319 int val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
1320 if (val < 0)
1321 return val;
1322
1323 val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
1324 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, val);
1325 }
1326
1327 return 0; /* EEE supported */
1328 }
1329 eee_exit_err:
1330 return -EPROTONOSUPPORT;
1331 }
1332 EXPORT_SYMBOL(phy_init_eee);
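/* Usage sketch (illustrative only): a MAC driver with LPI support typically
 * calls this once the link is up and only arms its own low-power timers on
 * success; "foo_enable_mac_lpi" and "priv" are hypothetical names.
 *
 *	if (phy_init_eee(ndev->phydev, true) == 0)
 *		foo_enable_mac_lpi(priv);
 */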
1333
1334 /**
1335 * phy_get_eee_err - report the EEE wake error count
1336 * @phydev: target phy_device struct
1337 *
1338 * Description: reports the number of times the PHY failed to complete
1339 * its normal wake sequence.
1340 */
1341 int phy_get_eee_err(struct phy_device *phydev)
1342 {
1343 if (!phydev->drv)
1344 return -EIO;
1345
1346 return phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_WK_ERR);
1347 }
1348 EXPORT_SYMBOL(phy_get_eee_err);
1349
1350 /**
1351 * phy_ethtool_get_eee - get EEE supported and status
1352 * @phydev: target phy_device struct
1353 * @data: ethtool_eee data
1354 *
1355 * Description: reports the Supported/Advertisement/LP Advertisement
1356 * capabilities.
1357 */
1358 int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
1359 {
1360 int val;
1361
1362 if (!phydev->drv)
1363 return -EIO;
1364
1365 /* Get Supported EEE */
1366 val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
1367 if (val < 0)
1368 return val;
1369 data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
1370
1371 /* Get advertisement EEE */
1372 val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
1373 if (val < 0)
1374 return val;
1375 data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
1376
1377 /* Get LP advertisement EEE */
1378 val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
1379 if (val < 0)
1380 return val;
1381 data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
1382
1383 return 0;
1384 }
1385 EXPORT_SYMBOL(phy_ethtool_get_eee);
1386
1387 /**
1388 * phy_ethtool_set_eee - set EEE supported and status
1389 * @phydev: target phy_device struct
1390 * @data: ethtool_eee data
1391 *
1392 * Description: programs the EEE advertisement register.
1393 */
1394 int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
1395 {
1396 int cap, old_adv, adv, ret;
1397
1398 if (!phydev->drv)
1399 return -EIO;
1400
1401 /* Get Supported EEE */
1402 cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
1403 if (cap < 0)
1404 return cap;
1405
1406 old_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
1407 if (old_adv < 0)
1408 return old_adv;
1409
1410 adv = ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap;
1411
1412 /* Mask prohibited EEE modes */
1413 adv &= ~phydev->eee_broken_modes;
1414
1415 if (old_adv != adv) {
1416 ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
1417 if (ret < 0)
1418 return ret;
1419
1420 /* Restart autonegotiation so the new modes get sent to the
1421 * link partner.
1422 */
1423 ret = phy_restart_aneg(phydev);
1424 if (ret < 0)
1425 return ret;
1426 }
1427
1428 return 0;
1429 }
1430 EXPORT_SYMBOL(phy_ethtool_set_eee);
1431
1432 int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
1433 {
1434 if (phydev->drv && phydev->drv->set_wol)
1435 return phydev->drv->set_wol(phydev, wol);
1436
1437 return -EOPNOTSUPP;
1438 }
1439 EXPORT_SYMBOL(phy_ethtool_set_wol);
1440
1441 void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
1442 {
1443 if (phydev->drv && phydev->drv->get_wol)
1444 phydev->drv->get_wol(phydev, wol);
1445 }
1446 EXPORT_SYMBOL(phy_ethtool_get_wol);
1447
1448 int phy_ethtool_get_link_ksettings(struct net_device *ndev,
1449 struct ethtool_link_ksettings *cmd)
1450 {
1451 struct phy_device *phydev = ndev->phydev;
1452
1453 if (!phydev)
1454 return -ENODEV;
1455
1456 phy_ethtool_ksettings_get(phydev, cmd);
1457
1458 return 0;
1459 }
1460 EXPORT_SYMBOL(phy_ethtool_get_link_ksettings);
1461
1462 int phy_ethtool_set_link_ksettings(struct net_device *ndev,
1463 const struct ethtool_link_ksettings *cmd)
1464 {
1465 struct phy_device *phydev = ndev->phydev;
1466
1467 if (!phydev)
1468 return -ENODEV;
1469
1470 return phy_ethtool_ksettings_set(phydev, cmd);
1471 }
1472 EXPORT_SYMBOL(phy_ethtool_set_link_ksettings);
1473
1474 int phy_ethtool_nway_reset(struct net_device *ndev)
1475 {
1476 struct phy_device *phydev = ndev->phydev;
1477
1478 if (!phydev)
1479 return -ENODEV;
1480
1481 if (!phydev->drv)
1482 return -EIO;
1483
1484 return phy_restart_aneg(phydev);
1485 }
1486 EXPORT_SYMBOL(phy_ethtool_nway_reset);
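/* Usage sketch (illustrative only): drivers that rely entirely on phylib for
 * link management can wire these helpers straight into their ethtool_ops;
 * "foo_ethtool_ops" is a hypothetical name.
 *
 *	static const struct ethtool_ops foo_ethtool_ops = {
 *		.get_link		= ethtool_op_get_link,
 *		.get_link_ksettings	= phy_ethtool_get_link_ksettings,
 *		.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 *		.nway_reset		= phy_ethtool_nway_reset,
 *	};
 */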