drivers/net/phy/phy.c
1 /* Framework for configuring and reading PHY devices
2 * Based on code in sungem_phy.c and gianfar_phy.c
3 *
4 * Author: Andy Fleming
5 *
6 * Copyright (c) 2004 Freescale Semiconductor, Inc.
7 * Copyright (c) 2006, 2007 Maciej W. Rozycki
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 */
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18 #include <linux/kernel.h>
19 #include <linux/string.h>
20 #include <linux/errno.h>
21 #include <linux/unistd.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/skbuff.h>
27 #include <linux/mm.h>
28 #include <linux/module.h>
29 #include <linux/mii.h>
30 #include <linux/ethtool.h>
31 #include <linux/phy.h>
32 #include <linux/phy_led_triggers.h>
33 #include <linux/timer.h>
34 #include <linux/workqueue.h>
35 #include <linux/mdio.h>
36 #include <linux/io.h>
37 #include <linux/uaccess.h>
38 #include <linux/atomic.h>
39
40 #include <asm/irq.h>
41
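/* Return a human-readable string for the given SPEED_* constant */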
42 static const char *phy_speed_to_str(int speed)
43 {
44 switch (speed) {
45 case SPEED_10:
46 return "10Mbps";
47 case SPEED_100:
48 return "100Mbps";
49 case SPEED_1000:
50 return "1Gbps";
51 case SPEED_2500:
52 return "2.5Gbps";
53 case SPEED_5000:
54 return "5Gbps";
55 case SPEED_10000:
56 return "10Gbps";
57 case SPEED_20000:
58 return "20Gbps";
59 case SPEED_25000:
60 return "25Gbps";
61 case SPEED_40000:
62 return "40Gbps";
63 case SPEED_50000:
64 return "50Gbps";
65 case SPEED_56000:
66 return "56Gbps";
67 case SPEED_100000:
68 return "100Gbps";
69 case SPEED_UNKNOWN:
70 return "Unknown";
71 default:
72 return "Unsupported (update phy.c)";
73 }
74 }
75
76 #define PHY_STATE_STR(_state) \
77 case PHY_##_state: \
78 return __stringify(_state); \
79
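/* Return the name of the given PHY state, or NULL for an unknown state */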
80 static const char *phy_state_to_str(enum phy_state st)
81 {
82 switch (st) {
83 PHY_STATE_STR(DOWN)
84 PHY_STATE_STR(STARTING)
85 PHY_STATE_STR(READY)
86 PHY_STATE_STR(PENDING)
87 PHY_STATE_STR(UP)
88 PHY_STATE_STR(AN)
89 PHY_STATE_STR(RUNNING)
90 PHY_STATE_STR(NOLINK)
91 PHY_STATE_STR(FORCING)
92 PHY_STATE_STR(CHANGELINK)
93 PHY_STATE_STR(HALTED)
94 PHY_STATE_STR(RESUMING)
95 }
96
97 return NULL;
98 }
99
101 /**
102 * phy_print_status - Convenience function to print out the current phy status
103 * @phydev: the phy_device struct
104 */
105 void phy_print_status(struct phy_device *phydev)
106 {
107 if (phydev->link) {
108 netdev_info(phydev->attached_dev,
109 "Link is Up - %s/%s - flow control %s\n",
110 phy_speed_to_str(phydev->speed),
111 DUPLEX_FULL == phydev->duplex ? "Full" : "Half",
112 phydev->pause ? "rx/tx" : "off");
113 } else {
114 netdev_info(phydev->attached_dev, "Link is Down\n");
115 }
116 }
117 EXPORT_SYMBOL(phy_print_status);
118
119 /**
120 * phy_clear_interrupt - Ack the phy device's interrupt
121 * @phydev: the phy_device struct
122 *
123 * If the @phydev driver has an ack_interrupt function, call it to
124 * ack and clear the phy device's interrupt.
125 *
126 * Returns 0 on success or < 0 on error.
127 */
128 static int phy_clear_interrupt(struct phy_device *phydev)
129 {
130 if (phydev->drv->ack_interrupt)
131 return phydev->drv->ack_interrupt(phydev);
132
133 return 0;
134 }
135
136 /**
137 * phy_config_interrupt - configure the PHY device for the requested interrupts
138 * @phydev: the phy_device struct
139 * @interrupts: interrupt flags to configure for this @phydev
140 *
141 * Returns 0 on success or < 0 on error.
142 */
143 static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
144 {
145 phydev->interrupts = interrupts;
146 if (phydev->drv->config_intr)
147 return phydev->drv->config_intr(phydev);
148
149 return 0;
150 }
151
153 /**
154 * phy_aneg_done - return auto-negotiation status
155 * @phydev: target phy_device struct
156 *
 * Description: Return the auto-negotiation status for this @phydev.
 * Returns > 0 if auto-negotiation has completed, < 0 on error, and 0 if
 * auto-negotiation is still pending.
160 */
161 int phy_aneg_done(struct phy_device *phydev)
162 {
163 if (phydev->drv && phydev->drv->aneg_done)
164 return phydev->drv->aneg_done(phydev);
165
166 return genphy_aneg_done(phydev);
167 }
168 EXPORT_SYMBOL(phy_aneg_done);
169
170 /* A structure for mapping a particular speed and duplex
171 * combination to a particular SUPPORTED and ADVERTISED value
172 */
173 struct phy_setting {
174 int speed;
175 int duplex;
176 u32 setting;
177 };
178
179 /* A mapping of all SUPPORTED settings to speed/duplex. This table
180 * must be grouped by speed and sorted in descending match priority
 * - in other words, descending speed. */
182 static const struct phy_setting settings[] = {
183 {
184 .speed = SPEED_10000,
185 .duplex = DUPLEX_FULL,
186 .setting = SUPPORTED_10000baseKR_Full,
187 },
188 {
189 .speed = SPEED_10000,
190 .duplex = DUPLEX_FULL,
191 .setting = SUPPORTED_10000baseKX4_Full,
192 },
193 {
194 .speed = SPEED_10000,
195 .duplex = DUPLEX_FULL,
196 .setting = SUPPORTED_10000baseT_Full,
197 },
198 {
199 .speed = SPEED_2500,
200 .duplex = DUPLEX_FULL,
201 .setting = SUPPORTED_2500baseX_Full,
202 },
203 {
204 .speed = SPEED_1000,
205 .duplex = DUPLEX_FULL,
206 .setting = SUPPORTED_1000baseKX_Full,
207 },
208 {
209 .speed = SPEED_1000,
210 .duplex = DUPLEX_FULL,
211 .setting = SUPPORTED_1000baseT_Full,
212 },
213 {
214 .speed = SPEED_1000,
215 .duplex = DUPLEX_HALF,
216 .setting = SUPPORTED_1000baseT_Half,
217 },
218 {
219 .speed = SPEED_100,
220 .duplex = DUPLEX_FULL,
221 .setting = SUPPORTED_100baseT_Full,
222 },
223 {
224 .speed = SPEED_100,
225 .duplex = DUPLEX_HALF,
226 .setting = SUPPORTED_100baseT_Half,
227 },
228 {
229 .speed = SPEED_10,
230 .duplex = DUPLEX_FULL,
231 .setting = SUPPORTED_10baseT_Full,
232 },
233 {
234 .speed = SPEED_10,
235 .duplex = DUPLEX_HALF,
236 .setting = SUPPORTED_10baseT_Half,
237 },
238 };
239
240 /**
241 * phy_lookup_setting - lookup a PHY setting
242 * @speed: speed to match
243 * @duplex: duplex to match
244 * @features: allowed link modes
245 * @exact: an exact match is required
246 *
247 * Search the settings array for a setting that matches the speed and
248 * duplex, and which is supported.
249 *
 * If @exact is set, either an exact match or %NULL for no match will
 * be returned.
 *
 * If @exact is unset, an exact match, the fastest supported setting at
 * or below the specified speed, the slowest supported setting, or, if
 * they all fail, %NULL will be returned.
256 */
257 static const struct phy_setting *
258 phy_lookup_setting(int speed, int duplex, u32 features, bool exact)
259 {
260 const struct phy_setting *p, *match = NULL, *last = NULL;
261 int i;
262
263 for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) {
264 if (p->setting & features) {
265 last = p;
266 if (p->speed == speed && p->duplex == duplex) {
267 /* Exact match for speed and duplex */
268 match = p;
269 break;
270 } else if (!exact) {
271 if (!match && p->speed <= speed)
272 /* Candidate */
273 match = p;
274
275 if (p->speed < speed)
276 break;
277 }
278 }
279 }
280
281 if (!match && !exact)
282 match = last;
283
284 return match;
285 }
286
287 /**
288 * phy_find_valid - find a PHY setting that matches the requested parameters
289 * @speed: desired speed
290 * @duplex: desired duplex
291 * @supported: mask of supported link modes
292 *
293 * Locate a supported phy setting that is, in priority order:
294 * - an exact match for the specified speed and duplex mode
295 * - a match for the specified speed, or slower speed
296 * - the slowest supported speed
297 * Returns the matched phy_setting entry, or %NULL if no supported phy
298 * settings were found.
299 */
300 static const struct phy_setting *
301 phy_find_valid(int speed, int duplex, u32 supported)
302 {
303 return phy_lookup_setting(speed, duplex, supported, false);
304 }
305
306 /**
307 * phy_supported_speeds - return all speeds currently supported by a phy device
308 * @phy: The phy device to return supported speeds of.
309 * @speeds: buffer to store supported speeds in.
310 * @size: size of speeds buffer.
311 *
 * Description: Returns the number of supported speeds, and fills the speeds
 * buffer with the supported speeds. If the speeds buffer is too small to
 * contain all currently supported speeds, it is filled with as many speeds
 * as fit and that count is returned.
315 */
316 unsigned int phy_supported_speeds(struct phy_device *phy,
317 unsigned int *speeds,
318 unsigned int size)
319 {
320 unsigned int count = 0;
321 unsigned int idx = 0;
322
323 for (idx = 0; idx < ARRAY_SIZE(settings) && count < size; idx++)
324 /* Assumes settings are grouped by speed */
325 if ((settings[idx].setting & phy->supported) &&
326 (count == 0 || speeds[count - 1] != settings[idx].speed))
327 speeds[count++] = settings[idx].speed;
328
329 return count;
330 }
331
332 /**
333 * phy_check_valid - check if there is a valid PHY setting which matches
334 * speed, duplex, and feature mask
335 * @speed: speed to match
336 * @duplex: duplex to match
337 * @features: A mask of the valid settings
338 *
339 * Description: Returns true if there is a valid setting, false otherwise.
340 */
341 static inline bool phy_check_valid(int speed, int duplex, u32 features)
342 {
343 return !!phy_lookup_setting(speed, duplex, features, true);
344 }
345
346 /**
347 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
348 * @phydev: the target phy_device struct
349 *
350 * Description: Make sure the PHY is set to supported speeds and
351 * duplexes. Drop down by one in this order: 1000/FULL,
352 * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
353 */
354 static void phy_sanitize_settings(struct phy_device *phydev)
355 {
356 const struct phy_setting *setting;
357 u32 features = phydev->supported;
358
359 /* Sanitize settings based on PHY capabilities */
360 if ((features & SUPPORTED_Autoneg) == 0)
361 phydev->autoneg = AUTONEG_DISABLE;
362
363 setting = phy_find_valid(phydev->speed, phydev->duplex, features);
364 if (setting) {
365 phydev->speed = setting->speed;
366 phydev->duplex = setting->duplex;
367 } else {
368 /* We failed to find anything (no supported speeds?) */
369 phydev->speed = SPEED_UNKNOWN;
370 phydev->duplex = DUPLEX_UNKNOWN;
371 }
372 }
373
374 /**
375 * phy_ethtool_sset - generic ethtool sset function, handles all the details
376 * @phydev: target phy_device struct
377 * @cmd: ethtool_cmd
378 *
379 * A few notes about parameter checking:
380 * - We don't set port or transceiver, so we don't care what they
381 * were set to.
382 * - phy_start_aneg() will make sure forced settings are sane, and
383 * choose the next best ones from the ones selected, so we don't
384 * care if ethtool tries to give us bad values.
385 */
386 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
387 {
388 u32 speed = ethtool_cmd_speed(cmd);
389
390 if (cmd->phy_address != phydev->mdio.addr)
391 return -EINVAL;
392
393 /* We make sure that we don't pass unsupported values in to the PHY */
394 cmd->advertising &= phydev->supported;
395
396 /* Verify the settings we care about. */
397 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
398 return -EINVAL;
399
400 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
401 return -EINVAL;
402
403 if (cmd->autoneg == AUTONEG_DISABLE &&
404 ((speed != SPEED_1000 &&
405 speed != SPEED_100 &&
406 speed != SPEED_10) ||
407 (cmd->duplex != DUPLEX_HALF &&
408 cmd->duplex != DUPLEX_FULL)))
409 return -EINVAL;
410
411 phydev->autoneg = cmd->autoneg;
412
413 phydev->speed = speed;
414
415 phydev->advertising = cmd->advertising;
416
417 if (AUTONEG_ENABLE == cmd->autoneg)
418 phydev->advertising |= ADVERTISED_Autoneg;
419 else
420 phydev->advertising &= ~ADVERTISED_Autoneg;
421
422 phydev->duplex = cmd->duplex;
423
424 phydev->mdix_ctrl = cmd->eth_tp_mdix_ctrl;
425
426 /* Restart the PHY */
427 phy_start_aneg(phydev);
428
429 return 0;
430 }
431 EXPORT_SYMBOL(phy_ethtool_sset);
432
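/**
 * phy_ethtool_ksettings_set - set PHY settings from an ethtool link ksettings request
 * @phydev: target phy_device struct
 * @cmd: requested &struct ethtool_link_ksettings
 *
 * Description: Validates the requested autoneg, speed, duplex and
 * advertising mask, applies them to @phydev, and restarts
 * auto-negotiation. Returns 0 on success or -EINVAL if the request
 * addresses a different PHY or is inconsistent.
 */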
433 int phy_ethtool_ksettings_set(struct phy_device *phydev,
434 const struct ethtool_link_ksettings *cmd)
435 {
436 u8 autoneg = cmd->base.autoneg;
437 u8 duplex = cmd->base.duplex;
438 u32 speed = cmd->base.speed;
439 u32 advertising;
440
441 if (cmd->base.phy_address != phydev->mdio.addr)
442 return -EINVAL;
443
444 ethtool_convert_link_mode_to_legacy_u32(&advertising,
445 cmd->link_modes.advertising);
446
447 /* We make sure that we don't pass unsupported values in to the PHY */
448 advertising &= phydev->supported;
449
450 /* Verify the settings we care about. */
451 if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
452 return -EINVAL;
453
454 if (autoneg == AUTONEG_ENABLE && advertising == 0)
455 return -EINVAL;
456
457 if (autoneg == AUTONEG_DISABLE &&
458 ((speed != SPEED_1000 &&
459 speed != SPEED_100 &&
460 speed != SPEED_10) ||
461 (duplex != DUPLEX_HALF &&
462 duplex != DUPLEX_FULL)))
463 return -EINVAL;
464
465 phydev->autoneg = autoneg;
466
467 phydev->speed = speed;
468
469 phydev->advertising = advertising;
470
471 if (autoneg == AUTONEG_ENABLE)
472 phydev->advertising |= ADVERTISED_Autoneg;
473 else
474 phydev->advertising &= ~ADVERTISED_Autoneg;
475
476 phydev->duplex = duplex;
477
478 phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
479
480 /* Restart the PHY */
481 phy_start_aneg(phydev);
482
483 return 0;
484 }
485 EXPORT_SYMBOL(phy_ethtool_ksettings_set);
486
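/**
 * phy_ethtool_gset - fill a legacy &struct ethtool_cmd from the PHY state
 * @phydev: target phy_device struct
 * @cmd: &struct ethtool_cmd to fill in
 *
 * Description: Reports the supported, advertised and link-partner modes,
 * speed, duplex, port, transceiver, MDI-X and auto-negotiation settings
 * currently recorded in @phydev. Returns 0.
 */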
487 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
488 {
489 cmd->supported = phydev->supported;
490
491 cmd->advertising = phydev->advertising;
492 cmd->lp_advertising = phydev->lp_advertising;
493
494 ethtool_cmd_speed_set(cmd, phydev->speed);
495 cmd->duplex = phydev->duplex;
496 if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
497 cmd->port = PORT_BNC;
498 else
499 cmd->port = PORT_MII;
500 cmd->phy_address = phydev->mdio.addr;
501 cmd->transceiver = phy_is_internal(phydev) ?
502 XCVR_INTERNAL : XCVR_EXTERNAL;
503 cmd->autoneg = phydev->autoneg;
504 cmd->eth_tp_mdix_ctrl = phydev->mdix_ctrl;
505 cmd->eth_tp_mdix = phydev->mdix;
506
507 return 0;
508 }
509 EXPORT_SYMBOL(phy_ethtool_gset);
510
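/**
 * phy_ethtool_ksettings_get - get the current PHY settings as link ksettings
 * @phydev: target phy_device struct
 * @cmd: &struct ethtool_link_ksettings to fill in
 *
 * Description: Converts the supported, advertised and link-partner masks
 * to link-mode bitmaps and reports speed, duplex, port, MDI-X and
 * auto-negotiation state from @phydev. Returns 0.
 */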
511 int phy_ethtool_ksettings_get(struct phy_device *phydev,
512 struct ethtool_link_ksettings *cmd)
513 {
514 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
515 phydev->supported);
516
517 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
518 phydev->advertising);
519
520 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
521 phydev->lp_advertising);
522
523 cmd->base.speed = phydev->speed;
524 cmd->base.duplex = phydev->duplex;
525 if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
526 cmd->base.port = PORT_BNC;
527 else
528 cmd->base.port = PORT_MII;
529
530 cmd->base.phy_address = phydev->mdio.addr;
531 cmd->base.autoneg = phydev->autoneg;
532 cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
533 cmd->base.eth_tp_mdix = phydev->mdix;
534
535 return 0;
536 }
537 EXPORT_SYMBOL(phy_ethtool_ksettings_get);
538
539 /**
540 * phy_mii_ioctl - generic PHY MII ioctl interface
541 * @phydev: the phy_device struct
542 * @ifr: &struct ifreq for socket ioctl's
543 * @cmd: ioctl cmd to execute
544 *
545 * Note that this function is currently incompatible with the
546 * PHYCONTROL layer. It changes registers without regard to
 * current state. Use at your own risk.
548 */
549 int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
550 {
551 struct mii_ioctl_data *mii_data = if_mii(ifr);
552 u16 val = mii_data->val_in;
553 bool change_autoneg = false;
554
555 switch (cmd) {
556 case SIOCGMIIPHY:
557 mii_data->phy_id = phydev->mdio.addr;
558 /* fall through */
559
560 case SIOCGMIIREG:
561 mii_data->val_out = mdiobus_read(phydev->mdio.bus,
562 mii_data->phy_id,
563 mii_data->reg_num);
564 return 0;
565
566 case SIOCSMIIREG:
567 if (mii_data->phy_id == phydev->mdio.addr) {
568 switch (mii_data->reg_num) {
569 case MII_BMCR:
570 if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
571 if (phydev->autoneg == AUTONEG_ENABLE)
572 change_autoneg = true;
573 phydev->autoneg = AUTONEG_DISABLE;
574 if (val & BMCR_FULLDPLX)
575 phydev->duplex = DUPLEX_FULL;
576 else
577 phydev->duplex = DUPLEX_HALF;
578 if (val & BMCR_SPEED1000)
579 phydev->speed = SPEED_1000;
580 else if (val & BMCR_SPEED100)
581 phydev->speed = SPEED_100;
else
phydev->speed = SPEED_10;
} else {
585 if (phydev->autoneg == AUTONEG_DISABLE)
586 change_autoneg = true;
587 phydev->autoneg = AUTONEG_ENABLE;
588 }
589 break;
590 case MII_ADVERTISE:
591 phydev->advertising = mii_adv_to_ethtool_adv_t(val);
592 change_autoneg = true;
593 break;
594 default:
595 /* do nothing */
596 break;
597 }
598 }
599
600 mdiobus_write(phydev->mdio.bus, mii_data->phy_id,
601 mii_data->reg_num, val);
602
603 if (mii_data->phy_id == phydev->mdio.addr &&
604 mii_data->reg_num == MII_BMCR &&
605 val & BMCR_RESET)
606 return phy_init_hw(phydev);
607
608 if (change_autoneg)
609 return phy_start_aneg(phydev);
610
611 return 0;
612
613 case SIOCSHWTSTAMP:
614 if (phydev->drv && phydev->drv->hwtstamp)
615 return phydev->drv->hwtstamp(phydev, ifr);
616 /* fall through */
617
618 default:
619 return -EOPNOTSUPP;
620 }
621 }
622 EXPORT_SYMBOL(phy_mii_ioctl);
623
624 /**
625 * phy_start_aneg_priv - start auto-negotiation for this PHY device
626 * @phydev: the phy_device struct
 * @sync: indicate whether we should wait for the workqueue cancellation
628 *
629 * Description: Sanitizes the settings (if we're not autonegotiating
630 * them), and then calls the driver's config_aneg function.
631 * If the PHYCONTROL Layer is operating, we change the state to
632 * reflect the beginning of Auto-negotiation or forcing.
633 */
634 static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
635 {
bool trigger = false;
637 int err;
638
639 if (!phydev->drv)
640 return -EIO;
641
642 mutex_lock(&phydev->lock);
643
644 if (AUTONEG_DISABLE == phydev->autoneg)
645 phy_sanitize_settings(phydev);
646
647 /* Invalidate LP advertising flags */
648 phydev->lp_advertising = 0;
649
650 err = phydev->drv->config_aneg(phydev);
651 if (err < 0)
652 goto out_unlock;
653
654 if (phydev->state != PHY_HALTED) {
655 if (AUTONEG_ENABLE == phydev->autoneg) {
656 phydev->state = PHY_AN;
657 phydev->link_timeout = PHY_AN_TIMEOUT;
658 } else {
659 phydev->state = PHY_FORCING;
660 phydev->link_timeout = PHY_FORCE_TIMEOUT;
661 }
662 }
663
664 /* Re-schedule a PHY state machine to check PHY status because
665 * negotiation may already be done and aneg interrupt may not be
666 * generated.
667 */
668 if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
669 err = phy_aneg_done(phydev);
670 if (err > 0) {
671 trigger = true;
672 err = 0;
673 }
674 }
675
676 out_unlock:
677 mutex_unlock(&phydev->lock);
678
679 if (trigger)
680 phy_trigger_machine(phydev, sync);
681
682 return err;
683 }
684
685 /**
686 * phy_start_aneg - start auto-negotiation for this PHY device
687 * @phydev: the phy_device struct
688 *
689 * Description: Sanitizes the settings (if we're not autonegotiating
690 * them), and then calls the driver's config_aneg function.
691 * If the PHYCONTROL Layer is operating, we change the state to
692 * reflect the beginning of Auto-negotiation or forcing.
693 */
694 int phy_start_aneg(struct phy_device *phydev)
695 {
696 return phy_start_aneg_priv(phydev, true);
697 }
698 EXPORT_SYMBOL(phy_start_aneg);
699
700 /**
701 * phy_start_machine - start PHY state machine tracking
702 * @phydev: the phy_device struct
703 *
704 * Description: The PHY infrastructure can run a state machine
705 * which tracks whether the PHY is starting up, negotiating,
706 * etc. This function starts the timer which tracks the state
707 * of the PHY. If you want to maintain your own state machine,
708 * do not call this function.
709 */
710 void phy_start_machine(struct phy_device *phydev)
711 {
712 queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
713 }
714
/**
 * phy_trigger_machine - trigger the state machine to run
 * @phydev: the phy_device struct
 * @sync: indicate whether we should wait for the workqueue cancellation
 *
 * Description: There has been a change in state which requires that the
 * state machine runs.
 */
725 void phy_trigger_machine(struct phy_device *phydev, bool sync)
726 {
727 if (sync)
728 cancel_delayed_work_sync(&phydev->state_queue);
729 else
730 cancel_delayed_work(&phydev->state_queue);
731 queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
732 }
733
734 /**
735 * phy_stop_machine - stop the PHY state machine tracking
736 * @phydev: target phy_device struct
737 *
738 * Description: Stops the state machine timer, sets the state to UP
739 * (unless it wasn't up yet). This function must be called BEFORE
740 * phy_detach.
741 */
742 void phy_stop_machine(struct phy_device *phydev)
743 {
744 cancel_delayed_work_sync(&phydev->state_queue);
745
746 mutex_lock(&phydev->lock);
747 if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
748 phydev->state = PHY_UP;
749 mutex_unlock(&phydev->lock);
750 }
751
752 /**
753 * phy_error - enter HALTED state for this PHY device
754 * @phydev: target phy_device struct
755 *
756 * Moves the PHY to the HALTED state in response to a read
757 * or write error, and tells the controller the link is down.
758 * Must not be called from interrupt context, or while the
759 * phydev->lock is held.
760 */
761 static void phy_error(struct phy_device *phydev)
762 {
763 mutex_lock(&phydev->lock);
764 phydev->state = PHY_HALTED;
765 mutex_unlock(&phydev->lock);
766
767 phy_trigger_machine(phydev, false);
768 }
769
770 /**
771 * phy_interrupt - PHY interrupt handler
772 * @irq: interrupt line
773 * @phy_dat: phy_device pointer
774 *
775 * Description: When a PHY interrupt occurs, the handler disables
776 * interrupts, and uses phy_change to handle the interrupt.
777 */
778 static irqreturn_t phy_interrupt(int irq, void *phy_dat)
779 {
780 struct phy_device *phydev = phy_dat;
781
782 if (PHY_HALTED == phydev->state)
783 return IRQ_NONE; /* It can't be ours. */
784
785 disable_irq_nosync(irq);
786 atomic_inc(&phydev->irq_disable);
787
788 phy_change(phydev);
789
790 return IRQ_HANDLED;
791 }
792
793 /**
794 * phy_enable_interrupts - Enable the interrupts from the PHY side
795 * @phydev: target phy_device struct
796 */
797 static int phy_enable_interrupts(struct phy_device *phydev)
798 {
799 int err = phy_clear_interrupt(phydev);
800
801 if (err < 0)
802 return err;
803
804 return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
805 }
806
807 /**
808 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
809 * @phydev: target phy_device struct
810 */
811 static int phy_disable_interrupts(struct phy_device *phydev)
812 {
813 int err;
814
815 /* Disable PHY interrupts */
816 err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
817 if (err)
818 goto phy_err;
819
820 /* Clear the interrupt */
821 err = phy_clear_interrupt(phydev);
822 if (err)
823 goto phy_err;
824
825 return 0;
826
827 phy_err:
828 phy_error(phydev);
829
830 return err;
831 }
832
833 /**
834 * phy_start_interrupts - request and enable interrupts for a PHY device
835 * @phydev: target phy_device struct
836 *
837 * Description: Request the interrupt for the given PHY.
838 * If this fails, then we set irq to PHY_POLL.
839 * Otherwise, we enable the interrupts in the PHY.
840 * This should only be called with a valid IRQ number.
841 * Returns 0 on success or < 0 on error.
842 */
843 int phy_start_interrupts(struct phy_device *phydev)
844 {
845 atomic_set(&phydev->irq_disable, 0);
846 if (request_threaded_irq(phydev->irq, NULL, phy_interrupt,
847 IRQF_ONESHOT | IRQF_SHARED,
848 phydev_name(phydev), phydev) < 0) {
849 pr_warn("%s: Can't get IRQ %d (PHY)\n",
850 phydev->mdio.bus->name, phydev->irq);
851 phydev->irq = PHY_POLL;
852 return 0;
853 }
854
855 return phy_enable_interrupts(phydev);
856 }
857 EXPORT_SYMBOL(phy_start_interrupts);
858
859 /**
860 * phy_stop_interrupts - disable interrupts from a PHY device
861 * @phydev: target phy_device struct
862 */
863 int phy_stop_interrupts(struct phy_device *phydev)
864 {
865 int err = phy_disable_interrupts(phydev);
866
867 if (err)
868 phy_error(phydev);
869
870 free_irq(phydev->irq, phydev);
871
872 /* If work indeed has been cancelled, disable_irq() will have
873 * been left unbalanced from phy_interrupt() and enable_irq()
874 * has to be called so that other devices on the line work.
875 */
876 while (atomic_dec_return(&phydev->irq_disable) >= 0)
877 enable_irq(phydev->irq);
878
879 return err;
880 }
881 EXPORT_SYMBOL(phy_stop_interrupts);
882
883 /**
 * phy_change - Called by phy_interrupt() and phy_change_work() to handle PHY changes
885 * @phydev: phy_device struct that interrupted
886 */
887 void phy_change(struct phy_device *phydev)
888 {
889 if (phy_interrupt_is_valid(phydev)) {
890 if (phydev->drv->did_interrupt &&
891 !phydev->drv->did_interrupt(phydev))
892 goto ignore;
893
894 if (phy_disable_interrupts(phydev))
895 goto phy_err;
896 }
897
898 mutex_lock(&phydev->lock);
899 if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
900 phydev->state = PHY_CHANGELINK;
901 mutex_unlock(&phydev->lock);
902
903 if (phy_interrupt_is_valid(phydev)) {
904 atomic_dec(&phydev->irq_disable);
905 enable_irq(phydev->irq);
906
907 /* Reenable interrupts */
908 if (PHY_HALTED != phydev->state &&
909 phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
910 goto irq_enable_err;
911 }
912
913 /* reschedule state queue work to run as soon as possible */
914 phy_trigger_machine(phydev, true);
915 return;
916
917 ignore:
918 atomic_dec(&phydev->irq_disable);
919 enable_irq(phydev->irq);
920 return;
921
922 irq_enable_err:
923 disable_irq(phydev->irq);
924 atomic_inc(&phydev->irq_disable);
925 phy_err:
926 phy_error(phydev);
927 }
928
929 /**
 * phy_change_work - Scheduled by phy_mac_interrupt() to handle PHY changes
931 * @work: work_struct that describes the work to be done
932 */
933 void phy_change_work(struct work_struct *work)
934 {
935 struct phy_device *phydev =
936 container_of(work, struct phy_device, phy_queue);
937
938 phy_change(phydev);
939 }
940
941 /**
942 * phy_stop - Bring down the PHY link, and stop checking the status
943 * @phydev: target phy_device struct
944 */
945 void phy_stop(struct phy_device *phydev)
946 {
947 mutex_lock(&phydev->lock);
948
949 if (PHY_HALTED == phydev->state)
950 goto out_unlock;
951
952 if (phy_interrupt_is_valid(phydev)) {
953 /* Disable PHY Interrupts */
954 phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
955
956 /* Clear any pending interrupts */
957 phy_clear_interrupt(phydev);
958 }
959
960 phydev->state = PHY_HALTED;
961
962 out_unlock:
963 mutex_unlock(&phydev->lock);
964
965 /* Cannot call flush_scheduled_work() here as desired because
966 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
967 * will not reenable interrupts.
968 */
969 }
970 EXPORT_SYMBOL(phy_stop);
971
972 /**
973 * phy_start - start or restart a PHY device
974 * @phydev: target phy_device struct
975 *
976 * Description: Indicates the attached device's readiness to
977 * handle PHY-related work. Used during startup to start the
978 * PHY, and after a call to phy_stop() to resume operation.
979 * Also used to indicate the MDIO bus has cleared an error
980 * condition.
981 */
982 void phy_start(struct phy_device *phydev)
983 {
984 bool do_resume = false;
985 int err = 0;
986
987 mutex_lock(&phydev->lock);
988
989 switch (phydev->state) {
990 case PHY_STARTING:
991 phydev->state = PHY_PENDING;
992 break;
993 case PHY_READY:
994 phydev->state = PHY_UP;
995 break;
996 case PHY_HALTED:
997 /* make sure interrupts are re-enabled for the PHY */
998 if (phydev->irq != PHY_POLL) {
999 err = phy_enable_interrupts(phydev);
1000 if (err < 0)
1001 break;
1002 }
1003
1004 phydev->state = PHY_RESUMING;
1005 do_resume = true;
1006 break;
1007 default:
1008 break;
1009 }
1010 mutex_unlock(&phydev->lock);
1011
1012 /* if phy was suspended, bring the physical link up again */
1013 if (do_resume)
1014 phy_resume(phydev);
1015
1016 phy_trigger_machine(phydev, true);
1017 }
1018 EXPORT_SYMBOL(phy_start);
1019
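/* Notify the attached netdev of a link change and update the PHY LED triggers */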
1020 static void phy_adjust_link(struct phy_device *phydev)
1021 {
1022 phydev->adjust_link(phydev->attached_dev);
1023 phy_led_trigger_change_speed(phydev);
1024 }
1025
1026 /**
1027 * phy_state_machine - Handle the state machine
1028 * @work: work_struct that describes the work to be done
1029 */
1030 void phy_state_machine(struct work_struct *work)
1031 {
1032 struct delayed_work *dwork = to_delayed_work(work);
1033 struct phy_device *phydev =
1034 container_of(dwork, struct phy_device, state_queue);
1035 bool needs_aneg = false, do_suspend = false;
1036 enum phy_state old_state;
1037 int err = 0;
1038 int old_link;
1039
1040 mutex_lock(&phydev->lock);
1041
1042 old_state = phydev->state;
1043
1044 if (phydev->drv && phydev->drv->link_change_notify)
1045 phydev->drv->link_change_notify(phydev);
1046
1047 switch (phydev->state) {
1048 case PHY_DOWN:
1049 case PHY_STARTING:
1050 case PHY_READY:
1051 case PHY_PENDING:
1052 break;
1053 case PHY_UP:
1054 needs_aneg = true;
1055
1056 phydev->link_timeout = PHY_AN_TIMEOUT;
1057
1058 break;
1059 case PHY_AN:
1060 err = phy_read_status(phydev);
1061 if (err < 0)
1062 break;
1063
1064 /* If the link is down, give up on negotiation for now */
1065 if (!phydev->link) {
1066 phydev->state = PHY_NOLINK;
1067 netif_carrier_off(phydev->attached_dev);
1068 phy_adjust_link(phydev);
1069 break;
1070 }
1071
1072 /* Check if negotiation is done. Break if there's an error */
1073 err = phy_aneg_done(phydev);
1074 if (err < 0)
1075 break;
1076
1077 /* If AN is done, we're running */
1078 if (err > 0) {
1079 phydev->state = PHY_RUNNING;
1080 netif_carrier_on(phydev->attached_dev);
1081 phy_adjust_link(phydev);
1082
1083 } else if (0 == phydev->link_timeout--)
1084 needs_aneg = true;
1085 break;
1086 case PHY_NOLINK:
1087 if (phy_interrupt_is_valid(phydev))
1088 break;
1089
1090 err = phy_read_status(phydev);
1091 if (err)
1092 break;
1093
1094 if (phydev->link) {
1095 if (AUTONEG_ENABLE == phydev->autoneg) {
1096 err = phy_aneg_done(phydev);
1097 if (err < 0)
1098 break;
1099
1100 if (!err) {
1101 phydev->state = PHY_AN;
1102 phydev->link_timeout = PHY_AN_TIMEOUT;
1103 break;
1104 }
1105 }
1106 phydev->state = PHY_RUNNING;
1107 netif_carrier_on(phydev->attached_dev);
1108 phy_adjust_link(phydev);
1109 }
1110 break;
1111 case PHY_FORCING:
1112 err = genphy_update_link(phydev);
1113 if (err)
1114 break;
1115
1116 if (phydev->link) {
1117 phydev->state = PHY_RUNNING;
1118 netif_carrier_on(phydev->attached_dev);
1119 } else {
1120 if (0 == phydev->link_timeout--)
1121 needs_aneg = true;
1122 }
1123
1124 phy_adjust_link(phydev);
1125 break;
1126 case PHY_RUNNING:
/* Only register a CHANGE if we are polling and the link changed
 * since the last check.
 */
1130 if (phydev->irq == PHY_POLL) {
1131 old_link = phydev->link;
1132 err = phy_read_status(phydev);
1133 if (err)
1134 break;
1135
1136 if (old_link != phydev->link)
1137 phydev->state = PHY_CHANGELINK;
1138 }
1139 /*
1140 * Failsafe: check that nobody set phydev->link=0 between two
1141 * poll cycles, otherwise we won't leave RUNNING state as long
1142 * as link remains down.
1143 */
1144 if (!phydev->link && phydev->state == PHY_RUNNING) {
1145 phydev->state = PHY_CHANGELINK;
1146 phydev_err(phydev, "no link in PHY_RUNNING\n");
1147 }
1148 break;
1149 case PHY_CHANGELINK:
1150 err = phy_read_status(phydev);
1151 if (err)
1152 break;
1153
1154 if (phydev->link) {
1155 phydev->state = PHY_RUNNING;
1156 netif_carrier_on(phydev->attached_dev);
1157 } else {
1158 phydev->state = PHY_NOLINK;
1159 netif_carrier_off(phydev->attached_dev);
1160 }
1161
1162 phy_adjust_link(phydev);
1163
1164 if (phy_interrupt_is_valid(phydev))
1165 err = phy_config_interrupt(phydev,
1166 PHY_INTERRUPT_ENABLED);
1167 break;
1168 case PHY_HALTED:
1169 if (phydev->link) {
1170 phydev->link = 0;
1171 netif_carrier_off(phydev->attached_dev);
1172 phy_adjust_link(phydev);
1173 do_suspend = true;
1174 }
1175 break;
1176 case PHY_RESUMING:
1177 if (AUTONEG_ENABLE == phydev->autoneg) {
1178 err = phy_aneg_done(phydev);
1179 if (err < 0)
1180 break;
1181
1182 /* err > 0 if AN is done.
1183 * Otherwise, it's 0, and we're still waiting for AN
1184 */
1185 if (err > 0) {
1186 err = phy_read_status(phydev);
1187 if (err)
1188 break;
1189
1190 if (phydev->link) {
1191 phydev->state = PHY_RUNNING;
1192 netif_carrier_on(phydev->attached_dev);
1193 } else {
1194 phydev->state = PHY_NOLINK;
1195 }
1196 phy_adjust_link(phydev);
1197 } else {
1198 phydev->state = PHY_AN;
1199 phydev->link_timeout = PHY_AN_TIMEOUT;
1200 }
1201 } else {
1202 err = phy_read_status(phydev);
1203 if (err)
1204 break;
1205
1206 if (phydev->link) {
1207 phydev->state = PHY_RUNNING;
1208 netif_carrier_on(phydev->attached_dev);
1209 } else {
1210 phydev->state = PHY_NOLINK;
1211 }
1212 phy_adjust_link(phydev);
1213 }
1214 break;
1215 }
1216
1217 mutex_unlock(&phydev->lock);
1218
1219 if (needs_aneg)
1220 err = phy_start_aneg_priv(phydev, false);
1221 else if (do_suspend)
1222 phy_suspend(phydev);
1223
1224 if (err < 0)
1225 phy_error(phydev);
1226
1227 phydev_dbg(phydev, "PHY state change %s -> %s\n",
1228 phy_state_to_str(old_state),
1229 phy_state_to_str(phydev->state));
1230
/* Only re-schedule a PHY state machine change if we are polling the
 * PHY. If PHY_IGNORE_INTERRUPT is set, then we will be moving
 * between states from phy_mac_interrupt().
 */
1235 if (phydev->irq == PHY_POLL)
1236 queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
1237 PHY_STATE_TIME * HZ);
1238 }
1239
1240 /**
1241 * phy_mac_interrupt - MAC says the link has changed
1242 * @phydev: phy_device struct with changed link
1243 * @new_link: Link is Up/Down.
1244 *
 * Description: The MAC layer is able to indicate that there has been a change
 * in the PHY link status. Set the new link status and trigger the
 * state machine by scheduling its work on a workqueue.
1248 */
1249 void phy_mac_interrupt(struct phy_device *phydev, int new_link)
1250 {
1251 phydev->link = new_link;
1252
1253 /* Trigger a state machine change */
1254 queue_work(system_power_efficient_wq, &phydev->phy_queue);
1255 }
1256 EXPORT_SYMBOL(phy_mac_interrupt);
1257
1258 /**
1259 * phy_init_eee - init and check the EEE feature
1260 * @phydev: target phy_device struct
1261 * @clk_stop_enable: PHY may stop the clock during LPI
1262 *
 * Description: checks whether Energy-Efficient Ethernet (EEE) is supported
 * by looking at the MMD registers 3.20 and 7.60/61, and programs
 * the MMD register 3.0, setting the "Clock stop enable" bit if required.
1267 */
1268 int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1269 {
1270 if (!phydev->drv)
1271 return -EIO;
1272
/* According to 802.3az, EEE is supported only in full-duplex mode.
1274 */
1275 if (phydev->duplex == DUPLEX_FULL) {
1276 int eee_lp, eee_cap, eee_adv;
1277 u32 lp, cap, adv;
1278 int status;
1279
1280 /* Read phy status to properly get the right settings */
1281 status = phy_read_status(phydev);
1282 if (status)
1283 return status;
1284
1285 /* First check if the EEE ability is supported */
1286 eee_cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
1287 if (eee_cap <= 0)
1288 goto eee_exit_err;
1289
1290 cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
1291 if (!cap)
1292 goto eee_exit_err;
1293
1294 /* Check which link settings negotiated and verify it in
1295 * the EEE advertising registers.
1296 */
1297 eee_lp = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
1298 if (eee_lp <= 0)
1299 goto eee_exit_err;
1300
1301 eee_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
1302 if (eee_adv <= 0)
1303 goto eee_exit_err;
1304
1305 adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
1306 lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
1307 if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
1308 goto eee_exit_err;
1309
1310 if (clk_stop_enable) {
1311 /* Configure the PHY to stop receiving xMII
1312 * clock while it is signaling LPI.
1313 */
1314 int val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
1315 if (val < 0)
1316 return val;
1317
1318 val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
1319 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, val);
1320 }
1321
1322 return 0; /* EEE supported */
1323 }
1324 eee_exit_err:
1325 return -EPROTONOSUPPORT;
1326 }
1327 EXPORT_SYMBOL(phy_init_eee);
1328
1329 /**
1330 * phy_get_eee_err - report the EEE wake error count
1331 * @phydev: target phy_device struct
1332 *
 * Description: reports the number of times the PHY
 * failed to complete its normal wake sequence.
1335 */
1336 int phy_get_eee_err(struct phy_device *phydev)
1337 {
1338 if (!phydev->drv)
1339 return -EIO;
1340
1341 return phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_WK_ERR);
1342 }
1343 EXPORT_SYMBOL(phy_get_eee_err);
1344
1345 /**
1346 * phy_ethtool_get_eee - get EEE supported and status
1347 * @phydev: target phy_device struct
1348 * @data: ethtool_eee data
1349 *
 * Description: reports the Supported/Advertisement/LP Advertisement
 * EEE capabilities.
1352 */
1353 int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
1354 {
1355 int val;
1356
1357 if (!phydev->drv)
1358 return -EIO;
1359
1360 /* Get Supported EEE */
1361 val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
1362 if (val < 0)
1363 return val;
1364 data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
1365
1366 /* Get advertisement EEE */
1367 val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
1368 if (val < 0)
1369 return val;
1370 data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
1371
1372 /* Get LP advertisement EEE */
1373 val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
1374 if (val < 0)
1375 return val;
1376 data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
1377
1378 return 0;
1379 }
1380 EXPORT_SYMBOL(phy_ethtool_get_eee);
1381
1382 /**
1383 * phy_ethtool_set_eee - set EEE supported and status
1384 * @phydev: target phy_device struct
1385 * @data: ethtool_eee data
1386 *
 * Description: programs the EEE Advertisement register.
1388 */
1389 int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
1390 {
1391 int cap, old_adv, adv, ret;
1392
1393 if (!phydev->drv)
1394 return -EIO;
1395
1396 /* Get Supported EEE */
1397 cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
1398 if (cap < 0)
1399 return cap;
1400
1401 old_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
1402 if (old_adv < 0)
1403 return old_adv;
1404
1405 adv = ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap;
1406
1407 /* Mask prohibited EEE modes */
1408 adv &= ~phydev->eee_broken_modes;
1409
1410 if (old_adv != adv) {
1411 ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
1412 if (ret < 0)
1413 return ret;
1414
1415 /* Restart autonegotiation so the new modes get sent to the
1416 * link partner.
1417 */
1418 ret = genphy_restart_aneg(phydev);
1419 if (ret < 0)
1420 return ret;
1421 }
1422
1423 return 0;
1424 }
1425 EXPORT_SYMBOL(phy_ethtool_set_eee);
1426
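/**
 * phy_ethtool_set_wol - configure Wake-on-LAN in the PHY
 * @phydev: target phy_device struct
 * @wol: requested &struct ethtool_wolinfo
 *
 * Description: Calls the driver's set_wol method, if any; otherwise
 * returns -EOPNOTSUPP.
 */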
1427 int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
1428 {
1429 if (phydev->drv && phydev->drv->set_wol)
1430 return phydev->drv->set_wol(phydev, wol);
1431
1432 return -EOPNOTSUPP;
1433 }
1434 EXPORT_SYMBOL(phy_ethtool_set_wol);
1435
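/**
 * phy_ethtool_get_wol - report the PHY's Wake-on-LAN status
 * @phydev: target phy_device struct
 * @wol: &struct ethtool_wolinfo to fill in
 *
 * Description: Calls the driver's get_wol method, if any; otherwise
 * @wol is left untouched.
 */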
1436 void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
1437 {
1438 if (phydev->drv && phydev->drv->get_wol)
1439 phydev->drv->get_wol(phydev, wol);
1440 }
1441 EXPORT_SYMBOL(phy_ethtool_get_wol);
1442
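/**
 * phy_ethtool_get_link_ksettings - helper for a netdev's get_link_ksettings ethtool op
 * @ndev: network device to query
 * @cmd: &struct ethtool_link_ksettings to fill in
 *
 * Description: Returns -ENODEV if no PHY is attached to @ndev, otherwise
 * forwards to phy_ethtool_ksettings_get().
 */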
1443 int phy_ethtool_get_link_ksettings(struct net_device *ndev,
1444 struct ethtool_link_ksettings *cmd)
1445 {
1446 struct phy_device *phydev = ndev->phydev;
1447
1448 if (!phydev)
1449 return -ENODEV;
1450
1451 return phy_ethtool_ksettings_get(phydev, cmd);
1452 }
1453 EXPORT_SYMBOL(phy_ethtool_get_link_ksettings);
1454
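/**
 * phy_ethtool_set_link_ksettings - helper for a netdev's set_link_ksettings ethtool op
 * @ndev: network device to configure
 * @cmd: requested &struct ethtool_link_ksettings
 *
 * Description: Returns -ENODEV if no PHY is attached to @ndev, otherwise
 * forwards to phy_ethtool_ksettings_set().
 */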
1455 int phy_ethtool_set_link_ksettings(struct net_device *ndev,
1456 const struct ethtool_link_ksettings *cmd)
1457 {
1458 struct phy_device *phydev = ndev->phydev;
1459
1460 if (!phydev)
1461 return -ENODEV;
1462
1463 return phy_ethtool_ksettings_set(phydev, cmd);
1464 }
1465 EXPORT_SYMBOL(phy_ethtool_set_link_ksettings);
1466
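/**
 * phy_ethtool_nway_reset - helper for a netdev's nway_reset ethtool op
 * @ndev: network device whose PHY should restart auto-negotiation
 *
 * Description: Returns -ENODEV if no PHY is attached, -EIO if the PHY has
 * no driver bound, otherwise restarts auto-negotiation via
 * genphy_restart_aneg().
 */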
1467 int phy_ethtool_nway_reset(struct net_device *ndev)
1468 {
1469 struct phy_device *phydev = ndev->phydev;
1470
1471 if (!phydev)
1472 return -ENODEV;
1473
1474 if (!phydev->drv)
1475 return -EIO;
1476
1477 return genphy_restart_aneg(phydev);
1478 }
1479 EXPORT_SYMBOL(phy_ethtool_nway_reset);