]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/net/phy/amd-xgbe-phy.c
Merge tag 'pci-v3.17-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaa...
[mirror_ubuntu-artful-kernel.git] / drivers / net / phy / amd-xgbe-phy.c
CommitLineData
4d874b30
LT
1/*
2 * AMD 10Gb Ethernet PHY driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 *
25 * License 2: Modified BSD
26 *
27 * Copyright (c) 2014 Advanced Micro Devices, Inc.
28 * All rights reserved.
29 *
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions are met:
32 * * Redistributions of source code must retain the above copyright
33 * notice, this list of conditions and the following disclaimer.
34 * * Redistributions in binary form must reproduce the above copyright
35 * notice, this list of conditions and the following disclaimer in the
36 * documentation and/or other materials provided with the distribution.
37 * * Neither the name of Advanced Micro Devices, Inc. nor the
38 * names of its contributors may be used to endorse or promote products
39 * derived from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
42 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
45 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
46 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
47 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
48 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
50 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 */
52
53#include <linux/kernel.h>
54#include <linux/device.h>
55#include <linux/platform_device.h>
56#include <linux/string.h>
57#include <linux/errno.h>
58#include <linux/unistd.h>
59#include <linux/slab.h>
60#include <linux/interrupt.h>
61#include <linux/init.h>
62#include <linux/delay.h>
63#include <linux/netdevice.h>
64#include <linux/etherdevice.h>
65#include <linux/skbuff.h>
66#include <linux/mm.h>
67#include <linux/module.h>
68#include <linux/mii.h>
69#include <linux/ethtool.h>
70#include <linux/phy.h>
71#include <linux/mdio.h>
72#include <linux/io.h>
73#include <linux/of.h>
74#include <linux/of_platform.h>
75#include <linux/of_device.h>
76#include <linux/uaccess.h>
4d874b30
LT
77
78
79MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
80MODULE_LICENSE("Dual BSD/GPL");
81MODULE_VERSION("1.0.0-a");
82MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
83
84#define XGBE_PHY_ID 0x000162d0
85#define XGBE_PHY_MASK 0xfffffff0
86
f047604a
LT
87#define XGBE_PHY_SPEEDSET_PROPERTY "amd,speed-set"
88
4d874b30
LT
89#define XGBE_AN_INT_CMPLT 0x01
90#define XGBE_AN_INC_LINK 0x02
91#define XGBE_AN_PG_RCV 0x04
92
93#define XNP_MCF_NULL_MESSAGE 0x001
94#define XNP_ACK_PROCESSED (1 << 12)
95#define XNP_MP_FORMATTED (1 << 13)
96#define XNP_NP_EXCHANGE (1 << 15)
97
1fa1f2e0 98#define XGBE_PHY_RATECHANGE_COUNT 500
169a6303 99
4d874b30
LT
100#ifndef MDIO_PMA_10GBR_PMD_CTRL
101#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
102#endif
103#ifndef MDIO_PMA_10GBR_FEC_CTRL
104#define MDIO_PMA_10GBR_FEC_CTRL 0x00ab
105#endif
106#ifndef MDIO_AN_XNP
107#define MDIO_AN_XNP 0x0016
108#endif
109
110#ifndef MDIO_AN_INTMASK
111#define MDIO_AN_INTMASK 0x8001
112#endif
113#ifndef MDIO_AN_INT
114#define MDIO_AN_INT 0x8002
115#endif
116
117#ifndef MDIO_CTRL1_SPEED1G
118#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
119#endif
120
/* SerDes integration register offsets */
#define SIR0_KR_RT_1	0x002c
#define SIR0_STATUS	0x0040
#define SIR1_SPEED	0x0000

/* SerDes integration register entry bit positions and sizes */
#define SIR0_KR_RT_1_RESET_INDEX	11
#define SIR0_KR_RT_1_RESET_WIDTH	1
#define SIR0_STATUS_RX_READY_INDEX	0
#define SIR0_STATUS_RX_READY_WIDTH	1
#define SIR0_STATUS_TX_READY_INDEX	8
#define SIR0_STATUS_TX_READY_WIDTH	1
#define SIR1_SPEED_DATARATE_INDEX	4
#define SIR1_SPEED_DATARATE_WIDTH	2
#define SIR1_SPEED_PI_SPD_SEL_INDEX	12
#define SIR1_SPEED_PI_SPD_SEL_WIDTH	4
#define SIR1_SPEED_PLLSEL_INDEX		3
#define SIR1_SPEED_PLLSEL_WIDTH		1
#define SIR1_SPEED_RATECHANGE_INDEX	6
#define SIR1_SPEED_RATECHANGE_WIDTH	1
#define SIR1_SPEED_TXAMP_INDEX		8
#define SIR1_SPEED_TXAMP_WIDTH		4
#define SIR1_SPEED_WORDMODE_INDEX	0
#define SIR1_SPEED_WORDMODE_WIDTH	3

/* Per-speed SerDes configuration values: CDR rate, PLL select,
 * data rate, Tx amplitude and word mode
 */
#define SPEED_10000_CDR			0x7
#define SPEED_10000_PLL			0x1
#define SPEED_10000_RATE		0x0
#define SPEED_10000_TXAMP		0xa
#define SPEED_10000_WORD		0x7

#define SPEED_2500_CDR			0x2
#define SPEED_2500_PLL			0x0
#define SPEED_2500_RATE			0x1
#define SPEED_2500_TXAMP		0xf
#define SPEED_2500_WORD			0x1

#define SPEED_1000_CDR			0x2
#define SPEED_1000_PLL			0x0
#define SPEED_1000_RATE			0x3
#define SPEED_1000_TXAMP		0xf
#define SPEED_1000_WORD			0x1

/* SerDes RxTx register offsets */
#define RXTX_REG20	0x0050
#define RXTX_REG114	0x01c8

/* SerDes RxTx register entry bit positions and sizes */
#define RXTX_REG20_BLWC_ENA_INDEX	2
#define RXTX_REG20_BLWC_ENA_WIDTH	1
#define RXTX_REG114_PQ_REG_INDEX	9
#define RXTX_REG114_PQ_REG_WIDTH	7

/* Per-speed RxTx values: baseline wander correction and PQ setting */
#define RXTX_10000_BLWC			0
#define RXTX_10000_PQ			0x1e

#define RXTX_2500_BLWC			1
#define RXTX_2500_PQ			0xa

#define RXTX_1000_BLWC			1
#define RXTX_1000_PQ			0xa
183
/* Bit setting and getting macros
 * The get macro extracts the current bit field value from within
 * the variable.
 *
 * The set macro clears the current bit field value within the
 * variable and then sets the bit field of the variable to the
 * specified value.
 */
#define GET_BITS(_var, _index, _width)					\
	(((_var) >> (_index)) & ((0x1 << (_width)) - 1))

#define SET_BITS(_var, _index, _width, _val)				\
do {									\
	(_var) &= ~(((0x1 << (_width)) - 1) << (_index));		\
	(_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index));	\
} while (0)

/* Field accessors built from the <prefix>_<field>_INDEX/WIDTH names */
#define XSIR_GET_BITS(_var, _prefix, _field)				\
	GET_BITS((_var),						\
		 _prefix##_##_field##_INDEX,				\
		 _prefix##_##_field##_WIDTH)

#define XSIR_SET_BITS(_var, _prefix, _field, _val)			\
	SET_BITS((_var),						\
		 _prefix##_##_field##_INDEX,				\
		 _prefix##_##_field##_WIDTH, (_val))
210
4d874b30
LT
/* Macros for reading or writing SerDes integration registers
 * The ioread macros get bit fields or full values using the
 * register definitions formed from the input names.
 *
 * The iowrite macros set bit fields or full values using the
 * register definitions formed from the input names.
 */
#define XSIR0_IOREAD(_priv, _reg)					\
	ioread16((_priv)->sir0_regs + _reg)

#define XSIR0_IOREAD_BITS(_priv, _reg, _field)				\
	GET_BITS(XSIR0_IOREAD((_priv), _reg),				\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH)

#define XSIR0_IOWRITE(_priv, _reg, _val)				\
	iowrite16((_val), (_priv)->sir0_regs + _reg)

#define XSIR0_IOWRITE_BITS(_priv, _reg, _field, _val)			\
do {									\
	u16 reg_val = XSIR0_IOREAD((_priv), _reg);			\
	SET_BITS(reg_val,						\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH, (_val));			\
	XSIR0_IOWRITE((_priv), _reg, reg_val);				\
} while (0)

#define XSIR1_IOREAD(_priv, _reg)					\
	ioread16((_priv)->sir1_regs + _reg)

#define XSIR1_IOREAD_BITS(_priv, _reg, _field)				\
	GET_BITS(XSIR1_IOREAD((_priv), _reg),				\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH)

#define XSIR1_IOWRITE(_priv, _reg, _val)				\
	iowrite16((_val), (_priv)->sir1_regs + _reg)

#define XSIR1_IOWRITE_BITS(_priv, _reg, _field, _val)			\
do {									\
	u16 reg_val = XSIR1_IOREAD((_priv), _reg);			\
	SET_BITS(reg_val,						\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH, (_val));			\
	XSIR1_IOWRITE((_priv), _reg, reg_val);				\
} while (0)

/* Macros for reading or writing SerDes RxTx registers
 * The ioread macros get bit fields or full values using the
 * register definitions formed from the input names.
 *
 * The iowrite macros set bit fields or full values using the
 * register definitions formed from the input names.
 */
#define XRXTX_IOREAD(_priv, _reg)					\
	ioread16((_priv)->rxtx_regs + _reg)

#define XRXTX_IOREAD_BITS(_priv, _reg, _field)				\
	GET_BITS(XRXTX_IOREAD((_priv), _reg),				\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH)

#define XRXTX_IOWRITE(_priv, _reg, _val)				\
	iowrite16((_val), (_priv)->rxtx_regs + _reg)

#define XRXTX_IOWRITE_BITS(_priv, _reg, _field, _val)			\
do {									\
	u16 reg_val = XRXTX_IOREAD((_priv), _reg);			\
	SET_BITS(reg_val,						\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH, (_val));			\
	XRXTX_IOWRITE((_priv), _reg, reg_val);				\
} while (0)
285
286
/* Auto-negotiation state machine states */
enum amd_xgbe_phy_an {
	AMD_XGBE_AN_READY = 0,
	AMD_XGBE_AN_START,
	AMD_XGBE_AN_EVENT,
	AMD_XGBE_AN_PAGE_RECEIVED,
	AMD_XGBE_AN_INCOMPAT_LINK,
	AMD_XGBE_AN_COMPLETE,
	AMD_XGBE_AN_NO_LINK,
	AMD_XGBE_AN_EXIT,
	AMD_XGBE_AN_ERROR,
};

/* Per-mode receive (page exchange) state */
enum amd_xgbe_phy_rx {
	AMD_XGBE_RX_READY = 0,
	AMD_XGBE_RX_BPA,
	AMD_XGBE_RX_XNP,
	AMD_XGBE_RX_COMPLETE,
};

/* Current PHY operating mode: 10GBASE-KR or 1000/2500BASE-KX */
enum amd_xgbe_phy_mode {
	AMD_XGBE_MODE_KR,
	AMD_XGBE_MODE_KX,
};

/* Supported speed-set combinations (from the amd,speed-set property) */
enum amd_xgbe_phy_speedset {
	AMD_XGBE_PHY_SPEEDSET_1000_10000,
	AMD_XGBE_PHY_SPEEDSET_2500_10000,
};
315
4d874b30
LT
316struct amd_xgbe_phy_priv {
317 struct platform_device *pdev;
318 struct device *dev;
319
320 struct phy_device *phydev;
321
322 /* SerDes related mmio resources */
323 struct resource *rxtx_res;
324 struct resource *sir0_res;
325 struct resource *sir1_res;
326
327 /* SerDes related mmio registers */
328 void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */
329 void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
330 void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
331
332 /* Maintain link status for re-starting auto-negotiation */
333 unsigned int link;
334 enum amd_xgbe_phy_mode mode;
f047604a 335 unsigned int speed_set;
4d874b30
LT
336
337 /* Auto-negotiation state machine support */
338 struct mutex an_mutex;
339 enum amd_xgbe_phy_an an_result;
340 enum amd_xgbe_phy_an an_state;
341 enum amd_xgbe_phy_rx kr_state;
342 enum amd_xgbe_phy_rx kx_state;
343 struct work_struct an_work;
344 struct workqueue_struct *an_workqueue;
345};
346
347static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
348{
349 int ret;
350
351 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
352 if (ret < 0)
353 return ret;
354
355 ret |= 0x02;
356 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
357
358 return 0;
359}
360
361static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
362{
363 int ret;
364
365 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
366 if (ret < 0)
367 return ret;
368
369 ret &= ~0x02;
370 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
371
372 return 0;
373}
374
375static int amd_xgbe_phy_pcs_power_cycle(struct phy_device *phydev)
376{
377 int ret;
378
379 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
380 if (ret < 0)
381 return ret;
382
383 ret |= MDIO_CTRL1_LPOWER;
384 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
385
386 usleep_range(75, 100);
387
388 ret &= ~MDIO_CTRL1_LPOWER;
389 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
390
391 return 0;
392}
393
394static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev)
395{
396 struct amd_xgbe_phy_priv *priv = phydev->priv;
397
398 /* Assert Rx and Tx ratechange */
399 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
400}
401
402static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
403{
404 struct amd_xgbe_phy_priv *priv = phydev->priv;
169a6303
LT
405 unsigned int wait;
406 u16 status;
4d874b30
LT
407
408 /* Release Rx and Tx ratechange */
409 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0);
410
411 /* Wait for Rx and Tx ready */
169a6303
LT
412 wait = XGBE_PHY_RATECHANGE_COUNT;
413 while (wait--) {
1fa1f2e0 414 usleep_range(50, 75);
169a6303
LT
415
416 status = XSIR0_IOREAD(priv, SIR0_STATUS);
417 if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
418 XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
419 return;
420 }
421
1fa1f2e0 422 netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
169a6303 423 status);
4d874b30
LT
424}
425
426static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
427{
428 struct amd_xgbe_phy_priv *priv = phydev->priv;
429 int ret;
430
431 /* Enable KR training */
432 ret = amd_xgbe_an_enable_kr_training(phydev);
433 if (ret < 0)
434 return ret;
435
436 /* Set PCS to KR/10G speed */
437 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
438 if (ret < 0)
439 return ret;
440
441 ret &= ~MDIO_PCS_CTRL2_TYPE;
442 ret |= MDIO_PCS_CTRL2_10GBR;
443 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
444
445 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
446 if (ret < 0)
447 return ret;
448
449 ret &= ~MDIO_CTRL1_SPEEDSEL;
450 ret |= MDIO_CTRL1_SPEED10G;
451 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
452
453 ret = amd_xgbe_phy_pcs_power_cycle(phydev);
454 if (ret < 0)
455 return ret;
456
457 /* Set SerDes to 10G speed */
458 amd_xgbe_phy_serdes_start_ratechange(phydev);
459
460 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
461 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
462 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_10000_TXAMP);
463 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);
464 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_10000_CDR);
465
466 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_10000_BLWC);
467 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_10000_PQ);
468
469 amd_xgbe_phy_serdes_complete_ratechange(phydev);
470
471 priv->mode = AMD_XGBE_MODE_KR;
472
473 return 0;
474}
475
476static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
477{
478 struct amd_xgbe_phy_priv *priv = phydev->priv;
479 int ret;
480
481 /* Disable KR training */
482 ret = amd_xgbe_an_disable_kr_training(phydev);
483 if (ret < 0)
484 return ret;
485
486 /* Set PCS to KX/1G speed */
487 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
488 if (ret < 0)
489 return ret;
490
491 ret &= ~MDIO_PCS_CTRL2_TYPE;
492 ret |= MDIO_PCS_CTRL2_10GBX;
493 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
494
495 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
496 if (ret < 0)
497 return ret;
498
499 ret &= ~MDIO_CTRL1_SPEEDSEL;
500 ret |= MDIO_CTRL1_SPEED1G;
501 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
502
503 ret = amd_xgbe_phy_pcs_power_cycle(phydev);
504 if (ret < 0)
505 return ret;
506
507 /* Set SerDes to 2.5G speed */
508 amd_xgbe_phy_serdes_start_ratechange(phydev);
509
510 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
511 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
512 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_2500_TXAMP);
513 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);
514 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_2500_CDR);
515
516 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_2500_BLWC);
517 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_2500_PQ);
518
519 amd_xgbe_phy_serdes_complete_ratechange(phydev);
520
521 priv->mode = AMD_XGBE_MODE_KX;
522
523 return 0;
524}
525
526static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
527{
528 struct amd_xgbe_phy_priv *priv = phydev->priv;
529 int ret;
530
531 /* Disable KR training */
532 ret = amd_xgbe_an_disable_kr_training(phydev);
533 if (ret < 0)
534 return ret;
535
536 /* Set PCS to KX/1G speed */
537 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
538 if (ret < 0)
539 return ret;
540
541 ret &= ~MDIO_PCS_CTRL2_TYPE;
542 ret |= MDIO_PCS_CTRL2_10GBX;
543 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
544
545 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
546 if (ret < 0)
547 return ret;
548
549 ret &= ~MDIO_CTRL1_SPEEDSEL;
550 ret |= MDIO_CTRL1_SPEED1G;
551 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
552
553 ret = amd_xgbe_phy_pcs_power_cycle(phydev);
554 if (ret < 0)
555 return ret;
556
557 /* Set SerDes to 1G speed */
558 amd_xgbe_phy_serdes_start_ratechange(phydev);
559
560 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
561 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
562 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_1000_TXAMP);
563 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);
564 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_1000_CDR);
565
566 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_1000_BLWC);
567 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_1000_PQ);
568
569 amd_xgbe_phy_serdes_complete_ratechange(phydev);
570
571 priv->mode = AMD_XGBE_MODE_KX;
572
573 return 0;
574}
575
576static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
577{
578 struct amd_xgbe_phy_priv *priv = phydev->priv;
579 int ret;
580
581 /* If we are in KR switch to KX, and vice-versa */
f047604a
LT
582 if (priv->mode == AMD_XGBE_MODE_KR) {
583 if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000)
584 ret = amd_xgbe_phy_gmii_mode(phydev);
585 else
586 ret = amd_xgbe_phy_gmii_2500_mode(phydev);
587 } else {
4d874b30 588 ret = amd_xgbe_phy_xgmii_mode(phydev);
f047604a 589 }
4d874b30
LT
590
591 return ret;
592}
593
594static enum amd_xgbe_phy_an amd_xgbe_an_switch_mode(struct phy_device *phydev)
595{
596 int ret;
597
598 ret = amd_xgbe_phy_switch_mode(phydev);
599 if (ret < 0)
600 return AMD_XGBE_AN_ERROR;
601
602 return AMD_XGBE_AN_START;
603}
604
605static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
606 enum amd_xgbe_phy_rx *state)
607{
608 struct amd_xgbe_phy_priv *priv = phydev->priv;
609 int ad_reg, lp_reg, ret;
610
611 *state = AMD_XGBE_RX_COMPLETE;
612
613 /* If we're in KX mode then we're done */
614 if (priv->mode == AMD_XGBE_MODE_KX)
615 return AMD_XGBE_AN_EVENT;
616
617 /* Enable/Disable FEC */
618 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
619 if (ad_reg < 0)
620 return AMD_XGBE_AN_ERROR;
621
622 lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 2);
623 if (lp_reg < 0)
624 return AMD_XGBE_AN_ERROR;
625
626 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL);
627 if (ret < 0)
628 return AMD_XGBE_AN_ERROR;
629
630 if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
631 ret |= 0x01;
632 else
633 ret &= ~0x01;
634
635 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);
636
637 /* Start KR training */
638 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
639 if (ret < 0)
640 return AMD_XGBE_AN_ERROR;
641
5c10e5cb
LT
642 XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);
643
4d874b30
LT
644 ret |= 0x01;
645 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
646
5c10e5cb
LT
647 XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
648
4d874b30
LT
649 return AMD_XGBE_AN_EVENT;
650}
651
652static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
653 enum amd_xgbe_phy_rx *state)
654{
655 u16 msg;
656
657 *state = AMD_XGBE_RX_XNP;
658
659 msg = XNP_MCF_NULL_MESSAGE;
660 msg |= XNP_MP_FORMATTED;
661
662 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
663 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
664 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);
665
666 return AMD_XGBE_AN_EVENT;
667}
668
669static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
670 enum amd_xgbe_phy_rx *state)
671{
672 struct amd_xgbe_phy_priv *priv = phydev->priv;
673 unsigned int link_support;
674 int ret, ad_reg, lp_reg;
675
676 /* Read Base Ability register 2 first */
677 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
678 if (ret < 0)
679 return AMD_XGBE_AN_ERROR;
680
681 /* Check for a supported mode, otherwise restart in a different one */
682 link_support = (priv->mode == AMD_XGBE_MODE_KR) ? 0x80 : 0x20;
683 if (!(ret & link_support))
684 return amd_xgbe_an_switch_mode(phydev);
685
686 /* Check Extended Next Page support */
687 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
688 if (ad_reg < 0)
689 return AMD_XGBE_AN_ERROR;
690
691 lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
692 if (lp_reg < 0)
693 return AMD_XGBE_AN_ERROR;
694
695 return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
696 amd_xgbe_an_tx_xnp(phydev, state) :
697 amd_xgbe_an_tx_training(phydev, state);
698}
699
700static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
701 enum amd_xgbe_phy_rx *state)
702{
703 int ad_reg, lp_reg;
704
705 /* Check Extended Next Page support */
706 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
707 if (ad_reg < 0)
708 return AMD_XGBE_AN_ERROR;
709
710 lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
711 if (lp_reg < 0)
712 return AMD_XGBE_AN_ERROR;
713
714 return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
715 amd_xgbe_an_tx_xnp(phydev, state) :
716 amd_xgbe_an_tx_training(phydev, state);
717}
718
719static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev)
720{
721 struct amd_xgbe_phy_priv *priv = phydev->priv;
722 int ret;
723
724 /* Be sure we aren't looping trying to negotiate */
725 if (priv->mode == AMD_XGBE_MODE_KR) {
726 if (priv->kr_state != AMD_XGBE_RX_READY)
727 return AMD_XGBE_AN_NO_LINK;
728 priv->kr_state = AMD_XGBE_RX_BPA;
729 } else {
730 if (priv->kx_state != AMD_XGBE_RX_READY)
731 return AMD_XGBE_AN_NO_LINK;
732 priv->kx_state = AMD_XGBE_RX_BPA;
733 }
734
735 /* Set up Advertisement register 3 first */
736 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
737 if (ret < 0)
738 return AMD_XGBE_AN_ERROR;
739
740 if (phydev->supported & SUPPORTED_10000baseR_FEC)
741 ret |= 0xc000;
742 else
743 ret &= ~0xc000;
744
745 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
746
747 /* Set up Advertisement register 2 next */
748 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
749 if (ret < 0)
750 return AMD_XGBE_AN_ERROR;
751
752 if (phydev->supported & SUPPORTED_10000baseKR_Full)
753 ret |= 0x80;
754 else
755 ret &= ~0x80;
756
f047604a
LT
757 if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
758 (phydev->supported & SUPPORTED_2500baseX_Full))
4d874b30
LT
759 ret |= 0x20;
760 else
761 ret &= ~0x20;
762
763 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
764
765 /* Set up Advertisement register 1 last */
766 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
767 if (ret < 0)
768 return AMD_XGBE_AN_ERROR;
769
770 if (phydev->supported & SUPPORTED_Pause)
771 ret |= 0x400;
772 else
773 ret &= ~0x400;
774
775 if (phydev->supported & SUPPORTED_Asym_Pause)
776 ret |= 0x800;
777 else
778 ret &= ~0x800;
779
780 /* We don't intend to perform XNP */
781 ret &= ~XNP_NP_EXCHANGE;
782
783 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
784
785 /* Enable and start auto-negotiation */
786 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
787
788 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
789 if (ret < 0)
790 return AMD_XGBE_AN_ERROR;
791
792 ret |= MDIO_AN_CTRL1_ENABLE;
793 ret |= MDIO_AN_CTRL1_RESTART;
794 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
795
796 return AMD_XGBE_AN_EVENT;
797}
798
799static enum amd_xgbe_phy_an amd_xgbe_an_event(struct phy_device *phydev)
800{
801 enum amd_xgbe_phy_an new_state;
802 int ret;
803
804 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
805 if (ret < 0)
806 return AMD_XGBE_AN_ERROR;
807
808 new_state = AMD_XGBE_AN_EVENT;
809 if (ret & XGBE_AN_PG_RCV)
810 new_state = AMD_XGBE_AN_PAGE_RECEIVED;
811 else if (ret & XGBE_AN_INC_LINK)
812 new_state = AMD_XGBE_AN_INCOMPAT_LINK;
813 else if (ret & XGBE_AN_INT_CMPLT)
814 new_state = AMD_XGBE_AN_COMPLETE;
815
816 if (new_state != AMD_XGBE_AN_EVENT)
817 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
818
819 return new_state;
820}
821
822static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
823{
824 struct amd_xgbe_phy_priv *priv = phydev->priv;
825 enum amd_xgbe_phy_rx *state;
826 int ret;
827
828 state = (priv->mode == AMD_XGBE_MODE_KR) ? &priv->kr_state
829 : &priv->kx_state;
830
831 switch (*state) {
832 case AMD_XGBE_RX_BPA:
833 ret = amd_xgbe_an_rx_bpa(phydev, state);
834 break;
835
836 case AMD_XGBE_RX_XNP:
837 ret = amd_xgbe_an_rx_xnp(phydev, state);
838 break;
839
840 default:
841 ret = AMD_XGBE_AN_ERROR;
842 }
843
844 return ret;
845}
846
847static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
848{
849 return amd_xgbe_an_switch_mode(phydev);
850}
851
852static void amd_xgbe_an_state_machine(struct work_struct *work)
853{
854 struct amd_xgbe_phy_priv *priv = container_of(work,
855 struct amd_xgbe_phy_priv,
856 an_work);
857 struct phy_device *phydev = priv->phydev;
858 enum amd_xgbe_phy_an cur_state;
859 int sleep;
b668a3ae 860 unsigned int an_supported = 0;
4d874b30
LT
861
862 while (1) {
863 mutex_lock(&priv->an_mutex);
864
865 cur_state = priv->an_state;
866
867 switch (priv->an_state) {
868 case AMD_XGBE_AN_START:
869 priv->an_state = amd_xgbe_an_start(phydev);
b668a3ae 870 an_supported = 0;
4d874b30
LT
871 break;
872
873 case AMD_XGBE_AN_EVENT:
874 priv->an_state = amd_xgbe_an_event(phydev);
875 break;
876
877 case AMD_XGBE_AN_PAGE_RECEIVED:
878 priv->an_state = amd_xgbe_an_page_received(phydev);
b668a3ae 879 an_supported++;
4d874b30
LT
880 break;
881
882 case AMD_XGBE_AN_INCOMPAT_LINK:
883 priv->an_state = amd_xgbe_an_incompat_link(phydev);
884 break;
885
886 case AMD_XGBE_AN_COMPLETE:
b668a3ae
LT
887 netdev_info(phydev->attached_dev, "%s successful\n",
888 an_supported ? "Auto negotiation"
889 : "Parallel detection");
890 /* fall through */
891
4d874b30
LT
892 case AMD_XGBE_AN_NO_LINK:
893 case AMD_XGBE_AN_EXIT:
894 goto exit_unlock;
895
896 default:
897 priv->an_state = AMD_XGBE_AN_ERROR;
898 }
899
900 if (priv->an_state == AMD_XGBE_AN_ERROR) {
901 netdev_err(phydev->attached_dev,
902 "error during auto-negotiation, state=%u\n",
903 cur_state);
904 goto exit_unlock;
905 }
906
907 sleep = (priv->an_state == AMD_XGBE_AN_EVENT) ? 1 : 0;
908
909 mutex_unlock(&priv->an_mutex);
910
911 if (sleep)
912 usleep_range(20, 50);
913 }
914
915exit_unlock:
916 priv->an_result = priv->an_state;
917 priv->an_state = AMD_XGBE_AN_READY;
918
919 mutex_unlock(&priv->an_mutex);
920}
921
922static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
923{
924 int count, ret;
925
926 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
927 if (ret < 0)
928 return ret;
929
930 ret |= MDIO_CTRL1_RESET;
931 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
932
933 count = 50;
934 do {
935 msleep(20);
936 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
937 if (ret < 0)
938 return ret;
939 } while ((ret & MDIO_CTRL1_RESET) && --count);
940
941 if (ret & MDIO_CTRL1_RESET)
942 return -ETIMEDOUT;
943
944 return 0;
945}
946
947static int amd_xgbe_phy_config_init(struct phy_device *phydev)
948{
f047604a
LT
949 struct amd_xgbe_phy_priv *priv = phydev->priv;
950
4d874b30
LT
951 /* Initialize supported features */
952 phydev->supported = SUPPORTED_Autoneg;
953 phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
954 phydev->supported |= SUPPORTED_Backplane;
4d874b30
LT
955 phydev->supported |= SUPPORTED_10000baseKR_Full |
956 SUPPORTED_10000baseR_FEC;
f047604a
LT
957 switch (priv->speed_set) {
958 case AMD_XGBE_PHY_SPEEDSET_1000_10000:
959 phydev->supported |= SUPPORTED_1000baseKX_Full;
960 break;
961 case AMD_XGBE_PHY_SPEEDSET_2500_10000:
962 phydev->supported |= SUPPORTED_2500baseX_Full;
963 break;
964 }
4d874b30
LT
965 phydev->advertising = phydev->supported;
966
967 /* Turn off and clear interrupts */
968 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
969 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
970
971 return 0;
972}
973
974static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
975{
976 int ret;
977
978 /* Disable auto-negotiation */
979 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
980 if (ret < 0)
981 return ret;
982
983 ret &= ~MDIO_AN_CTRL1_ENABLE;
984 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
985
986 /* Validate/Set specified speed */
987 switch (phydev->speed) {
988 case SPEED_10000:
989 ret = amd_xgbe_phy_xgmii_mode(phydev);
990 break;
991
992 case SPEED_2500:
993 ret = amd_xgbe_phy_gmii_2500_mode(phydev);
994 break;
995
996 case SPEED_1000:
997 ret = amd_xgbe_phy_gmii_mode(phydev);
998 break;
999
1000 default:
1001 ret = -EINVAL;
1002 }
1003
1004 if (ret < 0)
1005 return ret;
1006
1007 /* Validate duplex mode */
1008 if (phydev->duplex != DUPLEX_FULL)
1009 return -EINVAL;
1010
1011 phydev->pause = 0;
1012 phydev->asym_pause = 0;
1013
1014 return 0;
1015}
1016
1017static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
1018{
1019 struct amd_xgbe_phy_priv *priv = phydev->priv;
1020 u32 mmd_mask = phydev->c45_ids.devices_in_package;
1021 int ret;
1022
1023 if (phydev->autoneg != AUTONEG_ENABLE)
1024 return amd_xgbe_phy_setup_forced(phydev);
1025
1026 /* Make sure we have the AN MMD present */
1027 if (!(mmd_mask & MDIO_DEVS_AN))
1028 return -EINVAL;
1029
1030 /* Get the current speed mode */
1031 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
1032 if (ret < 0)
1033 return ret;
1034
1035 /* Start/Restart the auto-negotiation state machine */
1036 mutex_lock(&priv->an_mutex);
1037 priv->an_result = AMD_XGBE_AN_READY;
1038 priv->an_state = AMD_XGBE_AN_START;
1039 priv->kr_state = AMD_XGBE_RX_READY;
1040 priv->kx_state = AMD_XGBE_RX_READY;
1041 mutex_unlock(&priv->an_mutex);
1042
1043 queue_work(priv->an_workqueue, &priv->an_work);
1044
1045 return 0;
1046}
1047
1048static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
1049{
1050 struct amd_xgbe_phy_priv *priv = phydev->priv;
1051 enum amd_xgbe_phy_an state;
1052
1053 mutex_lock(&priv->an_mutex);
1054 state = priv->an_result;
1055 mutex_unlock(&priv->an_mutex);
1056
1057 return (state == AMD_XGBE_AN_COMPLETE);
1058}
1059
1060static int amd_xgbe_phy_update_link(struct phy_device *phydev)
1061{
1062 struct amd_xgbe_phy_priv *priv = phydev->priv;
1063 enum amd_xgbe_phy_an state;
1064 unsigned int check_again, autoneg;
1065 int ret;
1066
1067 /* If we're doing auto-negotiation don't report link down */
1068 mutex_lock(&priv->an_mutex);
1069 state = priv->an_state;
1070 mutex_unlock(&priv->an_mutex);
1071
1072 if (state != AMD_XGBE_AN_READY) {
1073 phydev->link = 1;
1074 return 0;
1075 }
1076
1077 /* Since the device can be in the wrong mode when a link is
1078 * (re-)established (cable connected after the interface is
1079 * up, etc.), the link status may report no link. If there
1080 * is no link, try switching modes and checking the status
f047604a 1081 * again if auto negotiation is enabled.
4d874b30 1082 */
f047604a 1083 check_again = (phydev->autoneg == AUTONEG_ENABLE) ? 1 : 0;
4d874b30
LT
1084again:
1085 /* Link status is latched low, so read once to clear
1086 * and then read again to get current state
1087 */
1088 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
1089 if (ret < 0)
1090 return ret;
1091
1092 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
1093 if (ret < 0)
1094 return ret;
1095
1096 phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;
1097
1098 if (!phydev->link) {
4d874b30 1099 if (check_again) {
f047604a
LT
1100 ret = amd_xgbe_phy_switch_mode(phydev);
1101 if (ret < 0)
1102 return ret;
4d874b30
LT
1103 check_again = 0;
1104 goto again;
1105 }
1106 }
1107
1108 autoneg = (phydev->link && !priv->link) ? 1 : 0;
1109 priv->link = phydev->link;
1110 if (autoneg) {
1111 /* Link is (back) up, re-start auto-negotiation */
1112 ret = amd_xgbe_phy_config_aneg(phydev);
1113 if (ret < 0)
1114 return ret;
1115 }
1116
1117 return 0;
1118}
1119
/* phylib read_status callback: update link state and, when the link is
 * up, derive speed/duplex/pause settings.  With auto-negotiation
 * enabled the result comes from comparing the local advertisement with
 * the link partner's ability registers; otherwise the current PCS mode
 * determines the speed.  Returns 0 on success, negative errno on error.
 */
static int amd_xgbe_phy_read_status(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	u32 mmd_mask = phydev->c45_ids.devices_in_package;
	int ret, mode, ad_ret, lp_ret;

	/* Refresh phydev->link (may switch KR/KX mode or restart AN) */
	ret = amd_xgbe_phy_update_link(phydev);
	if (ret)
		return ret;

	/* Read the currently selected PCS type (KR vs KX) */
	mode = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (mode < 0)
		return mode;
	mode &= MDIO_PCS_CTRL2_TYPE;

	if (phydev->autoneg == AUTONEG_ENABLE) {
		if (!(mmd_mask & MDIO_DEVS_AN))
			return -EINVAL;

		/* Nothing to report until auto-negotiation completes */
		if (!amd_xgbe_phy_aneg_done(phydev))
			return 0;

		/* Compare Advertisement and Link Partner register 1 */
		ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
		if (ad_ret < 0)
			return ad_ret;
		lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
		if (lp_ret < 0)
			return lp_ret;

		/* Keep only abilities both ends advertised; bits 10/11
		 * carry the pause / asymmetric-pause abilities
		 * (presumably per IEEE 802.3 clause 73 - confirm).
		 */
		ad_ret &= lp_ret;
		phydev->pause = (ad_ret & 0x400) ? 1 : 0;
		phydev->asym_pause = (ad_ret & 0x800) ? 1 : 0;

		/* Compare Advertisement and Link Partner register 2 */
		ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN,
				      MDIO_AN_ADVERTISE + 1);
		if (ad_ret < 0)
			return ad_ret;
		lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
		if (lp_ret < 0)
			return lp_ret;

		/* Bit 7 of register 2 is treated here as the 10Gbps (KR)
		 * ability; switch the PCS into the matching mode if it
		 * isn't there already.
		 */
		ad_ret &= lp_ret;
		if (ad_ret & 0x80) {
			phydev->speed = SPEED_10000;
			if (mode != MDIO_PCS_CTRL2_10GBR) {
				ret = amd_xgbe_phy_xgmii_mode(phydev);
				if (ret < 0)
					return ret;
			}
		} else {
			int (*mode_fcn)(struct phy_device *);

			/* Non-10G result: pick 1G or 2.5G according to
			 * the board's configured speed set.
			 */
			if (priv->speed_set ==
			    AMD_XGBE_PHY_SPEEDSET_1000_10000) {
				phydev->speed = SPEED_1000;
				mode_fcn = amd_xgbe_phy_gmii_mode;
			} else {
				phydev->speed = SPEED_2500;
				mode_fcn = amd_xgbe_phy_gmii_2500_mode;
			}

			/* Leave 10GBase-R mode if that's what is selected */
			if (mode == MDIO_PCS_CTRL2_10GBR) {
				ret = mode_fcn(phydev);
				if (ret < 0)
					return ret;
			}
		}

		phydev->duplex = DUPLEX_FULL;
	} else {
		/* Forced mode: derive speed from the current PCS type */
		if (mode == MDIO_PCS_CTRL2_10GBR) {
			phydev->speed = SPEED_10000;
		} else {
			if (priv->speed_set ==
			    AMD_XGBE_PHY_SPEEDSET_1000_10000)
				phydev->speed = SPEED_1000;
			else
				phydev->speed = SPEED_2500;
		}

		phydev->duplex = DUPLEX_FULL;
		phydev->pause = 0;
		phydev->asym_pause = 0;
	}

	return 0;
}
1208
1209static int amd_xgbe_phy_suspend(struct phy_device *phydev)
1210{
1211 int ret;
1212
1213 mutex_lock(&phydev->lock);
1214
1215 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
1216 if (ret < 0)
1217 goto unlock;
1218
1219 ret |= MDIO_CTRL1_LPOWER;
1220 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
1221
1222 ret = 0;
1223
1224unlock:
1225 mutex_unlock(&phydev->lock);
1226
1227 return ret;
1228}
1229
1230static int amd_xgbe_phy_resume(struct phy_device *phydev)
1231{
1232 int ret;
1233
1234 mutex_lock(&phydev->lock);
1235
1236 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
1237 if (ret < 0)
1238 goto unlock;
1239
1240 ret &= ~MDIO_CTRL1_LPOWER;
1241 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
1242
1243 ret = 0;
1244
1245unlock:
1246 mutex_unlock(&phydev->lock);
1247
1248 return ret;
1249}
1250
/* phylib probe callback: locate the matching platform device via the
 * device tree, map the SerDes register regions, read the board's
 * speed-set configuration, determine the initial KR/KX mode and create
 * the auto-negotiation workqueue.  On any failure every resource
 * acquired so far is released explicitly (the devm_* allocations
 * belong to the platform device, not the PHY, so they would otherwise
 * outlive this probe).  Returns 0 on success, negative errno on error.
 */
static int amd_xgbe_phy_probe(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv;
	struct platform_device *pdev;
	struct device *dev;
	char *wq_name;
	const __be32 *property;
	unsigned int speed_set;
	int ret;

	/* The driver is configured entirely from the device tree */
	if (!phydev->dev.of_node)
		return -EINVAL;

	/* of_find_device_by_node takes a reference; dropped via of_dev_put */
	pdev = of_find_device_by_node(phydev->dev.of_node);
	if (!pdev)
		return -EINVAL;
	dev = &pdev->dev;

	/* Per-bus workqueue name so multiple instances don't collide */
	wq_name = kasprintf(GFP_KERNEL, "%s-amd-xgbe-phy", phydev->bus->name);
	if (!wq_name) {
		ret = -ENOMEM;
		goto err_pdev;
	}

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto err_name;
	}

	priv->pdev = pdev;
	priv->dev = dev;
	priv->phydev = phydev;

	/* Get the device mmio areas */
	priv->rxtx_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
	if (IS_ERR(priv->rxtx_regs)) {
		dev_err(dev, "rxtx ioremap failed\n");
		ret = PTR_ERR(priv->rxtx_regs);
		goto err_priv;
	}

	priv->sir0_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
	if (IS_ERR(priv->sir0_regs)) {
		dev_err(dev, "sir0 ioremap failed\n");
		ret = PTR_ERR(priv->sir0_regs);
		goto err_rxtx;
	}

	priv->sir1_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
	if (IS_ERR(priv->sir1_regs)) {
		dev_err(dev, "sir1 ioremap failed\n");
		ret = PTR_ERR(priv->sir1_regs);
		goto err_sir0;
	}

	/* Get the device speed set property (defaults to 0 when absent) */
	speed_set = 0;
	property = of_get_property(dev->of_node, XGBE_PHY_SPEEDSET_PROPERTY,
				   NULL);
	if (property)
		speed_set = be32_to_cpu(*property);

	/* 0 => 1G/10G capable board, 1 => 2.5G/10G capable board */
	switch (speed_set) {
	case 0:
		priv->speed_set = AMD_XGBE_PHY_SPEEDSET_1000_10000;
		break;
	case 1:
		priv->speed_set = AMD_XGBE_PHY_SPEEDSET_2500_10000;
		break;
	default:
		dev_err(dev, "invalid amd,speed-set property\n");
		ret = -EINVAL;
		goto err_sir1;
	}

	/* Assume link up initially; update_link will correct this */
	priv->link = 1;

	/* Record the initial KR/KX mode from the current PCS type */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		goto err_sir1;
	if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
		priv->mode = AMD_XGBE_MODE_KR;
	else
		priv->mode = AMD_XGBE_MODE_KX;

	mutex_init(&priv->an_mutex);
	INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
	priv->an_workqueue = create_singlethread_workqueue(wq_name);
	if (!priv->an_workqueue) {
		ret = -ENOMEM;
		goto err_sir1;
	}

	phydev->priv = priv;

	/* The workqueue keeps its own copy of the name; the platform
	 * device reference is no longer needed either.
	 */
	kfree(wq_name);
	of_dev_put(pdev);

	return 0;

	/* Error unwind: release resources in reverse acquisition order */
err_sir1:
	devm_iounmap(dev, priv->sir1_regs);
	devm_release_mem_region(dev, priv->sir1_res->start,
				resource_size(priv->sir1_res));

err_sir0:
	devm_iounmap(dev, priv->sir0_regs);
	devm_release_mem_region(dev, priv->sir0_res->start,
				resource_size(priv->sir0_res));

err_rxtx:
	devm_iounmap(dev, priv->rxtx_regs);
	devm_release_mem_region(dev, priv->rxtx_res->start,
				resource_size(priv->rxtx_res));

err_priv:
	devm_kfree(dev, priv);

err_name:
	kfree(wq_name);

err_pdev:
	of_dev_put(pdev);

	return ret;
}
1381
/* phylib remove callback: stop the auto-negotiation worker and release
 * everything probe acquired.  The devm_* resources are released
 * explicitly because they are tied to the platform device's lifetime,
 * not the PHY's.
 */
static void amd_xgbe_phy_remove(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	struct device *dev = priv->dev;

	/* Stop any in process auto-negotiation */
	mutex_lock(&priv->an_mutex);
	priv->an_state = AMD_XGBE_AN_EXIT;
	mutex_unlock(&priv->an_mutex);

	/* Wait for the worker to observe AN_EXIT, then tear it down */
	flush_workqueue(priv->an_workqueue);
	destroy_workqueue(priv->an_workqueue);

	/* Release resources */
	devm_iounmap(dev, priv->sir1_regs);
	devm_release_mem_region(dev, priv->sir1_res->start,
				resource_size(priv->sir1_res));

	devm_iounmap(dev, priv->sir0_regs);
	devm_release_mem_region(dev, priv->sir0_res->start,
				resource_size(priv->sir0_res));

	devm_iounmap(dev, priv->rxtx_regs);
	devm_release_mem_region(dev, priv->rxtx_res->start,
				resource_size(priv->rxtx_res));

	devm_kfree(dev, priv);
}
1410
1411static int amd_xgbe_match_phy_device(struct phy_device *phydev)
1412{
1413 return phydev->c45_ids.device_ids[MDIO_MMD_PCS] == XGBE_PHY_ID;
1414}
1415
/* phylib driver table: a single entry describing the AMD XGBE PHY and
 * its callbacks.  .features is 0 because supported/advertised link
 * modes are determined at runtime rather than declared statically.
 */
static struct phy_driver amd_xgbe_phy_driver[] = {
	{
		.phy_id			= XGBE_PHY_ID,
		.phy_id_mask		= XGBE_PHY_MASK,
		.name			= "AMD XGBE PHY",
		.features		= 0,
		.probe			= amd_xgbe_phy_probe,
		.remove			= amd_xgbe_phy_remove,
		.soft_reset		= amd_xgbe_phy_soft_reset,
		.config_init		= amd_xgbe_phy_config_init,
		.suspend		= amd_xgbe_phy_suspend,
		.resume			= amd_xgbe_phy_resume,
		.config_aneg		= amd_xgbe_phy_config_aneg,
		.aneg_done		= amd_xgbe_phy_aneg_done,
		.read_status		= amd_xgbe_phy_read_status,
		.match_phy_device	= amd_xgbe_match_phy_device,
		.driver			= {
			.owner = THIS_MODULE,
		},
	},
};
1437
/* Module init: register the driver table with the PHY core */
static int __init amd_xgbe_phy_init(void)
{
	return phy_drivers_register(amd_xgbe_phy_driver,
				    ARRAY_SIZE(amd_xgbe_phy_driver));
}
1443
/* Module exit: unregister the driver table from the PHY core */
static void __exit amd_xgbe_phy_exit(void)
{
	phy_drivers_unregister(amd_xgbe_phy_driver,
			       ARRAY_SIZE(amd_xgbe_phy_driver));
}

module_init(amd_xgbe_phy_init);
module_exit(amd_xgbe_phy_exit);
1452
/* MDIO device ID table for module autoloading; terminated by the
 * zeroed sentinel entry.
 */
static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
	{ XGBE_PHY_ID, XGBE_PHY_MASK },
	{ }
};
MODULE_DEVICE_TABLE(mdio, amd_xgbe_phy_ids);