/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "nic_reg.h"
#include "nic.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-BGX"
#define DRV_VERSION	"1.0"
struct lmac {
	struct bgx		*bgx;
	int			dmac;
	u8			mac[ETH_ALEN];
	u8			lmac_type;
	u8			lane_to_sds;
	bool			use_training;
	bool			autoneg;
	bool			link_up;
	int			lmacid; /* ID within BGX */
	int			lmacid_bd; /* ID on board */
	struct net_device	netdev;
	struct phy_device	*phydev;
	unsigned int		last_duplex;
	unsigned int		last_link;
	unsigned int		last_speed;
	bool			is_sgmii;
	struct delayed_work	dwork;
	struct workqueue_struct *check_link;
};

struct bgx {
	u8			bgx_id;
	struct lmac		lmac[MAX_LMAC_PER_BGX];
	u8			lmac_count;
	u8			max_lmac;
	u8			acpi_lmac_idx;
	void __iomem		*reg_base;
	struct pci_dev		*pdev;
	bool			is_dlm;
	bool			is_rgx;
};

static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count; /* Total no of LMACs in system */
static int bgx_xaui_check_link(struct lmac *lmac);

/* Supported devices */
static const struct pci_device_id bgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_RGX) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, bgx_id_table);
/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation.  All accesses to the
 * device registers on this platform are implicitly strongly ordered with
 * respect to memory accesses. So writeq_relaxed() and readq_relaxed() are
 * safe to use with no memory barriers in this driver. The readq()/writeq()
 * functions add explicit ordering operations which in this case are
 * redundant, and only add overhead.
 */
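/* Each LMAC's CSRs sit in their own 1 MiB window within the BGX BAR: the
 * register helpers below fold the LMAC index into bits 20+ of the mapped
 * offset ((u32)lmac << 20).
 */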
/* Register read/write APIs */
static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	return readq_relaxed(addr);
}
static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val, addr);
}
static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val | readq_relaxed(addr), addr);
}
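/* Poll a CSR until the bits in 'mask' clear (zero == true) or become set
 * (zero == false); returns zero on success, non-zero on timeout.
 */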
static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
{
	int timeout = 100;
	u64 reg_val;

	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return 0;
		if (!zero && (reg_val & mask))
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	return 1;
}
static int max_bgx_per_node;

static void set_max_bgx_per_node(struct pci_dev *pdev)
{
	u16 sdevid;

	if (max_bgx_per_node)
		return;

	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
	switch (sdevid) {
	case PCI_SUBSYS_DEVID_81XX_BGX:
		max_bgx_per_node = MAX_BGX_PER_CN81XX;
		break;
	case PCI_SUBSYS_DEVID_83XX_BGX:
		max_bgx_per_node = MAX_BGX_PER_CN83XX;
		break;
	case PCI_SUBSYS_DEVID_88XX_BGX:
	default:
		max_bgx_per_node = MAX_BGX_PER_CN88XX;
		break;
	}
}
static struct bgx *get_bgx(int node, int bgx_idx)
{
	int idx = (node * max_bgx_per_node) + bgx_idx;

	return bgx_vnic[idx];
}
/* Return a bitmap of the BGX instances present in HW on this node */
unsigned bgx_get_map(int node)
{
	int i;
	unsigned map = 0;

	for (i = 0; i < max_bgx_per_node; i++) {
		if (bgx_vnic[(node * max_bgx_per_node) + i])
			map |= (1 << i);
	}

	return map;
}
EXPORT_SYMBOL(bgx_get_map);
/* Return number of LMAC configured for this BGX */
int bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = get_bgx(node, bgx_idx);
	if (bgx)
		return bgx->lmac_count;

	return 0;
}
EXPORT_SYMBOL(bgx_get_lmac_count);
/* Returns the current link status of LMAC */
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
	struct bgx_link_status *link = (struct bgx_link_status *)status;
	struct bgx *bgx;
	struct lmac *lmac;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];
	link->mac_type = lmac->lmac_type;
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
}
EXPORT_SYMBOL(bgx_get_lmac_link_state);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);

	if (bgx)
		return bgx->lmac[lmacid].mac;

	return NULL;
}
EXPORT_SYMBOL(bgx_get_lmac_mac);
void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);

	if (!bgx)
		return;

	ether_addr_copy(bgx->lmac[lmacid].mac, mac);
}
EXPORT_SYMBOL(bgx_set_lmac_mac);
void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac;
	u64 cfg;

	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];

	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	if (enable)
		cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
	else
		cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	if (bgx->is_rgx)
		xcv_setup_link(enable ? lmac->link_up : 0, lmac->last_speed);
}
EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
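/* The PFC helpers below expose the RX_EN/TX_EN pause-frame enables held in
 * BGX_SMUX_CBFC_CTL; callers pass a struct pfc through the opaque 'pause'
 * pointer.
 */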
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
	struct pfc *pfc = (struct pfc *)pause;
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac;
	u64 cfg;

	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];
	if (lmac->is_sgmii)
		return;

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
	pfc->fc_rx = cfg & RX_EN;
	pfc->fc_tx = cfg & TX_EN;
	pfc->autoneg = 0;
}
EXPORT_SYMBOL(bgx_lmac_get_pfc);
void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
	struct pfc *pfc = (struct pfc *)pause;
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac;
	u64 cfg;

	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];
	if (lmac->is_sgmii)
		return;

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
	cfg &= ~(RX_EN | TX_EN);
	cfg |= (pfc->fc_rx ? RX_EN : 0x00);
	cfg |= (pfc->fc_tx ? TX_EN : 0x00);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, cfg);
}
EXPORT_SYMBOL(bgx_lmac_set_pfc);
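/* Apply the last recorded speed/duplex to the GMI/PCS registers. The LMAC
 * is disabled (CMR_EN cleared) while port config, slot time and sample
 * point are reprogrammed, then re-enabled.
 */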
static void bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	u64 cmr_cfg;
	u64 port_cfg = 0;
	u64 misc_ctl = 0;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	cmr_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

	/* Re-enable lmac */
	cmr_cfg |= CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN)))
		xcv_setup_link(lmac->link_up, lmac->last_speed);
}
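/* Link-change callback passed to phy_connect_direct(); phylib invokes it
 * whenever the attached PHY's state changes, and the new state is pushed
 * down to the SGMII or XAUI blocks accordingly.
 */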
static void bgx_lmac_handler(struct net_device *netdev)
{
	struct lmac *lmac = container_of(netdev, struct lmac, netdev);
	struct phy_device *phydev;
	int link_changed = 0;

	if (!lmac)
		return;

	phydev = lmac->phydev;

	if (!phydev->link && lmac->last_link)
		link_changed = -1;

	if (phydev->link &&
	    (lmac->last_duplex != phydev->duplex ||
	     lmac->last_link != phydev->link ||
	     lmac->last_speed != phydev->speed)) {
		link_changed = 1;
	}

	if (!link_changed)
		return;

	lmac->last_link = phydev->link;
	lmac->last_speed = phydev->speed;
	lmac->last_duplex = phydev->duplex;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);
}
u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return 0;

	return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_rx_stats);
u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return 0;

	return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_tx_stats);
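/* Each LMAC owns a MAX_DMAC_PER_LMAC-entry slice of the DMAC CAM; walk the
 * slice backwards and clear every entry this LMAC has installed.
 */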
static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
{
	u64 offset;

	while (bgx->lmac[lmac].dmac > 0) {
		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
			(lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
		bgx->lmac[lmac].dmac--;
	}
}
/* Configure BGX LMAC in internal loopback mode */
void bgx_lmac_internal_loopback(int node, int bgx_idx,
				int lmac_idx, bool enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	u64 cfg;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmac_idx];
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
	}
}
EXPORT_SYMBOL(bgx_lmac_internal_loopback);
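/* SGMII/QSGMII bring-up: program GMI thresholds and the jabber limit,
 * reset the PCS, then let firmware's AN_EN setting decide whether this
 * driver performs autonegotiation and link polling itself.
 */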
static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
{
	int lmacid = lmac->lmacid;
	u64 cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
			 PCS_MRX_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
		return -1;
	}

	/* power down, reset autoneg, autoneg enable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= PCS_MRX_CTL_RST_AN;
	if (lmac->phydev) {
		cfg |= PCS_MRX_CTL_AN_EN;
	} else {
		/* In scenarios where PHY driver is not present or it's a
		 * non-standard PHY, FW sets AN_EN to inform Linux driver
		 * to do auto-neg and link polling or not.
		 */
		if (cfg & PCS_MRX_CTL_AN_EN)
			lmac->autoneg = true;
	}
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (lmac->lmac_type == BGX_MODE_QSGMII) {
		/* Disable disparity check for QSGMII */
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
		cfg &= ~PCS_MISC_CTL_DISP_EN;
		bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
		return 0;
	}

	if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) {
		if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
				 PCS_MRX_STATUS_AN_CPT, false)) {
			dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
			return -1;
		}
	}

	return 0;
}
static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac)
{
	u64 cfg;
	int lmacid = lmac->lmacid;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (lmac->lmac_type == BGX_MODE_RXAUI)
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
			       SPU_MISC_CTL_INTLV_RDISP);

	/* Clear receive packet disable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);

	/* clear all interrupts */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (lmac->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* training enable */
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (lmac->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (lmac->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* Enable receive and transmission of pause frames */
	bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, ((0xffffULL << 32) |
		      BCK_EN | DRP_EN | TX_EN | RX_EN));
	/* Configure pause time and interval */
	bgx_reg_write(bgx, lmacid,
		      BGX_SMUX_TX_PAUSE_PKT_TIME, DEFAULT_PAUSE_TIME);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL);
	cfg &= ~0xFFFFull;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL,
		      cfg | (DEFAULT_PAUSE_TIME - 0x1000));
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_ZERO, 0x01);

	/* take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return 0;
}
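/* Verify SPU block lock/alignment and SMU idle state; on a latched receive
 * fault the LMAC is reinitialized (and training restarted for the KR modes)
 * in the hope that the link recovers.
 */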
static int bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = lmac->lmac_type;
	u64 cfg;

	if (lmac->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if (!(cfg & (1ull << 13))) {
			cfg = (1ull << 13) | (1ull << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1ull << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return -1;
		}
	}

	/* wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
				 SPU_BR_STATUS_BLK_LOCK, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BR_STATUS_BLK_LOCK not completed\n");
			return -1;
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
				 SPU_BX_STATUS_RX_ALIGN, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BX_STATUS_RX_ALIGN not completed\n");
			return -1;
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
		if (lmac->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if (!(cfg & (1ull << 13))) {
				cfg = (1ull << 13) | (1ull << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
						   BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1ull << 0);
				bgx_reg_write(bgx, lmacid,
					      BGX_SPUX_BR_PMD_CRTL, cfg);
			}
		}
		return -1;
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
		return -1;
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
		return -1;
	}

	/* Check for MAC RX faults */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
	/* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
	cfg &= SMU_RX_CTL_STATUS;
	if (!cfg)
		return 0;

	/* Rx local/remote fault seen.
	 * Do lmac reinit to see if condition recovers
	 */
	bgx_lmac_xaui_init(bgx, lmac);

	return -1;
}
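/* Without a PHY driver to generate link events, link state is polled from
 * the per-LMAC check_link workqueue: every 3 seconds for SGMII, every
 * 2 seconds for the XAUI/KR modes.
 */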
static void bgx_poll_for_sgmii_link(struct lmac *lmac)
{
	u64 pcs_link, an_result;
	u8 speed;

	pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
				BGX_GMP_PCS_MRX_STATUS);

	/*Link state bit is sticky, read it again*/
	if (!(pcs_link & PCS_MRX_STATUS_LINK))
		pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
					BGX_GMP_PCS_MRX_STATUS);

	if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS,
			 PCS_MRX_STATUS_AN_CPT, false)) {
		lmac->link_up = false;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
		goto next_poll;
	}

	lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? true : false;
	an_result = bgx_reg_read(lmac->bgx, lmac->lmacid,
				 BGX_GMP_PCS_ANX_AN_RESULTS);
	if (lmac->link_up) {
		speed = (an_result >> 3) & 0x3;
		lmac->last_duplex = (an_result >> 1) & 0x1;
		switch (speed) {
		case 0:
			lmac->last_speed = 10;
			break;
		case 1:
			lmac->last_speed = 100;
			break;
		case 2:
			lmac->last_speed = 1000;
			break;
		default:
			lmac->link_up = false;
			lmac->last_speed = SPEED_UNKNOWN;
			lmac->last_duplex = DUPLEX_UNKNOWN;
			break;
		}
	}

next_poll:
	if (lmac->last_link != lmac->link_up) {
		if (lmac->link_up)
			bgx_sgmii_change_link_state(lmac);
		lmac->last_link = lmac->link_up;
	}

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3);
}
static void bgx_poll_for_link(struct work_struct *work)
{
	struct lmac *lmac;
	u64 spu_link, smu_link;

	lmac = container_of(work, struct lmac, dwork.work);
	if (lmac->is_sgmii) {
		bgx_poll_for_sgmii_link(lmac);
		return;
	}

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
		       BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
		     SPU_STATUS1_RCV_LNK, false);

	spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);

	if ((spu_link & SPU_STATUS1_RCV_LNK) &&
	    !(smu_link & SMU_RX_CTL_STATUS)) {
		lmac->link_up = 1;
		if (lmac->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;
	} else {
		lmac->link_up = 0;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
	}

	if (lmac->last_link != lmac->link_up) {
		if (lmac->link_up) {
			if (bgx_xaui_check_link(lmac)) {
				/* Errors, clear link_up state */
				lmac->link_up = 0;
				lmac->last_speed = SPEED_UNKNOWN;
				lmac->last_duplex = DUPLEX_UNKNOWN;
			}
		}
		lmac->last_link = lmac->link_up;
	}

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
}
static int phy_interface_mode(u8 lmac_type)
{
	if (lmac_type == BGX_MODE_QSGMII)
		return PHY_INTERFACE_MODE_QSGMII;
	if (lmac_type == BGX_MODE_RGMII)
		return PHY_INTERFACE_MODE_RGMII;

	return PHY_INTERFACE_MODE_SGMII;
}
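/* Bring up one LMAC: run the SGMII or XAUI init path based on lmac_type,
 * then either attach the phylib PHY (bgx_lmac_handler gets link events) or
 * fall back to the polling workqueue when no usable PHY is present.
 */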
static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if ((lmac->lmac_type == BGX_MODE_SGMII) ||
	    (lmac->lmac_type == BGX_MODE_QSGMII) ||
	    (lmac->lmac_type == BGX_MODE_RGMII)) {
		lmac->is_sgmii = 1;
		if (bgx_lmac_sgmii_init(bgx, lmac))
			return -1;
	} else {
		lmac->is_sgmii = 0;
		if (bgx_lmac_xaui_init(bgx, lmac))
			return -1;
	}

	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* Restore default cfg, incase low level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

	if ((lmac->lmac_type != BGX_MODE_XFI) &&
	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
	    (lmac->lmac_type != BGX_MODE_10G_KR)) {
		if (!lmac->phydev) {
			if (lmac->autoneg) {
				bgx_reg_write(bgx, lmacid,
					      BGX_GMP_PCS_LINKX_TIMER,
					      PCS_LINKX_TIMER_COUNT);
				goto poll;
			} else {
				/* Default to below link speed and duplex */
				lmac->link_up = true;
				lmac->last_speed = 1000;
				lmac->last_duplex = 1;
				bgx_sgmii_change_link_state(lmac);
				return 0;
			}
		}
		lmac->phydev->dev_flags = 0;

		if (phy_connect_direct(&lmac->netdev, lmac->phydev,
				       bgx_lmac_handler,
				       phy_interface_mode(lmac->lmac_type)))
			return -ENODEV;

		phy_start_aneg(lmac->phydev);
		return 0;
	}

poll:
	lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
					   WQ_MEM_RECLAIM, 1);
	if (!lmac->check_link)
		return -ENOMEM;
	INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
	queue_delayed_work(lmac->check_link, &lmac->dwork, 0);

	return 0;
}
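/* Tear down an LMAC in the reverse order: stop the polling work, disable
 * packet RX, wait for the FIFOs to drain, disable TX and the serdes lanes,
 * then flush DMAC CAM entries and detach any phylib PHY.
 */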
static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	if (lmac->check_link) {
		/* Destroy work queue */
		cancel_delayed_work_sync(&lmac->dwork);
		destroy_workqueue(lmac->check_link);
	}

	/* Disable packet reception */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_RX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Give chance for Rx/Tx FIFO to get drained */
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);

	/* Disable packet transmission */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_TX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Disable serdes lanes */
	if (!lmac->is_sgmii)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	else
		bgx_reg_modify(bgx, lmacid,
			       BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_flush_dmac_addrs(bgx, lmacid);

	if ((lmac->lmac_type != BGX_MODE_XFI) &&
	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
	    (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
		phy_disconnect(lmac->phydev);

	lmac->phydev = NULL;
}
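/* One-time BGX-wide init: enable FCS stripping, check BIST status, program
 * each LMAC's type/lane mapping and backpressure mask, and clear the DMAC
 * CAM and NCSI steering rules.
 */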
static void bgx_init_hw(struct bgx *bgx)
{
	int i;
	struct lmac *lmac;

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		lmac = &bgx->lmac[i];
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			      (lmac->lmac_type << 8) | lmac->lane_to_sds);
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++)
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
			       (i * MAX_BGX_CHANS_PER_LMAC));

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}
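/* The low byte of BGX_CMRX_CFG holds the lane-to-serdes mapping programmed
 * by low level firmware; bgx_get_lane2sds_cfg() reads it back for modes
 * (QSGMII) where the mapping cannot be derived here.
 */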
static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
{
	return (u8)(bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG) & 0xFF);
}
static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
{
	struct device *dev = &bgx->pdev->dev;
	struct lmac *lmac;
	char str[27];

	if (!bgx->is_dlm && lmacid)
		return;

	lmac = &bgx->lmac[lmacid];
	if (!bgx->is_dlm)
		sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
	else
		sprintf(str, "BGX%d LMAC%d mode", bgx->bgx_id, lmacid);

	switch (lmac->lmac_type) {
	case BGX_MODE_SGMII:
		dev_info(dev, "%s: SGMII\n", (char *)str);
		break;
	case BGX_MODE_XAUI:
		dev_info(dev, "%s: XAUI\n", (char *)str);
		break;
	case BGX_MODE_RXAUI:
		dev_info(dev, "%s: RXAUI\n", (char *)str);
		break;
	case BGX_MODE_XFI:
		if (!lmac->use_training)
			dev_info(dev, "%s: XFI\n", (char *)str);
		else
			dev_info(dev, "%s: 10G_KR\n", (char *)str);
		break;
	case BGX_MODE_XLAUI:
		if (!lmac->use_training)
			dev_info(dev, "%s: XLAUI\n", (char *)str);
		else
			dev_info(dev, "%s: 40G_KR4\n", (char *)str);
		break;
	case BGX_MODE_QSGMII:
		dev_info(dev, "%s: QSGMII\n", (char *)str);
		break;
	case BGX_MODE_RGMII:
		dev_info(dev, "%s: RGMII\n", (char *)str);
		break;
	case BGX_MODE_INVALID:
		/* Nothing to do */
		break;
	}
}
static void lmac_set_lane2sds(struct bgx *bgx, struct lmac *lmac)
{
	switch (lmac->lmac_type) {
	case BGX_MODE_SGMII:
	case BGX_MODE_XFI:
		lmac->lane_to_sds = lmac->lmacid;
		break;
	case BGX_MODE_XAUI:
	case BGX_MODE_XLAUI:
	case BGX_MODE_RGMII:
		lmac->lane_to_sds = 0xE4;
		break;
	case BGX_MODE_RXAUI:
		lmac->lane_to_sds = (lmac->lmacid) ? 0xE : 0x4;
		break;
	case BGX_MODE_QSGMII:
		/* There is no way to determine if DLM0/2 is QSGMII or
		 * DLM1/3 is configured to QSGMII as bootloader will
		 * configure all LMACs, so take whatever is configured
		 * by low level firmware.
		 */
		lmac->lane_to_sds = bgx_get_lane2sds_cfg(bgx, lmac);
		break;
	default:
		lmac->lane_to_sds = 0;
		break;
	}
}
static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
{
	if ((lmac->lmac_type != BGX_MODE_10G_KR) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR)) {
		lmac->use_training = 0;
		return;
	}

	lmac->use_training = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL) &
				SPU_PMD_CRTL_TRAIN_EN;
}
static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
{
	struct lmac *lmac;
	u64 cmr_cfg;
	u8 lmac_type;
	u8 lane_to_sds;

	lmac = &bgx->lmac[idx];

	if (!bgx->is_dlm || bgx->is_rgx) {
		/* Read LMAC0 type to figure out QLM mode
		 * This is configured by low level firmware
		 */
		cmr_cfg = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
		lmac->lmac_type = (cmr_cfg >> 8) & 0x07;
		if (bgx->is_rgx)
			lmac->lmac_type = BGX_MODE_RGMII;
		lmac_set_training(bgx, lmac, 0);
		lmac_set_lane2sds(bgx, lmac);
		return;
	}

	/* For DLMs or SLMs on 80/81/83xx so many lane configurations
	 * are possible and vary across boards. Also Kernel doesn't have
	 * any way to identify board type/info and since firmware does,
	 * just take lmac type and serdes lane config as is.
	 */
	cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
	lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
	lane_to_sds = (u8)(cmr_cfg & 0xFF);
	/* Check if config is reset value */
	if ((lmac_type == 0) && (lane_to_sds == 0xE4))
		lmac->lmac_type = BGX_MODE_INVALID;
	else
		lmac->lmac_type = lmac_type;
	lmac->lane_to_sds = lane_to_sds;
	lmac_set_training(bgx, lmac, lmac->lmacid);
}
static void bgx_get_qlm_mode(struct bgx *bgx)
{
	struct lmac *lmac;
	u8 idx;

	/* Init all LMAC's type to invalid */
	for (idx = 0; idx < bgx->max_lmac; idx++) {
		lmac = &bgx->lmac[idx];
		lmac->lmacid = idx;
		lmac->lmac_type = BGX_MODE_INVALID;
		lmac->use_training = false;
	}

	/* It is assumed that low level firmware sets this value */
	bgx->lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	if (bgx->lmac_count > bgx->max_lmac)
		bgx->lmac_count = bgx->max_lmac;

	for (idx = 0; idx < bgx->lmac_count; idx++) {
		bgx_set_lmac_config(bgx, idx);
		bgx_print_qlm_mode(bgx, idx);
	}
}
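/* ACPI probing: the namespace is searched for a link device named "BGXn";
 * only the "mac-address" property of its child LMAC nodes is consumed here.
 */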
#ifdef CONFIG_ACPI

static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
				u8 *dst)
{
	u8 mac[ETH_ALEN];
	int ret;

	ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
					    "mac-address", mac, ETH_ALEN);
	if (ret)
		goto out;

	if (!is_valid_ether_addr(mac)) {
		dev_err(dev, "MAC address invalid: %pM\n", mac);
		ret = -EINVAL;
		goto out;
	}

	dev_info(dev, "MAC address set to: %pM\n", mac);

	memcpy(dst, mac, ETH_ALEN);
out:
	return ret;
}

/* Currently only sets the MAC address. */
static acpi_status bgx_acpi_register_phy(acpi_handle handle,
					 u32 lvl, void *context, void **rv)
{
	struct bgx *bgx = context;
	struct device *dev = &bgx->pdev->dev;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		goto out;

	acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);

	SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);

	bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
	bgx->acpi_lmac_idx++; /* move to next LMAC */
out:
	return AE_OK;
}

static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
				     void *context, void **ret_val)
{
	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
	struct bgx *bgx = context;
	char bgx_sel[5];

	snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
		pr_warn("Invalid link device\n");
		return AE_OK;
	}

	if (strncmp(string.pointer, bgx_sel, 4))
		return AE_OK;

	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
			    bgx_acpi_register_phy, NULL, bgx, NULL);

	kfree(string.pointer);
	return AE_CTRL_TERMINATE;
}

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
	return 0;
}

#else

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_ACPI */
#if IS_ENABLED(CONFIG_OF_MDIO)
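/* Device-tree probing: walk the BGX node's children, harvest each LMAC's
 * MAC address and "phy-handle", and defer the probe if a referenced PHY
 * has no driver bound yet.
 */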
static int bgx_init_of_phy(struct bgx *bgx)
{
	struct fwnode_handle *fwn;
	struct device_node *node = NULL;
	u8 lmac = 0;

	device_for_each_child_node(&bgx->pdev->dev, fwn) {
		struct phy_device *pd;
		struct device_node *phy_np;
		const char *mac;

		/* Should always be an OF node.  But if it is not, we
		 * cannot handle it, so exit the loop.
		 */
		node = to_of_node(fwn);
		if (!node)
			break;

		mac = of_get_mac_address(node);
		if (mac)
			ether_addr_copy(bgx->lmac[lmac].mac, mac);

		SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
		bgx->lmac[lmac].lmacid = lmac;

		phy_np = of_parse_phandle(node, "phy-handle", 0);
		/* If there is no phy or defective firmware presents
		 * this cortina phy, for which there is no driver
		 * support, ignore it.
		 */
		if (phy_np &&
		    !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
			/* Wait until the phy drivers are available */
			pd = of_phy_find_device(phy_np);
			if (!pd)
				goto defer;
			bgx->lmac[lmac].phydev = pd;
		}

		lmac++;
		if (lmac == bgx->max_lmac) {
			of_node_put(node);
			break;
		}
	}
	return 0;

defer:
	/* We are bailing out, try not to leak device reference counts
	 * for phy devices we may have already found.
	 */
	while (lmac) {
		if (bgx->lmac[lmac].phydev) {
			put_device(&bgx->lmac[lmac].phydev->mdio.dev);
			bgx->lmac[lmac].phydev = NULL;
		}
		lmac--;
	}
	of_node_put(node);
	return -EPROBE_DEFER;
}

#else

static int bgx_init_of_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_OF_MDIO */
static int bgx_init_phy(struct bgx *bgx)
{
	if (!acpi_disabled)
		return bgx_init_acpi_phy(bgx);

	return bgx_init_of_phy(bgx);
}
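/* Probe: enable the PCI function, map the BAR0 CSR window, derive the BGX
 * index from the BAR address and NUMA node (or treat the device as RGX),
 * detect DLM-split configurations, then init PHYs and enable every LMAC.
 */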
static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err;
	struct device *dev = &pdev->dev;
	struct bgx *bgx = NULL;
	u8 lmac;
	u16 sdevid;

	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
	if (!bgx)
		return -ENOMEM;
	bgx->pdev = pdev;

	pci_set_drvdata(pdev, bgx);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* MAP configuration registers */
	bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!bgx->reg_base) {
		dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	set_max_bgx_per_node(pdev);

	pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
	if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
		bgx->bgx_id = (pci_resource_start(pdev,
			PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
		bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node;
		bgx->max_lmac = MAX_LMAC_PER_BGX;
		bgx_vnic[bgx->bgx_id] = bgx;
	} else {
		bgx->is_rgx = true;
		bgx->max_lmac = 1;
		bgx->bgx_id = MAX_BGX_PER_CN81XX - 1;
		bgx_vnic[bgx->bgx_id] = bgx;
		xcv_init_hw();
	}

	/* On 81xx all are DLMs and on 83xx there are 3 BGX QLMs and one
	 * BGX i.e BGX2 can be split across 2 DLMs.
	 */
	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
	if ((sdevid == PCI_SUBSYS_DEVID_81XX_BGX) ||
	    ((sdevid == PCI_SUBSYS_DEVID_83XX_BGX) && (bgx->bgx_id == 2)))
		bgx->is_dlm = true;

	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err)
		goto err_enable;

	bgx_init_hw(bgx);

	/* Enable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		err = bgx_lmac_enable(bgx, lmac);
		if (err) {
			dev_err(dev, "BGX%d failed to enable lmac%d\n",
				bgx->bgx_id, lmac);
			while (lmac)
				bgx_lmac_disable(bgx, --lmac);
			goto err_enable;
		}
	}

	return 0;

err_enable:
	bgx_vnic[bgx->bgx_id] = NULL;
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
*pdev
)
1420 struct bgx
*bgx
= pci_get_drvdata(pdev
);
1423 /* Disable all LMACs */
1424 for (lmac
= 0; lmac
< bgx
->lmac_count
; lmac
++)
1425 bgx_lmac_disable(bgx
, lmac
);
1427 bgx_vnic
[bgx
->bgx_id
] = NULL
;
1428 pci_release_regions(pdev
);
1429 pci_disable_device(pdev
);
1430 pci_set_drvdata(pdev
, NULL
);
static struct pci_driver bgx_driver = {
	.name = DRV_NAME,
	.id_table = bgx_id_table,
	.probe = bgx_probe,
	.remove = bgx_remove,
};

static int __init bgx_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&bgx_driver);
}

static void __exit bgx_cleanup_module(void)
{
	pci_unregister_driver(&bgx_driver);
}

module_init(bgx_init_module);
module_exit(bgx_cleanup_module);