/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "nic_reg.h"
#include "nic.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-BGX"
#define DRV_VERSION	"1.0"

struct lmac {
	struct bgx		*bgx;
	int			dmac;
	u8			mac[ETH_ALEN];
	u8			lmac_type;
	u8			lane_to_sds;
	bool			use_training;
	bool			link_up;
	int			lmacid; /* ID within BGX */
	int			lmacid_bd; /* ID on board */
	struct net_device	netdev;
	struct phy_device	*phydev;
	unsigned int		last_duplex;
	unsigned int		last_link;
	unsigned int		last_speed;
	bool			is_sgmii;
	struct delayed_work	dwork;
	struct workqueue_struct *check_link;
};

struct bgx {
	u8			bgx_id;
	struct lmac		lmac[MAX_LMAC_PER_BGX];
	int			lmac_count;
	u8			max_lmac;
	void __iomem		*reg_base;
	struct pci_dev		*pdev;
	bool			is_dlm;
	bool			is_rgx;
};

static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count; /* Total no of LMACs in system */

static int bgx_xaui_check_link(struct lmac *lmac);

/* Supported devices */
static const struct pci_device_id bgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_RGX) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, bgx_id_table);

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver. The readq()/writeq() functions add
 * explicit ordering operation which in this case are redundant, and only
 * add overhead.
 */

/* Register read/write APIs */
static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	return readq_relaxed(addr);
}

static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val, addr);
}

static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val | readq_relaxed(addr), addr);
}

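/* Poll a register until the bits selected by 'mask' are all clear
 * (zero == true) or until any of them is set (zero == false).  Gives up
 * after roughly 100-200 ms (100 iterations of a 1-2 ms sleep).
 * Returns 0 on success, 1 on timeout.
 */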
static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
{
	int timeout = 100;
	u64 reg_val;

	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return 0;
		if (!zero && (reg_val & mask))
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	return 1;
}

/* Return a bitmap of the BGX blocks present in HW */
unsigned bgx_get_map(int node)
{
	int i;
	unsigned map = 0;

	for (i = 0; i < MAX_BGX_PER_NODE; i++) {
		if (bgx_vnic[(node * MAX_BGX_PER_NODE) + i])
			map |= (1 << i);
	}

	return map;
}
EXPORT_SYMBOL(bgx_get_map);

/* Return number of LMACs configured for this BGX */
int bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
	if (bgx)
		return bgx->lmac_count;

	return 0;
}
EXPORT_SYMBOL(bgx_get_lmac_count);

/* Returns the current link status of LMAC */
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
	struct bgx_link_status *link = (struct bgx_link_status *)status;
	struct bgx *bgx;
	struct lmac *lmac;

	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
}
EXPORT_SYMBOL(bgx_get_lmac_link_state);

const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];

	if (bgx)
		return bgx->lmac[lmacid].mac;

	return NULL;
}
EXPORT_SYMBOL(bgx_get_lmac_mac);

void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];

	if (!bgx)
		return;

	ether_addr_copy(bgx->lmac[lmacid].mac, mac);
}
EXPORT_SYMBOL(bgx_set_lmac_mac);

void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
	struct lmac *lmac;
	u64 cfg;

	if (!bgx)
		return;
	lmac = &bgx->lmac[lmacid];

	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	if (enable)
		cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
	else
		cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	if (bgx->is_rgx)
		xcv_setup_link(enable ? lmac->link_up : 0, lmac->last_speed);
}
EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);

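/* Propagate the last negotiated link state into the GMI port: the LMAC is
 * disabled, speed/duplex/slot-time are reprogrammed from the cached values,
 * then the LMAC is re-enabled (and the XCV block updated when running as RGX).
 */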
static void bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	u64 cmr_cfg;
	u64 port_cfg = 0;
	u64 misc_ctl = 0;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	cmr_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

	/* Re-enable lmac */
	cmr_cfg |= CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN)))
		xcv_setup_link(lmac->link_up, lmac->last_speed);
}

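/* adjust_link callback passed to phy_connect_direct() for the PHY-attached
 * modes: compares the PHY state against the cached last_* values and
 * reprograms the MAC only when the link, speed or duplex actually changed.
 */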
static void bgx_lmac_handler(struct net_device *netdev)
{
	struct lmac *lmac = container_of(netdev, struct lmac, netdev);
	struct phy_device *phydev;
	int link_changed = 0;

	if (!lmac)
		return;

	phydev = lmac->phydev;

	if (!phydev->link && lmac->last_link)
		link_changed = -1;

	if (phydev->link &&
	    (lmac->last_duplex != phydev->duplex ||
	     lmac->last_link != phydev->link ||
	     lmac->last_speed != phydev->speed)) {
		link_changed = 1;
	}

	lmac->last_link = phydev->link;
	lmac->last_speed = phydev->speed;
	lmac->last_duplex = phydev->duplex;

	if (!link_changed)
		return;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);
}

u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
	if (!bgx)
		return 0;

	if (idx > 8)
		lmac = 0;
	return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_rx_stats);

u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
	if (!bgx)
		return 0;

	return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_tx_stats);

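/* Remove every DMAC filter CAM entry that was installed for this LMAC by
 * clearing the corresponding BGX_CMR_RX_DMACX_CAM registers.
 */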
static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
{
	u64 offset;

	while (bgx->lmac[lmac].dmac > 0) {
		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
			(lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
		bgx->lmac[lmac].dmac--;
	}
}

/* Configure BGX LMAC in internal loopback mode */
void bgx_lmac_internal_loopback(int node, int bgx_idx,
				int lmac_idx, bool enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	u64 cfg;

	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmac_idx];
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
	}
}
EXPORT_SYMBOL(bgx_lmac_internal_loopback);

static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
{
	int lmacid = lmac->lmacid;
	u64 cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
			 PCS_MRX_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
		return -1;
	}

	/* Take PCS out of power down, restart autoneg, enable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (lmac->lmac_type == BGX_MODE_QSGMII) {
		/* Disable disparity check for QSGMII */
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
		cfg &= ~PCS_MISC_CTL_DISP_EN;
		bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
		return 0;
	}

	if (lmac->lmac_type == BGX_MODE_SGMII) {
		if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
				 PCS_MRX_STATUS_AN_CPT, false)) {
			dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
			return -1;
		}
	}

	return 0;
}

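/* Bring up the SPU/SMU for the XAUI/RXAUI/XFI/XLAUI/KR modes: reset the SPU,
 * clear pending interrupts, optionally enable link training for the KR modes,
 * program the autoneg advertisement, disable FEC and autoneg, and finally
 * enable the LMAC with FCS appended on transmit.
 */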
static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac)
{
	u64 cfg;
	int lmacid = lmac->lmacid;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (lmac->lmac_type == BGX_MODE_RXAUI)
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
			       SPU_MISC_CTL_INTLV_RDISP);

	/* Clear receive packet disable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);

	/* clear all interrupts */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (lmac->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* training enable */
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (lmac->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (lmac->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return 0;
}

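/* Verify SPU/SMU state for the 10G/40G modes: restart KR link training if it
 * has not completed, wait for block lock / lane alignment, clear latched
 * receive faults, and re-run bgx_lmac_xaui_init() if a local/remote fault is
 * still present.  Returns 0 when the link is healthy, -1 otherwise.
 */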
static int bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = lmac->lmac_type;
	u64 cfg;

	if (lmac->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if (!(cfg & (1ull << 13))) {
			cfg = (1ull << 13) | (1ull << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1ull << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return -1;
		}
	}

	/* wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
				 SPU_BR_STATUS_BLK_LOCK, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BR_STATUS_BLK_LOCK not completed\n");
			return -1;
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
				 SPU_BX_STATUS_RX_ALIGN, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BX_STATUS_RX_ALIGN not completed\n");
			return -1;
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
		if (lmac->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if (!(cfg & (1ull << 13))) {
				cfg = (1ull << 13) | (1ull << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
						   BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1ull << 0);
				bgx_reg_write(bgx, lmacid,
					      BGX_SPUX_BR_PMD_CRTL, cfg);
				return -1;
			}
		}
		return -1;
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
		return -1;
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
		return -1;
	}

	/* Check for MAC RX faults */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
	/* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
	cfg &= SMU_RX_CTL_STATUS;
	if (!cfg)
		return 0;

	/* Rx local/remote fault seen.
	 * Do lmac reinit to see if condition recovers
	 */
	bgx_lmac_xaui_init(bgx, lmac);

	return -1;
}

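/* Delayed work run every two seconds on the per-LMAC check_link workqueue for
 * the PHY-less modes: samples SPU/SMU link status, updates the cached
 * link_up/speed/duplex, and re-validates the link with bgx_xaui_check_link()
 * when it transitions from down to up.
 */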
static void bgx_poll_for_link(struct work_struct *work)
{
	struct lmac *lmac;
	u64 spu_link, smu_link;

	lmac = container_of(work, struct lmac, dwork.work);

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
		       BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
		     SPU_STATUS1_RCV_LNK, false);

	spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);

	if ((spu_link & SPU_STATUS1_RCV_LNK) &&
	    !(smu_link & SMU_RX_CTL_STATUS)) {
		lmac->link_up = 1;
		if (lmac->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;
	} else {
		lmac->link_up = 0;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
	}

	if (lmac->last_link != lmac->link_up) {
		if (lmac->link_up) {
			if (bgx_xaui_check_link(lmac)) {
				/* Errors, clear link_up state */
				lmac->link_up = 0;
				lmac->last_speed = SPEED_UNKNOWN;
				lmac->last_duplex = DUPLEX_UNKNOWN;
			}
		}
		lmac->last_link = lmac->link_up;
	}

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
}

static int phy_interface_mode(u8 lmac_type)
{
	if (lmac_type == BGX_MODE_QSGMII)
		return PHY_INTERFACE_MODE_QSGMII;
	if (lmac_type == BGX_MODE_RGMII)
		return PHY_INTERFACE_MODE_RGMII;

	return PHY_INTERFACE_MODE_SGMII;
}

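/* Per-LMAC bring-up: run the SGMII-style or XAUI-style init depending on the
 * LMAC type, enable FCS/pad insertion, then either connect the attached PHY
 * (SGMII/QSGMII/RGMII/XAUI/RXAUI) or start the link-poll workqueue for the
 * PHY-less XFI/XLAUI/KR modes.
 */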
static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if ((lmac->lmac_type == BGX_MODE_SGMII) ||
	    (lmac->lmac_type == BGX_MODE_QSGMII) ||
	    (lmac->lmac_type == BGX_MODE_RGMII)) {
		lmac->is_sgmii = 1;
		if (bgx_lmac_sgmii_init(bgx, lmac))
			return -1;
	} else {
		lmac->is_sgmii = 0;
		if (bgx_lmac_xaui_init(bgx, lmac))
			return -1;
	}

	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* Restore default cfg, in case low level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

	if ((lmac->lmac_type != BGX_MODE_XFI) &&
	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
	    (lmac->lmac_type != BGX_MODE_10G_KR)) {
		if (!lmac->phydev)
			return -ENODEV;

		lmac->phydev->dev_flags = 0;

		if (phy_connect_direct(&lmac->netdev, lmac->phydev,
				       bgx_lmac_handler,
				       phy_interface_mode(lmac->lmac_type)))
			return -ENODEV;

		phy_start_aneg(lmac->phydev);
	} else {
		lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
						   WQ_MEM_RECLAIM, 1);
		if (!lmac->check_link)
			return -ENOMEM;
		INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
		queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
	}

	return 0;
}

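/* Per-LMAC teardown: stop the link-poll work, disable packet reception, let
 * the RX/TX FIFOs drain, disable transmission, power down the serdes, disable
 * the LMAC, flush its DMAC filters and disconnect any attached PHY.
 */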
static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	if (lmac->check_link) {
		/* Destroy work queue */
		cancel_delayed_work_sync(&lmac->dwork);
		destroy_workqueue(lmac->check_link);
	}

	/* Disable packet reception */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_RX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Give chance for Rx/Tx FIFO to get drained */
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);

	/* Disable packet transmission */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_TX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Disable serdes lanes */
	if (!lmac->is_sgmii)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	else
		bgx_reg_modify(bgx, lmacid,
			       BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_flush_dmac_addrs(bgx, lmacid);

	if ((lmac->lmac_type != BGX_MODE_XFI) &&
	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
	    (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
		phy_disconnect(lmac->phydev);

	lmac->phydev = NULL;
}

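/* One-time per-BGX hardware init: enable FCS stripping, report BIST failures,
 * program each LMAC's type and lane-to-serdes map, set the backpressure mask,
 * and clear all DMAC filtering and NCSI steering rules.
 */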
static void bgx_init_hw(struct bgx *bgx)
{
	int i;
	struct lmac *lmac;

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		lmac = &bgx->lmac[i];
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			      (lmac->lmac_type << 8) | lmac->lane_to_sds);
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++)
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
			       (i * MAX_BGX_CHANS_PER_LMAC));

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}

static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
{
	return (u8)(bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG) & 0xFF);
}

static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
{
	struct device *dev = &bgx->pdev->dev;
	struct lmac *lmac;
	char str[20];
	u8 dlm;

	if (lmacid > bgx->max_lmac)
		return;

	lmac = &bgx->lmac[lmacid];
	dlm = (lmacid / 2) + (bgx->bgx_id * 2);
	if (!bgx->is_dlm)
		sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
	else
		sprintf(str, "BGX%d DLM%d mode", bgx->bgx_id, dlm);

	switch (lmac->lmac_type) {
	case BGX_MODE_SGMII:
		dev_info(dev, "%s: SGMII\n", (char *)str);
		break;
	case BGX_MODE_XAUI:
		dev_info(dev, "%s: XAUI\n", (char *)str);
		break;
	case BGX_MODE_RXAUI:
		dev_info(dev, "%s: RXAUI\n", (char *)str);
		break;
	case BGX_MODE_XFI:
		if (!lmac->use_training)
			dev_info(dev, "%s: XFI\n", (char *)str);
		else
			dev_info(dev, "%s: 10G_KR\n", (char *)str);
		break;
	case BGX_MODE_XLAUI:
		if (!lmac->use_training)
			dev_info(dev, "%s: XLAUI\n", (char *)str);
		else
			dev_info(dev, "%s: 40G_KR4\n", (char *)str);
		break;
	case BGX_MODE_QSGMII:
		if ((lmacid == 0) &&
		    (bgx_get_lane2sds_cfg(bgx, lmac) != lmacid))
			return;
		if ((lmacid == 2) &&
		    (bgx_get_lane2sds_cfg(bgx, lmac) == lmacid))
			return;
		dev_info(dev, "%s: QSGMII\n", (char *)str);
		break;
	case BGX_MODE_RGMII:
		dev_info(dev, "%s: RGMII\n", (char *)str);
		break;
	case BGX_MODE_INVALID:
		/* Nothing to do */
		break;
	}
}

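/* Pick the lane-to-serdes map written into the low byte of BGX_CMRX_CFG.
 * The value 0xE4 (binary 11 10 01 00) appears to map lanes 0-3 straight
 * through to serdes lanes 0-3; RXAUI pairs use 0x4 or 0xE instead.
 */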
static void lmac_set_lane2sds(struct bgx *bgx, struct lmac *lmac)
{
	switch (lmac->lmac_type) {
	case BGX_MODE_SGMII:
	case BGX_MODE_XFI:
		lmac->lane_to_sds = lmac->lmacid;
		break;
	case BGX_MODE_XAUI:
	case BGX_MODE_XLAUI:
	case BGX_MODE_RGMII:
		lmac->lane_to_sds = 0xE4;
		break;
	case BGX_MODE_RXAUI:
		lmac->lane_to_sds = (lmac->lmacid) ? 0xE : 0x4;
		break;
	case BGX_MODE_QSGMII:
		/* There is no way to determine whether DLM0/2 or DLM1/3 is
		 * configured for QSGMII, as the bootloader configures all
		 * LMACs, so take whatever the low level firmware set up.
		 */
		lmac->lane_to_sds = bgx_get_lane2sds_cfg(bgx, lmac);
		break;
	default:
		lmac->lane_to_sds = 0;
		break;
	}
}

static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
{
	if ((lmac->lmac_type != BGX_MODE_10G_KR) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR)) {
		lmac->use_training = 0;
		return;
	}

	lmac->use_training = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL) &
				SPU_PMD_CRTL_TRAIN_EN;
}

static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
{
	struct lmac *lmac;
	struct lmac *olmac;
	u64 cmr_cfg;
	u8 lmac_type;
	u8 lane_to_sds;

	lmac = &bgx->lmac[idx];

	if (!bgx->is_dlm || bgx->is_rgx) {
		/* Read LMAC0 type to figure out QLM mode
		 * This is configured by low level firmware
		 */
		cmr_cfg = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
		lmac->lmac_type = (cmr_cfg >> 8) & 0x07;
		if (bgx->is_rgx)
			lmac->lmac_type = BGX_MODE_RGMII;
		lmac_set_training(bgx, lmac, 0);
		lmac_set_lane2sds(bgx, lmac);
		return;
	}

	/* On 81xx a BGX can be split across 2 DLMs;
	 * firmware programs the lmac_type of LMAC0 and LMAC2
	 */
	if ((idx == 0) || (idx == 2)) {
		cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
		lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
		lane_to_sds = (u8)(cmr_cfg & 0xFF);
		/* Check if config is not reset value */
		if ((lmac_type == 0) && (lane_to_sds == 0xE4))
			lmac->lmac_type = BGX_MODE_INVALID;
		else
			lmac->lmac_type = lmac_type;
		lmac_set_training(bgx, lmac, lmac->lmacid);
		lmac_set_lane2sds(bgx, lmac);

		/* Set LMAC type of other lmac on same DLM i.e LMAC 1/3 */
		olmac = &bgx->lmac[idx + 1];
		olmac->lmac_type = lmac->lmac_type;
		lmac_set_training(bgx, olmac, olmac->lmacid);
		lmac_set_lane2sds(bgx, olmac);
	}
}

static bool is_dlm0_in_bgx_mode(struct bgx *bgx)
{
	struct lmac *lmac;

	if (!bgx->is_dlm)
		return true;

	lmac = &bgx->lmac[0];
	if (lmac->lmac_type == BGX_MODE_INVALID)
		return false;

	return true;
}

static void bgx_get_qlm_mode(struct bgx *bgx)
{
	struct lmac *lmac;
	struct lmac *lmac01;
	struct lmac *lmac23;
	u8 idx;

	/* Init all LMAC's type to invalid */
	for (idx = 0; idx < bgx->max_lmac; idx++) {
		lmac = &bgx->lmac[idx];
		lmac->lmacid = idx;
		lmac->lmac_type = BGX_MODE_INVALID;
		lmac->use_training = false;
	}

	/* It is assumed that low level firmware sets this value */
	bgx->lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	if (bgx->lmac_count > bgx->max_lmac)
		bgx->lmac_count = bgx->max_lmac;

	for (idx = 0; idx < bgx->max_lmac; idx++)
		bgx_set_lmac_config(bgx, idx);

	if (!bgx->is_dlm || bgx->is_rgx) {
		bgx_print_qlm_mode(bgx, 0);
		return;
	}

	if (bgx->lmac_count) {
		bgx_print_qlm_mode(bgx, 0);
		bgx_print_qlm_mode(bgx, 2);
	}

	/* If DLM0 is not in BGX mode then LMAC0/1 have
	 * to be configured with serdes lanes of DLM1
	 */
	if (is_dlm0_in_bgx_mode(bgx) || (bgx->lmac_count > 2))
		return;
	for (idx = 0; idx < bgx->lmac_count; idx++) {
		lmac01 = &bgx->lmac[idx];
		lmac23 = &bgx->lmac[idx + 2];
		lmac01->lmac_type = lmac23->lmac_type;
		lmac01->lane_to_sds = lmac23->lane_to_sds;
	}
}

#ifdef CONFIG_ACPI

static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
				u8 *dst)
{
	u8 mac[ETH_ALEN];
	int ret;

	ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
					    "mac-address", mac, ETH_ALEN);
	if (ret)
		goto out;

	if (!is_valid_ether_addr(mac)) {
		dev_err(dev, "MAC address invalid: %pM\n", mac);
		ret = -EINVAL;
		goto out;
	}

	dev_info(dev, "MAC address set to: %pM\n", mac);

	memcpy(dst, mac, ETH_ALEN);
out:
	return ret;
}

/* Currently only sets the MAC address. */
static acpi_status bgx_acpi_register_phy(acpi_handle handle,
					 u32 lvl, void *context, void **rv)
{
	struct bgx *bgx = context;
	struct device *dev = &bgx->pdev->dev;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		goto out;

	acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac);

	SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev);

	bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
out:
	bgx->lmac_count++;
	return AE_OK;
}

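/* Namespace walk callback used by bgx_init_acpi_phy(): match the ACPI device
 * named "BGX<n>" for this BGX instance and register each of its children as
 * an LMAC via bgx_acpi_register_phy().
 */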
static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
				     void *context, void **ret_val)
{
	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
	struct bgx *bgx = context;
	char bgx_sel[5];

	snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
		pr_warn("Invalid link device\n");
		return AE_OK;
	}

	if (strncmp(string.pointer, bgx_sel, 4))
		return AE_OK;

	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
			    bgx_acpi_register_phy, NULL, bgx, NULL);

	kfree(string.pointer);
	return AE_CTRL_TERMINATE;
}

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
	return 0;
}

#else

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_ACPI */

#if IS_ENABLED(CONFIG_OF_MDIO)

static int bgx_init_of_phy(struct bgx *bgx)
{
	struct fwnode_handle *fwn;
	struct device_node *node = NULL;
	u8 lmac = 0;

	device_for_each_child_node(&bgx->pdev->dev, fwn) {
		struct phy_device *pd;
		struct device_node *phy_np;
		const char *mac;

		/* Should always be an OF node. But if it is not, we
		 * cannot handle it, so exit the loop.
		 */
		node = to_of_node(fwn);
		if (!node)
			break;

		mac = of_get_mac_address(node);
		if (mac)
			ether_addr_copy(bgx->lmac[lmac].mac, mac);

		SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
		bgx->lmac[lmac].lmacid = lmac;

		phy_np = of_parse_phandle(node, "phy-handle", 0);
		/* If there is no PHY, or if defective firmware presents
		 * this Cortina PHY (for which there is no driver support),
		 * ignore it.
		 */
		if (phy_np &&
		    !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
			/* Wait until the phy drivers are available */
			pd = of_phy_find_device(phy_np);
			if (!pd)
				goto defer;
			bgx->lmac[lmac].phydev = pd;
		}

		lmac++;
		if (lmac == bgx->max_lmac) {
			of_node_put(node);
			break;
		}
	}
	return 0;

defer:
	/* We are bailing out, try not to leak device reference counts
	 * for phy devices we may have already found.
	 */
	while (lmac) {
		if (bgx->lmac[lmac].phydev) {
			put_device(&bgx->lmac[lmac].phydev->mdio.dev);
			bgx->lmac[lmac].phydev = NULL;
		}
		lmac--;
	}
	of_node_put(node);
	return -EPROBE_DEFER;
}

#else

static int bgx_init_of_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_OF_MDIO */

static int bgx_init_phy(struct bgx *bgx)
{
	if (!acpi_disabled)
		return bgx_init_acpi_phy(bgx);

	return bgx_init_of_phy(bgx);
}

static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err;
	struct device *dev = &pdev->dev;
	struct bgx *bgx = NULL;
	u8 lmac;
	u16 sdevid;

	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
	if (!bgx)
		return -ENOMEM;
	bgx->pdev = pdev;

	pci_set_drvdata(pdev, bgx);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* MAP configuration registers */
	bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!bgx->reg_base) {
		dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
	if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
		bgx->bgx_id = (pci_resource_start(pdev,
			PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
		bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
		bgx->max_lmac = MAX_LMAC_PER_BGX;
		bgx_vnic[bgx->bgx_id] = bgx;
	} else {
		bgx->is_rgx = true;
		bgx->max_lmac = 1;
		bgx->bgx_id = MAX_BGX_PER_CN81XX - 1;
		bgx_vnic[bgx->bgx_id] = bgx;
		xcv_init_hw();
	}

	/* On 81xx all interfaces are DLMs; on 83xx there are 3 BGX QLMs and
	 * one BGX, i.e. BGX2, can be split across 2 DLMs.
	 */
	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
	if ((sdevid == PCI_SUBSYS_DEVID_81XX_BGX) ||
	    ((sdevid == PCI_SUBSYS_DEVID_83XX_BGX) && (bgx->bgx_id == 2)))
		bgx->is_dlm = true;

	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err)
		goto err_enable;

	bgx_init_hw(bgx);

	/* Enable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		err = bgx_lmac_enable(bgx, lmac);
		if (err) {
			dev_err(dev, "BGX%d failed to enable lmac%d\n",
				bgx->bgx_id, lmac);
			while (lmac)
				bgx_lmac_disable(bgx, --lmac);
			goto err_enable;
		}
	}

	return 0;

err_enable:
	bgx_vnic[bgx->bgx_id] = NULL;
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void bgx_remove(struct pci_dev *pdev)
{
	struct bgx *bgx = pci_get_drvdata(pdev);
	u8 lmac;

	/* Disable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++)
		bgx_lmac_disable(bgx, lmac);

	bgx_vnic[bgx->bgx_id] = NULL;
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver bgx_driver = {
	.name = DRV_NAME,
	.id_table = bgx_id_table,
	.probe = bgx_probe,
	.remove = bgx_remove,
};

static int __init bgx_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&bgx_driver);
}

static void __exit bgx_cleanup_module(void)
{
	pci_unregister_driver(&bgx_driver);
}

module_init(bgx_init_module);
module_exit(bgx_cleanup_module);