]>
Commit | Line | Data |
---|---|---|
8e22f040 SG |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Marvell OcteonTx2 CGX driver | |
3 | * | |
4 | * Copyright (C) 2018 Marvell International Ltd. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | */ | |
10 | ||
11 | #include <linux/acpi.h> | |
12 | #include <linux/module.h> | |
13 | #include <linux/interrupt.h> | |
14 | #include <linux/pci.h> | |
15 | #include <linux/netdevice.h> | |
16 | #include <linux/etherdevice.h> | |
17 | #include <linux/phy.h> | |
18 | #include <linux/of.h> | |
19 | #include <linux/of_mdio.h> | |
20 | #include <linux/of_net.h> | |
21 | ||
22 | #include "cgx.h" | |
23 | ||
24 | #define DRV_NAME "octeontx2-cgx" | |
25 | #define DRV_STRING "Marvell OcteonTX2 CGX/MAC Driver" | |
26 | ||
/**
 * struct lmac - per-LMAC (logical MAC) state of one CGX port
 * @wq_cmd_cmplt:	waitq to keep the process blocked until cmd completion
 * @cmd_lock:		Lock to serialize the command interface
 * @resp:		command response (raw event register value)
 * @link_info:		link related information (cached copy, updated from
 *			firmware link-change events)
 * @event_cb:		callback for linkchange events
 * @event_cb_lock:	lock for serializing callback with unregister
 * @cmd_pend:		flag set before new command is started
 *			flag cleared after command response is received
 * @cgx:		parent cgx port
 * @lmac_id:		lmac port id
 * @name:		lmac port name (used as the FWI IRQ name)
 */
struct lmac {
	wait_queue_head_t wq_cmd_cmplt;
	struct mutex cmd_lock;
	u64 resp;
	struct cgx_link_user_info link_info;
	struct cgx_event_cb event_cb;
	spinlock_t event_cb_lock;
	bool cmd_pend;
	struct cgx *cgx;
	u8 lmac_id;
	char *name;
};
53 | ||
/* Per-device state of one CGX block (one PCI function carrying a set of
 * LMACs).  Instances are linked on the file-global cgx_list.
 */
struct cgx {
	void __iomem *reg_base;			/* mapped CSR space */
	struct pci_dev *pdev;
	u8 cgx_id;				/* index of this CGX block */
	u8 lmac_count;				/* LMACs present on this CGX */
	struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
	struct work_struct cgx_cmd_work;	/* deferred link-up work */
	struct workqueue_struct *cgx_cmd_workq;
	struct list_head cgx_list;		/* node on global cgx_list */
};
64 | ||
/* Global list of probed CGX devices; modified only in probe/remove. */
static LIST_HEAD(cgx_list);
66 | ||
/* Convert firmware speed encoding to user format(Mbps).
 * Populated by cgx_link_usertable_init() during probe.
 */
static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];

/* Convert firmware lmac type encoding to string.
 * Populated by cgx_link_usertable_init() during probe.
 */
static char *cgx_lmactype_string[LMAC_MODE_MAX];
72 | ||
/* CGX PHY management internal APIs (defined below, needed by the
 * link-up work handler).
 */
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
75 | ||
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
	{ 0, }	/* end of table */
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);
83 | ||
/* Write @val to CSR @offset of LMAC @lmac; each LMAC owns a 256KB
 * (1 << 18 bytes) slice of the mapped register space.
 */
static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
	writeq(val, cgx->reg_base + (lmac << 18) + offset);
}
88 | ||
/* Read CSR @offset of LMAC @lmac; see cgx_write() for the layout. */
static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
	return readq(cgx->reg_base + (lmac << 18) + offset);
}
93 | ||
1463f382 LC |
94 | static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx) |
95 | { | |
96 | if (!cgx || lmac_id >= MAX_LMAC_PER_CGX) | |
97 | return NULL; | |
98 | ||
99 | return cgx->lmac_idmap[lmac_id]; | |
100 | } | |
101 | ||
12e4c9ab | 102 | int cgx_get_cgxcnt_max(void) |
3a4fa841 LC |
103 | { |
104 | struct cgx *cgx_dev; | |
12e4c9ab | 105 | int idmax = -ENODEV; |
3a4fa841 LC |
106 | |
107 | list_for_each_entry(cgx_dev, &cgx_list, cgx_list) | |
12e4c9ab LC |
108 | if (cgx_dev->cgx_id > idmax) |
109 | idmax = cgx_dev->cgx_id; | |
3a4fa841 | 110 | |
12e4c9ab LC |
111 | if (idmax < 0) |
112 | return 0; | |
113 | ||
114 | return idmax + 1; | |
3a4fa841 | 115 | } |
3a4fa841 LC |
116 | |
117 | int cgx_get_lmac_cnt(void *cgxd) | |
118 | { | |
119 | struct cgx *cgx = cgxd; | |
120 | ||
121 | if (!cgx) | |
122 | return -ENODEV; | |
123 | ||
124 | return cgx->lmac_count; | |
125 | } | |
3a4fa841 LC |
126 | |
127 | void *cgx_get_pdata(int cgx_id) | |
128 | { | |
129 | struct cgx *cgx_dev; | |
130 | ||
131 | list_for_each_entry(cgx_dev, &cgx_list, cgx_list) { | |
132 | if (cgx_dev->cgx_id == cgx_id) | |
133 | return cgx_dev; | |
134 | } | |
135 | return NULL; | |
136 | } | |
3a4fa841 | 137 | |
f967488d LC |
138 | int cgx_get_cgxid(void *cgxd) |
139 | { | |
140 | struct cgx *cgx = cgxd; | |
141 | ||
142 | if (!cgx) | |
143 | return -EINVAL; | |
144 | ||
145 | return cgx->cgx_id; | |
146 | } | |
147 | ||
/* Ensure the required lock for event queue(where asynchronous events are
 * posted) is acquired before calling this API. Else an asynchronous event(with
 * latest link status) can reach the destination before this function returns
 * and could make the link status appear wrong.
 *
 * Copies the cached link state of the LMAC into @linfo.
 * Returns 0 on success, -ENODEV for an invalid cgx/lmac.
 */
int cgx_get_link_info(void *cgxd, int lmac_id,
		      struct cgx_link_user_info *linfo)
{
	struct lmac *lmac = lmac_pdata(lmac_id, cgxd);

	if (!lmac)
		return -ENODEV;

	*linfo = lmac->link_info;
	return 0;
}
61071a87 | 164 | |
96be2e0d VR |
165 | static u64 mac2u64 (u8 *mac_addr) |
166 | { | |
167 | u64 mac = 0; | |
168 | int index; | |
169 | ||
170 | for (index = ETH_ALEN - 1; index >= 0; index--) | |
171 | mac |= ((u64)*mac_addr++) << (8 * index); | |
172 | return mac; | |
173 | } | |
174 | ||
175 | int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr) | |
176 | { | |
177 | struct cgx *cgx_dev = cgx_get_pdata(cgx_id); | |
178 | u64 cfg; | |
179 | ||
180 | /* copy 6bytes from macaddr */ | |
181 | /* memcpy(&cfg, mac_addr, 6); */ | |
182 | ||
183 | cfg = mac2u64 (mac_addr); | |
184 | ||
185 | cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)), | |
186 | cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49)); | |
187 | ||
188 | cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); | |
189 | cfg |= CGX_DMAC_CTL0_CAM_ENABLE; | |
190 | cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); | |
191 | ||
192 | return 0; | |
193 | } | |
96be2e0d VR |
194 | |
195 | u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id) | |
196 | { | |
197 | struct cgx *cgx_dev = cgx_get_pdata(cgx_id); | |
198 | u64 cfg; | |
199 | ||
200 | cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8); | |
201 | return cfg & CGX_RX_DMAC_ADR_MASK; | |
202 | } | |
96be2e0d | 203 | |
94d942c5 G |
204 | int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind) |
205 | { | |
206 | struct cgx *cgx = cgxd; | |
207 | ||
208 | if (!cgx || lmac_id >= cgx->lmac_count) | |
209 | return -ENODEV; | |
210 | ||
211 | cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F)); | |
212 | return 0; | |
213 | } | |
94d942c5 | 214 | |
/* Read the LMAC mode (SGMII/XAUI/... encoding) from the CMR config CSR. */
static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
{
	u64 cfg;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}
222 | ||
23999b30 G |
223 | /* Configure CGX LMAC in internal loopback mode */ |
224 | int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable) | |
225 | { | |
226 | struct cgx *cgx = cgxd; | |
227 | u8 lmac_type; | |
228 | u64 cfg; | |
229 | ||
230 | if (!cgx || lmac_id >= cgx->lmac_count) | |
231 | return -ENODEV; | |
232 | ||
233 | lmac_type = cgx_get_lmac_type(cgx, lmac_id); | |
234 | if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) { | |
235 | cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL); | |
236 | if (enable) | |
237 | cfg |= CGXX_GMP_PCS_MRX_CTL_LBK; | |
238 | else | |
239 | cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK; | |
240 | cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg); | |
241 | } else { | |
242 | cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1); | |
243 | if (enable) | |
244 | cfg |= CGXX_SPUX_CONTROL1_LBK; | |
245 | else | |
246 | cfg &= ~CGXX_SPUX_CONTROL1_LBK; | |
247 | cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg); | |
248 | } | |
249 | return 0; | |
250 | } | |
23999b30 | 251 | |
/* Enable/disable promiscuous reception on @lmac_id.
 * Enabling clears CAM-accept/multicast filtering and disables the LMAC's
 * DMAC CAM entry; disabling restores CAM-based filtering.
 */
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
	struct cgx *cgx = cgx_get_pdata(cgx_id);
	u64 cfg = 0;

	if (!cgx)
		return;

	if (enable) {
		/* Enable promiscuous mode on LMAC */
		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
		cfg |= CGX_DMAC_BCAST_MODE;
		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

		/* Disable this LMAC's DMAC CAM entry (accessed via LMAC 0) */
		cfg = cgx_read(cgx, 0,
			       (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
		cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
		cgx_write(cgx, 0,
			  (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
	} else {
		/* Disable promiscuous mode */
		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
		/* Re-enable the DMAC CAM entry */
		cfg = cgx_read(cgx, 0,
			       (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
		cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
		cgx_write(cgx, 0,
			  (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
	}
}
96be2e0d | 284 | |
5d9b976d SG |
285 | /* Enable or disable forwarding received pause frames to Tx block */ |
286 | void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable) | |
287 | { | |
288 | struct cgx *cgx = cgxd; | |
289 | u64 cfg; | |
290 | ||
291 | if (!cgx) | |
292 | return; | |
293 | ||
294 | if (enable) { | |
295 | cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); | |
296 | cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK; | |
297 | cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); | |
298 | ||
299 | cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); | |
300 | cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK; | |
301 | cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); | |
302 | } else { | |
303 | cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); | |
304 | cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK; | |
305 | cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); | |
306 | ||
307 | cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); | |
308 | cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK; | |
309 | cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); | |
310 | } | |
311 | } | |
5d9b976d | 312 | |
66208910 CJ |
313 | int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat) |
314 | { | |
315 | struct cgx *cgx = cgxd; | |
316 | ||
317 | if (!cgx || lmac_id >= cgx->lmac_count) | |
318 | return -ENODEV; | |
319 | *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8)); | |
320 | return 0; | |
321 | } | |
66208910 CJ |
322 | |
323 | int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat) | |
324 | { | |
325 | struct cgx *cgx = cgxd; | |
326 | ||
327 | if (!cgx || lmac_id >= cgx->lmac_count) | |
328 | return -ENODEV; | |
329 | *tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8)); | |
330 | return 0; | |
331 | } | |
66208910 | 332 | |
1435f66a SG |
333 | int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable) |
334 | { | |
335 | struct cgx *cgx = cgxd; | |
336 | u64 cfg; | |
337 | ||
338 | if (!cgx || lmac_id >= cgx->lmac_count) | |
339 | return -ENODEV; | |
340 | ||
341 | cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); | |
342 | if (enable) | |
343 | cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN; | |
344 | else | |
345 | cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN); | |
346 | cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg); | |
347 | return 0; | |
348 | } | |
1435f66a | 349 | |
5d9b976d SG |
350 | int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable) |
351 | { | |
352 | struct cgx *cgx = cgxd; | |
353 | u64 cfg, last; | |
354 | ||
355 | if (!cgx || lmac_id >= cgx->lmac_count) | |
356 | return -ENODEV; | |
357 | ||
358 | cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); | |
359 | last = cfg; | |
360 | if (enable) | |
361 | cfg |= DATA_PKT_TX_EN; | |
362 | else | |
363 | cfg &= ~DATA_PKT_TX_EN; | |
364 | ||
365 | if (cfg != last) | |
366 | cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg); | |
367 | return !!(last & DATA_PKT_TX_EN); | |
368 | } | |
5d9b976d | 369 | |
f7e086e7 G |
370 | int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id, |
371 | u8 *tx_pause, u8 *rx_pause) | |
372 | { | |
373 | struct cgx *cgx = cgxd; | |
374 | u64 cfg; | |
375 | ||
376 | if (!cgx || lmac_id >= cgx->lmac_count) | |
377 | return -ENODEV; | |
378 | ||
379 | cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); | |
380 | *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK); | |
381 | ||
382 | cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL); | |
383 | *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV); | |
384 | return 0; | |
385 | } | |
386 | ||
/* Configure RX/TX 802.3x pause on @lmac_id.  Also updates the per-LMAC
 * RX backpressure override so that disabling TX pause forces the
 * override on and clears any pending backpressure indication.
 * Returns 0 or -ENODEV.
 */
int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
			   u8 tx_pause, u8 rx_pause)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
	cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
	cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
	cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
	cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
	cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

	/* Backpressure override lives in a CGX-wide CSR (accessed via
	 * LMAC 0) with one bit per LMAC.
	 */
	cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
	if (tx_pause) {
		cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
	} else {
		cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
		cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
	}
	cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
	return 0;
}
416 | ||
/* Initialize (enable=true) or tear down (enable=false) pause-frame
 * handling on @lmac_id for both the SMU and GMP paths.  On enable, also
 * programs the default pause time and a refresh interval of half that
 * value.  Called from cgx_lmac_init()/cgx_lmac_exit().
 */
static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
{
	u64 cfg;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return;
	if (enable) {
		/* Enable receive pause frames */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		/* Enable pause frames transmission */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
		cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

		/* Set pause time and interval */
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
			  DEFAULT_PAUSE_TIME);
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
		cfg &= ~0xFFFFULL;	/* interval is the low 16 bits */
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
			  cfg | (DEFAULT_PAUSE_TIME / 2));

		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
			  DEFAULT_PAUSE_TIME);

		cfg = cgx_read(cgx, lmac_id,
			       CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
		cfg &= ~0xFFFFULL;	/* interval is the low 16 bits */
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
			  cfg | (DEFAULT_PAUSE_TIME / 2));
	} else {
		/* ALL pause frames received are completely ignored */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		/* Disable pause frames transmission */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
		cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
	}
}
470 | ||
42157217 ZS |
471 | void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable) |
472 | { | |
473 | struct cgx *cgx = cgxd; | |
474 | u64 cfg; | |
475 | ||
476 | if (!cgx) | |
477 | return; | |
478 | ||
479 | if (enable) { | |
480 | /* Enable inbound PTP timestamping */ | |
481 | cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); | |
482 | cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE; | |
483 | cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); | |
484 | ||
485 | cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); | |
486 | cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE; | |
487 | cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); | |
488 | } else { | |
489 | /* Disable inbound PTP stamping */ | |
490 | cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); | |
491 | cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE; | |
492 | cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); | |
493 | ||
494 | cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); | |
495 | cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE; | |
496 | cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); | |
497 | } | |
498 | } | |
499 | ||
/* CGX Firmware interface low level support */

/* Issue firmware command @req on @lmac and wait (interruptibly, with
 * timeout) for its response, which is copied into *@resp.
 * Serialized per-LMAC via cmd_lock; completion is signalled by the FWI
 * interrupt handler clearing cmd_pend.  Returns 0, -EBUSY if firmware
 * still owns the command register, -EIO on timeout, or the
 * mutex_lock_interruptible() error.
 */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
	struct cgx *cgx = lmac->cgx;
	struct device *dev;
	int err = 0;
	u64 cmd;

	/* Ensure no other command is in progress */
	err = mutex_lock_interruptible(&lmac->cmd_lock);
	if (err)
		return err;

	/* Ensure command register is free */
	cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
	if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
		err = -EBUSY;
		goto unlock;
	}

	/* Update ownership in command request */
	req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);

	/* Mark this lmac as pending, before we start */
	lmac->cmd_pend = true;

	/* Start command in hardware */
	cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);

	/* Ensure command is completed without errors */
	if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
				msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
		dev = &cgx->pdev->dev;
		dev_err(dev, "cgx port %d:%d cmd timeout\n",
			cgx->cgx_id, lmac->lmac_id);
		err = -EIO;
		goto unlock;
	}

	/* we have a valid command response */
	smp_rmb(); /* Ensure the latest updates are visible;
		    * pairs with smp_wmb() in cgx_fwi_event_handler()
		    */
	*resp = lmac->resp;

unlock:
	mutex_unlock(&lmac->cmd_lock);

	return err;
}
548 | ||
549 | static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp, | |
550 | struct cgx *cgx, int lmac_id) | |
551 | { | |
552 | struct lmac *lmac; | |
553 | int err; | |
554 | ||
555 | lmac = lmac_pdata(lmac_id, cgx); | |
556 | if (!lmac) | |
557 | return -ENODEV; | |
558 | ||
559 | err = cgx_fwi_cmd_send(req, resp, lmac); | |
560 | ||
561 | /* Check for valid response */ | |
562 | if (!err) { | |
563 | if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL) | |
564 | return -EIO; | |
565 | else | |
566 | return 0; | |
567 | } | |
568 | ||
569 | return err; | |
570 | } | |
571 | ||
/* Populate the firmware-encoding -> user-format lookup tables
 * (cgx_speed_mbps, cgx_lmactype_string).  Called once from cgx_probe();
 * idempotent, so repeated probes are harmless.
 */
static inline void cgx_link_usertable_init(void)
{
	cgx_speed_mbps[CGX_LINK_NONE] = 0;
	cgx_speed_mbps[CGX_LINK_10M] = 10;
	cgx_speed_mbps[CGX_LINK_100M] = 100;
	cgx_speed_mbps[CGX_LINK_1G] = 1000;
	cgx_speed_mbps[CGX_LINK_2HG] = 2500;
	cgx_speed_mbps[CGX_LINK_5G] = 5000;
	cgx_speed_mbps[CGX_LINK_10G] = 10000;
	cgx_speed_mbps[CGX_LINK_20G] = 20000;
	cgx_speed_mbps[CGX_LINK_25G] = 25000;
	cgx_speed_mbps[CGX_LINK_40G] = 40000;
	cgx_speed_mbps[CGX_LINK_50G] = 50000;
	cgx_speed_mbps[CGX_LINK_100G] = 100000;

	cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
	cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI";
	cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI";
	cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R";
	cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R";
	cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII";
	cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R";
	cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R";
	cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R";
	cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
}
598 | ||
/* Decode a raw firmware link-status word @lstat into @linfo.
 * NOTE(review): if the hardware reports a lmac_type_id with no entry in
 * cgx_lmactype_string[], lmac_string is NULL and strncpy would deref
 * it; also strncpy only NUL-terminates if the source is shorter than
 * LMACTYPE_STR_LEN - 1 — worth confirming both invariants hold.
 */
static inline void link_status_user_format(u64 lstat,
					   struct cgx_link_user_info *linfo,
					   struct cgx *cgx, u8 lmac_id)
{
	char *lmac_string;

	linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
	linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
	linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
	linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
	lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
	strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}
612 | ||
/* Hardware event handlers */

/* Process a link-change status word @lstat for @lmac: refresh the
 * cached link_info and deliver the event to the registered callback (if
 * any), logging the state instead when no callback is registered.
 * Runs in IRQ context from cgx_fwi_event_handler().
 */
static inline void cgx_link_change_handler(u64 lstat,
					   struct lmac *lmac)
{
	struct cgx_link_user_info *linfo;
	struct cgx *cgx = lmac->cgx;
	struct cgx_link_event event;
	struct device *dev;
	int err_type;

	dev = &cgx->pdev->dev;

	link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
	err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);

	event.cgx_id = cgx->cgx_id;
	event.lmac_id = lmac->lmac_id;

	/* update the local copy of link status */
	lmac->link_info = event.link_uinfo;
	linfo = &lmac->link_info;

	/* Ensure callback doesn't get unregistered until we finish it */
	spin_lock(&lmac->event_cb_lock);

	if (!lmac->event_cb.notify_link_chg) {
		dev_dbg(dev, "cgx port %d:%d Link change handler null",
			cgx->cgx_id, lmac->lmac_id);
		if (err_type != CGX_ERR_NONE) {
			dev_err(dev, "cgx port %d:%d Link error %d\n",
				cgx->cgx_id, lmac->lmac_id, err_type);
		}
		dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
			 cgx->cgx_id, lmac->lmac_id,
			 linfo->link_up ? "UP" : "DOWN", linfo->speed);
		goto err;
	}

	if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
		dev_err(dev, "event notification failure\n");
err:
	spin_unlock(&lmac->event_cb_lock);
}
656 | ||
657 | static inline bool cgx_cmdresp_is_linkevent(u64 event) | |
658 | { | |
659 | u8 id; | |
660 | ||
661 | id = FIELD_GET(EVTREG_ID, event); | |
662 | if (id == CGX_CMD_LINK_BRING_UP || | |
663 | id == CGX_CMD_LINK_BRING_DOWN) | |
664 | return true; | |
665 | else | |
666 | return false; | |
667 | } | |
668 | ||
669 | static inline bool cgx_event_is_linkevent(u64 event) | |
670 | { | |
671 | if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE) | |
672 | return true; | |
673 | else | |
674 | return false; | |
675 | } | |
676 | ||
/* Firmware interface interrupt handler (one per LMAC).  Dispatches
 * command responses (waking the waiter in cgx_fwi_cmd_send()) and
 * asynchronous events, then acks both the event and interrupt
 * registers so firmware can post the next event.
 */
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
	struct lmac *lmac = data;
	struct cgx *cgx;
	u64 event;

	cgx = lmac->cgx;

	event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);

	/* Not our event — nothing has been posted by firmware */
	if (!FIELD_GET(EVTREG_ACK, event))
		return IRQ_NONE;

	switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
	case CGX_EVT_CMD_RESP:
		/* Copy the response. Since only one command is active at a
		 * time, there is no way a response can get overwritten
		 */
		lmac->resp = event;
		/* Ensure response is updated before thread context starts;
		 * pairs with smp_rmb() in cgx_fwi_cmd_send()
		 */
		smp_wmb();

		/* There wont be separate events for link change initiated from
		 * software; Hence report the command responses as events
		 */
		if (cgx_cmdresp_is_linkevent(event))
			cgx_link_change_handler(event, lmac);

		/* Release thread waiting for completion */
		lmac->cmd_pend = false;
		wake_up_interruptible(&lmac->wq_cmd_cmplt);
		break;
	case CGX_EVT_ASYNC:
		if (cgx_event_is_linkevent(event))
			cgx_link_change_handler(event, lmac);
		break;
	}

	/* Any new event or command response will be posted by firmware
	 * only after the current status is acked.
	 * Ack the interrupt register as well.
	 */
	cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
	cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT);

	return IRQ_HANDLED;
}
724 | ||
725 | /* APIs for PHY management using CGX firmware interface */ | |
726 | ||
727 | /* callback registration for hardware events like link change */ | |
728 | int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id) | |
729 | { | |
730 | struct cgx *cgx = cgxd; | |
731 | struct lmac *lmac; | |
732 | ||
733 | lmac = lmac_pdata(lmac_id, cgx); | |
734 | if (!lmac) | |
735 | return -ENODEV; | |
736 | ||
737 | lmac->event_cb = *cb; | |
738 | ||
739 | return 0; | |
740 | } | |
1463f382 | 741 | |
/* Unregister the link-change callback of @lmac_id.  Takes
 * event_cb_lock so an in-flight cgx_link_change_handler() finishes
 * before the callback pointers are cleared.  Returns 0 or -ENODEV.
 */
int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
{
	struct lmac *lmac;
	unsigned long flags;
	struct cgx *cgx = cgxd;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	spin_lock_irqsave(&lmac->event_cb_lock, flags);
	lmac->event_cb.notify_link_chg = NULL;
	lmac->event_cb.data = NULL;
	spin_unlock_irqrestore(&lmac->event_cb_lock, flags);

	return 0;
}
c9293236 | 759 | |
/* Query firmware (via the first probed CGX device) for the shared
 * firmware-data region base address, stored into *@base.
 * Returns 0, -ENXIO when no CGX device exists, or a firmware error.
 */
int cgx_get_fwdata_base(u64 *base)
{
	u64 req = 0, resp;
	struct cgx *cgx;
	int err;

	cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
	if (!cgx)
		return -ENXIO;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
	err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
	if (!err)
		*base = FIELD_GET(RESP_FWD_BASE, resp);

	return err;
}
777 | ||
d3b2b9ab LC |
778 | static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable) |
779 | { | |
780 | u64 req = 0; | |
781 | u64 resp; | |
782 | ||
783 | if (enable) | |
784 | req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req); | |
785 | else | |
786 | req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req); | |
787 | ||
788 | return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id); | |
789 | } | |
790 | ||
1463f382 LC |
791 | static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx) |
792 | { | |
793 | u64 req = 0; | |
794 | ||
795 | req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req); | |
796 | return cgx_fwi_cmd_generic(req, resp, cgx, 0); | |
797 | } | |
798 | ||
799 | static int cgx_lmac_verify_fwi_version(struct cgx *cgx) | |
3a4fa841 | 800 | { |
1463f382 LC |
801 | struct device *dev = &cgx->pdev->dev; |
802 | int major_ver, minor_ver; | |
803 | u64 resp; | |
804 | int err; | |
805 | ||
806 | if (!cgx->lmac_count) | |
807 | return 0; | |
808 | ||
809 | err = cgx_fwi_read_version(&resp, cgx); | |
810 | if (err) | |
811 | return err; | |
812 | ||
813 | major_ver = FIELD_GET(RESP_MAJOR_VER, resp); | |
814 | minor_ver = FIELD_GET(RESP_MINOR_VER, resp); | |
815 | dev_dbg(dev, "Firmware command interface version = %d.%d\n", | |
816 | major_ver, minor_ver); | |
817 | if (major_ver != CGX_FIRMWARE_MAJOR_VER || | |
818 | minor_ver != CGX_FIRMWARE_MINOR_VER) | |
819 | return -EIO; | |
820 | else | |
821 | return 0; | |
822 | } | |
823 | ||
d3b2b9ab LC |
824 | static void cgx_lmac_linkup_work(struct work_struct *work) |
825 | { | |
826 | struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work); | |
827 | struct device *dev = &cgx->pdev->dev; | |
828 | int i, err; | |
829 | ||
830 | /* Do Link up for all the lmacs */ | |
831 | for (i = 0; i < cgx->lmac_count; i++) { | |
832 | err = cgx_fwi_link_change(cgx, i, true); | |
833 | if (err) | |
834 | dev_info(dev, "cgx port %d:%d Link up command failed\n", | |
835 | cgx->cgx_id, i); | |
836 | } | |
837 | } | |
838 | ||
839 | int cgx_lmac_linkup_start(void *cgxd) | |
840 | { | |
841 | struct cgx *cgx = cgxd; | |
842 | ||
843 | if (!cgx) | |
844 | return -ENODEV; | |
845 | ||
846 | queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work); | |
847 | ||
848 | return 0; | |
849 | } | |
d3b2b9ab | 850 | |
1463f382 LC |
851 | static int cgx_lmac_init(struct cgx *cgx) |
852 | { | |
853 | struct lmac *lmac; | |
854 | int i, err; | |
855 | ||
3a4fa841 LC |
856 | cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7; |
857 | if (cgx->lmac_count > MAX_LMAC_PER_CGX) | |
858 | cgx->lmac_count = MAX_LMAC_PER_CGX; | |
1463f382 LC |
859 | |
860 | for (i = 0; i < cgx->lmac_count; i++) { | |
861 | lmac = kcalloc(1, sizeof(struct lmac), GFP_KERNEL); | |
862 | if (!lmac) | |
863 | return -ENOMEM; | |
864 | lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL); | |
865 | if (!lmac->name) | |
866 | return -ENOMEM; | |
867 | sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i); | |
868 | lmac->lmac_id = i; | |
869 | lmac->cgx = cgx; | |
870 | init_waitqueue_head(&lmac->wq_cmd_cmplt); | |
871 | mutex_init(&lmac->cmd_lock); | |
c9293236 | 872 | spin_lock_init(&lmac->event_cb_lock); |
1463f382 LC |
873 | err = request_irq(pci_irq_vector(cgx->pdev, |
874 | CGX_LMAC_FWI + i * 9), | |
875 | cgx_fwi_event_handler, 0, lmac->name, lmac); | |
876 | if (err) | |
877 | return err; | |
878 | ||
879 | /* Enable interrupt */ | |
880 | cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S, | |
881 | FW_CGX_INT); | |
882 | ||
883 | /* Add reference */ | |
884 | cgx->lmac_idmap[i] = lmac; | |
f7e086e7 | 885 | cgx_lmac_pause_frm_config(cgx, i, true); |
1463f382 LC |
886 | } |
887 | ||
888 | return cgx_lmac_verify_fwi_version(cgx); | |
889 | } | |
890 | ||
/* Tear down everything cgx_lmac_init() and cgx_probe() set up for the
 * LMACs: flush/destroy the command workqueue, disable pause-frame
 * handling, free the FWI IRQs and the per-LMAC state.  Always returns 0.
 */
static int cgx_lmac_exit(struct cgx *cgx)
{
	struct lmac *lmac;
	int i;

	if (cgx->cgx_cmd_workq) {
		flush_workqueue(cgx->cgx_cmd_workq);
		destroy_workqueue(cgx->cgx_cmd_workq);
		cgx->cgx_cmd_workq = NULL;
	}

	/* Free all lmac related resources */
	for (i = 0; i < cgx->lmac_count; i++) {
		cgx_lmac_pause_frm_config(cgx, i, false);
		lmac = cgx->lmac_idmap[i];
		/* Slot may be empty if init failed part-way through */
		if (!lmac)
			continue;
		free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac);
		kfree(lmac->name);
		kfree(lmac);
	}

	return 0;
}
915 | ||
/* PCI probe: enable the device, map its CSR BAR, allocate the MSI-X
 * vectors used by the firmware interface, create the link-up command
 * workqueue, add the device to the global list and initialize its
 * LMACs.  The error labels unwind in strict reverse order of setup.
 */
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct cgx *cgx;
	int err, nvec;

	cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
	if (!cgx)
		return -ENOMEM;
	cgx->pdev = pdev;

	pci_set_drvdata(pdev, cgx);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* MAP configuration registers */
	cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!cgx->reg_base) {
		dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	nvec = CGX_NVEC;
	err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (err < 0 || err != nvec) {
		dev_err(dev, "Request for %d msix vectors failed, err %d\n",
			nvec, err);
		goto err_release_regions;
	}

	/* Derive this block's id from the BAR address */
	cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
		& CGX_ID_MASK;

	/* init wq for processing linkup requests */
	INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
	cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
	if (!cgx->cgx_cmd_workq) {
		dev_err(dev, "alloc workqueue failed for cgx cmd");
		err = -ENOMEM;
		goto err_free_irq_vectors;
	}

	list_add(&cgx->cgx_list, &cgx_list);

	cgx_link_usertable_init();

	err = cgx_lmac_init(cgx);
	if (err)
		goto err_release_lmac;

	return 0;

err_release_lmac:
	cgx_lmac_exit(cgx);
	list_del(&cgx->cgx_list);
err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
992 | ||
/* PCI remove: mirror of cgx_probe() — tear down LMACs, drop the device
 * from the global list and release PCI resources.
 */
static void cgx_remove(struct pci_dev *pdev)
{
	struct cgx *cgx = pci_get_drvdata(pdev);

	cgx_lmac_exit(cgx);
	list_del(&cgx->cgx_list);
	pci_free_irq_vectors(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
1004 | ||
/* PCI driver glue; registered by the AF driver that links this file. */
struct pci_driver cgx_driver = {
	.name = DRV_NAME,
	.id_table = cgx_id_table,
	.probe = cgx_probe,
	.remove = cgx_remove,
};