1 // SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 CGX driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #include <linux/acpi.h>
12 #include <linux/module.h>
13 #include <linux/interrupt.h>
14 #include <linux/pci.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/phy.h>
19 #include <linux/of_mdio.h>
20 #include <linux/of_net.h>
24 #define DRV_NAME "octeontx2-cgx"
25 #define DRV_STRING "Marvell OcteonTX2 CGX/MAC Driver"
29 * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
30 * @cmd_lock: Lock to serialize the command interface
31 * @resp: command response
32 * @link_info: link related information
33 * @event_cb: callback for linkchange events
34 * @event_cb_lock: lock for serializing callback with unregister
35 * @cmd_pend: flag set before new command is started
36 * flag cleared after command response is received
37 * @cgx: parent cgx port
38 * @lmac_id: lmac port id
39 * @name: lmac port name
42 wait_queue_head_t wq_cmd_cmplt
;
43 struct mutex cmd_lock
;
45 struct cgx_link_user_info link_info
;
46 struct cgx_event_cb event_cb
;
47 spinlock_t event_cb_lock
;
55 void __iomem
*reg_base
;
59 struct lmac
*lmac_idmap
[MAX_LMAC_PER_CGX
];
60 struct work_struct cgx_cmd_work
;
61 struct workqueue_struct
*cgx_cmd_workq
;
62 struct list_head cgx_list
;
65 static LIST_HEAD(cgx_list
);
67 /* Convert firmware speed encoding to user format(Mbps) */
68 static u32 cgx_speed_mbps
[CGX_LINK_SPEED_MAX
];
70 /* Convert firmware lmac type encoding to string */
71 static char *cgx_lmactype_string
[LMAC_MODE_MAX
];
73 /* CGX PHY management internal APIs */
74 static int cgx_fwi_link_change(struct cgx
*cgx
, int lmac_id
, bool en
);
76 /* Supported devices */
77 static const struct pci_device_id cgx_id_table
[] = {
78 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM
, PCI_DEVID_OCTEONTX2_CGX
) },
79 { 0, } /* end of table */
82 MODULE_DEVICE_TABLE(pci
, cgx_id_table
);
84 static void cgx_write(struct cgx
*cgx
, u64 lmac
, u64 offset
, u64 val
)
86 writeq(val
, cgx
->reg_base
+ (lmac
<< 18) + offset
);
89 static u64
cgx_read(struct cgx
*cgx
, u64 lmac
, u64 offset
)
91 return readq(cgx
->reg_base
+ (lmac
<< 18) + offset
);
94 static inline struct lmac
*lmac_pdata(u8 lmac_id
, struct cgx
*cgx
)
96 if (!cgx
|| lmac_id
>= MAX_LMAC_PER_CGX
)
99 return cgx
->lmac_idmap
[lmac_id
];
102 int cgx_get_cgxcnt_max(void)
107 list_for_each_entry(cgx_dev
, &cgx_list
, cgx_list
)
108 if (cgx_dev
->cgx_id
> idmax
)
109 idmax
= cgx_dev
->cgx_id
;
116 EXPORT_SYMBOL(cgx_get_cgxcnt_max
);
118 int cgx_get_lmac_cnt(void *cgxd
)
120 struct cgx
*cgx
= cgxd
;
125 return cgx
->lmac_count
;
127 EXPORT_SYMBOL(cgx_get_lmac_cnt
);
129 void *cgx_get_pdata(int cgx_id
)
133 list_for_each_entry(cgx_dev
, &cgx_list
, cgx_list
) {
134 if (cgx_dev
->cgx_id
== cgx_id
)
139 EXPORT_SYMBOL(cgx_get_pdata
);
141 /* Ensure the required lock for event queue(where asynchronous events are
142 * posted) is acquired before calling this API. Else an asynchronous event(with
143 * latest link status) can reach the destination before this function returns
144 * and could make the link status appear wrong.
146 int cgx_get_link_info(void *cgxd
, int lmac_id
,
147 struct cgx_link_user_info
*linfo
)
149 struct lmac
*lmac
= lmac_pdata(lmac_id
, cgxd
);
154 *linfo
= lmac
->link_info
;
157 EXPORT_SYMBOL(cgx_get_link_info
);
159 static u64
mac2u64 (u8
*mac_addr
)
164 for (index
= ETH_ALEN
- 1; index
>= 0; index
--)
165 mac
|= ((u64
)*mac_addr
++) << (8 * index
);
169 int cgx_lmac_addr_set(u8 cgx_id
, u8 lmac_id
, u8
*mac_addr
)
171 struct cgx
*cgx_dev
= cgx_get_pdata(cgx_id
);
174 /* copy 6bytes from macaddr */
175 /* memcpy(&cfg, mac_addr, 6); */
177 cfg
= mac2u64 (mac_addr
);
179 cgx_write(cgx_dev
, 0, (CGXX_CMRX_RX_DMAC_CAM0
+ (lmac_id
* 0x8)),
180 cfg
| CGX_DMAC_CAM_ADDR_ENABLE
| ((u64
)lmac_id
<< 49));
182 cfg
= cgx_read(cgx_dev
, lmac_id
, CGXX_CMRX_RX_DMAC_CTL0
);
183 cfg
|= CGX_DMAC_CTL0_CAM_ENABLE
;
184 cgx_write(cgx_dev
, lmac_id
, CGXX_CMRX_RX_DMAC_CTL0
, cfg
);
188 EXPORT_SYMBOL(cgx_lmac_addr_set
);
190 u64
cgx_lmac_addr_get(u8 cgx_id
, u8 lmac_id
)
192 struct cgx
*cgx_dev
= cgx_get_pdata(cgx_id
);
195 cfg
= cgx_read(cgx_dev
, 0, CGXX_CMRX_RX_DMAC_CAM0
+ lmac_id
* 0x8);
196 return cfg
& CGX_RX_DMAC_ADR_MASK
;
198 EXPORT_SYMBOL(cgx_lmac_addr_get
);
200 int cgx_set_pkind(void *cgxd
, u8 lmac_id
, int pkind
)
202 struct cgx
*cgx
= cgxd
;
204 if (!cgx
|| lmac_id
>= cgx
->lmac_count
)
207 cgx_write(cgx
, lmac_id
, CGXX_CMRX_RX_ID_MAP
, (pkind
& 0x3F));
210 EXPORT_SYMBOL(cgx_set_pkind
);
212 static inline u8
cgx_get_lmac_type(struct cgx
*cgx
, int lmac_id
)
216 cfg
= cgx_read(cgx
, lmac_id
, CGXX_CMRX_CFG
);
217 return (cfg
>> CGX_LMAC_TYPE_SHIFT
) & CGX_LMAC_TYPE_MASK
;
220 /* Configure CGX LMAC in internal loopback mode */
221 int cgx_lmac_internal_loopback(void *cgxd
, int lmac_id
, bool enable
)
223 struct cgx
*cgx
= cgxd
;
227 if (!cgx
|| lmac_id
>= cgx
->lmac_count
)
230 lmac_type
= cgx_get_lmac_type(cgx
, lmac_id
);
231 if (lmac_type
== LMAC_MODE_SGMII
|| lmac_type
== LMAC_MODE_QSGMII
) {
232 cfg
= cgx_read(cgx
, lmac_id
, CGXX_GMP_PCS_MRX_CTL
);
234 cfg
|= CGXX_GMP_PCS_MRX_CTL_LBK
;
236 cfg
&= ~CGXX_GMP_PCS_MRX_CTL_LBK
;
237 cgx_write(cgx
, lmac_id
, CGXX_GMP_PCS_MRX_CTL
, cfg
);
239 cfg
= cgx_read(cgx
, lmac_id
, CGXX_SPUX_CONTROL1
);
241 cfg
|= CGXX_SPUX_CONTROL1_LBK
;
243 cfg
&= ~CGXX_SPUX_CONTROL1_LBK
;
244 cgx_write(cgx
, lmac_id
, CGXX_SPUX_CONTROL1
, cfg
);
248 EXPORT_SYMBOL(cgx_lmac_internal_loopback
);
250 void cgx_lmac_promisc_config(int cgx_id
, int lmac_id
, bool enable
)
252 struct cgx
*cgx
= cgx_get_pdata(cgx_id
);
259 /* Enable promiscuous mode on LMAC */
260 cfg
= cgx_read(cgx
, lmac_id
, CGXX_CMRX_RX_DMAC_CTL0
);
261 cfg
&= ~(CGX_DMAC_CAM_ACCEPT
| CGX_DMAC_MCAST_MODE
);
262 cfg
|= CGX_DMAC_BCAST_MODE
;
263 cgx_write(cgx
, lmac_id
, CGXX_CMRX_RX_DMAC_CTL0
, cfg
);
265 cfg
= cgx_read(cgx
, 0,
266 (CGXX_CMRX_RX_DMAC_CAM0
+ lmac_id
* 0x8));
267 cfg
&= ~CGX_DMAC_CAM_ADDR_ENABLE
;
269 (CGXX_CMRX_RX_DMAC_CAM0
+ lmac_id
* 0x8), cfg
);
271 /* Disable promiscuous mode */
272 cfg
= cgx_read(cgx
, lmac_id
, CGXX_CMRX_RX_DMAC_CTL0
);
273 cfg
|= CGX_DMAC_CAM_ACCEPT
| CGX_DMAC_MCAST_MODE
;
274 cgx_write(cgx
, lmac_id
, CGXX_CMRX_RX_DMAC_CTL0
, cfg
);
275 cfg
= cgx_read(cgx
, 0,
276 (CGXX_CMRX_RX_DMAC_CAM0
+ lmac_id
* 0x8));
277 cfg
|= CGX_DMAC_CAM_ADDR_ENABLE
;
279 (CGXX_CMRX_RX_DMAC_CAM0
+ lmac_id
* 0x8), cfg
);
282 EXPORT_SYMBOL(cgx_lmac_promisc_config
);
284 int cgx_get_rx_stats(void *cgxd
, int lmac_id
, int idx
, u64
*rx_stat
)
286 struct cgx
*cgx
= cgxd
;
288 if (!cgx
|| lmac_id
>= cgx
->lmac_count
)
290 *rx_stat
= cgx_read(cgx
, lmac_id
, CGXX_CMRX_RX_STAT0
+ (idx
* 8));
293 EXPORT_SYMBOL(cgx_get_rx_stats
);
295 int cgx_get_tx_stats(void *cgxd
, int lmac_id
, int idx
, u64
*tx_stat
)
297 struct cgx
*cgx
= cgxd
;
299 if (!cgx
|| lmac_id
>= cgx
->lmac_count
)
301 *tx_stat
= cgx_read(cgx
, lmac_id
, CGXX_CMRX_TX_STAT0
+ (idx
* 8));
304 EXPORT_SYMBOL(cgx_get_tx_stats
);
306 int cgx_lmac_rx_tx_enable(void *cgxd
, int lmac_id
, bool enable
)
308 struct cgx
*cgx
= cgxd
;
311 if (!cgx
|| lmac_id
>= cgx
->lmac_count
)
314 cfg
= cgx_read(cgx
, lmac_id
, CGXX_CMRX_CFG
);
316 cfg
|= CMR_EN
| DATA_PKT_RX_EN
| DATA_PKT_TX_EN
;
318 cfg
&= ~(CMR_EN
| DATA_PKT_RX_EN
| DATA_PKT_TX_EN
);
319 cgx_write(cgx
, lmac_id
, CGXX_CMRX_CFG
, cfg
);
322 EXPORT_SYMBOL(cgx_lmac_rx_tx_enable
);
324 /* CGX Firmware interface low level support */
325 static int cgx_fwi_cmd_send(u64 req
, u64
*resp
, struct lmac
*lmac
)
327 struct cgx
*cgx
= lmac
->cgx
;
332 /* Ensure no other command is in progress */
333 err
= mutex_lock_interruptible(&lmac
->cmd_lock
);
337 /* Ensure command register is free */
338 cmd
= cgx_read(cgx
, lmac
->lmac_id
, CGX_COMMAND_REG
);
339 if (FIELD_GET(CMDREG_OWN
, cmd
) != CGX_CMD_OWN_NS
) {
344 /* Update ownership in command request */
345 req
= FIELD_SET(CMDREG_OWN
, CGX_CMD_OWN_FIRMWARE
, req
);
347 /* Mark this lmac as pending, before we start */
348 lmac
->cmd_pend
= true;
350 /* Start command in hardware */
351 cgx_write(cgx
, lmac
->lmac_id
, CGX_COMMAND_REG
, req
);
353 /* Ensure command is completed without errors */
354 if (!wait_event_timeout(lmac
->wq_cmd_cmplt
, !lmac
->cmd_pend
,
355 msecs_to_jiffies(CGX_CMD_TIMEOUT
))) {
356 dev
= &cgx
->pdev
->dev
;
357 dev_err(dev
, "cgx port %d:%d cmd timeout\n",
358 cgx
->cgx_id
, lmac
->lmac_id
);
363 /* we have a valid command response */
364 smp_rmb(); /* Ensure the latest updates are visible */
368 mutex_unlock(&lmac
->cmd_lock
);
373 static inline int cgx_fwi_cmd_generic(u64 req
, u64
*resp
,
374 struct cgx
*cgx
, int lmac_id
)
379 lmac
= lmac_pdata(lmac_id
, cgx
);
383 err
= cgx_fwi_cmd_send(req
, resp
, lmac
);
385 /* Check for valid response */
387 if (FIELD_GET(EVTREG_STAT
, *resp
) == CGX_STAT_FAIL
)
396 static inline void cgx_link_usertable_init(void)
398 cgx_speed_mbps
[CGX_LINK_NONE
] = 0;
399 cgx_speed_mbps
[CGX_LINK_10M
] = 10;
400 cgx_speed_mbps
[CGX_LINK_100M
] = 100;
401 cgx_speed_mbps
[CGX_LINK_1G
] = 1000;
402 cgx_speed_mbps
[CGX_LINK_2HG
] = 2500;
403 cgx_speed_mbps
[CGX_LINK_5G
] = 5000;
404 cgx_speed_mbps
[CGX_LINK_10G
] = 10000;
405 cgx_speed_mbps
[CGX_LINK_20G
] = 20000;
406 cgx_speed_mbps
[CGX_LINK_25G
] = 25000;
407 cgx_speed_mbps
[CGX_LINK_40G
] = 40000;
408 cgx_speed_mbps
[CGX_LINK_50G
] = 50000;
409 cgx_speed_mbps
[CGX_LINK_100G
] = 100000;
411 cgx_lmactype_string
[LMAC_MODE_SGMII
] = "SGMII";
412 cgx_lmactype_string
[LMAC_MODE_XAUI
] = "XAUI";
413 cgx_lmactype_string
[LMAC_MODE_RXAUI
] = "RXAUI";
414 cgx_lmactype_string
[LMAC_MODE_10G_R
] = "10G_R";
415 cgx_lmactype_string
[LMAC_MODE_40G_R
] = "40G_R";
416 cgx_lmactype_string
[LMAC_MODE_QSGMII
] = "QSGMII";
417 cgx_lmactype_string
[LMAC_MODE_25G_R
] = "25G_R";
418 cgx_lmactype_string
[LMAC_MODE_50G_R
] = "50G_R";
419 cgx_lmactype_string
[LMAC_MODE_100G_R
] = "100G_R";
420 cgx_lmactype_string
[LMAC_MODE_USXGMII
] = "USXGMII";
423 static inline void link_status_user_format(u64 lstat
,
424 struct cgx_link_user_info
*linfo
,
425 struct cgx
*cgx
, u8 lmac_id
)
429 linfo
->link_up
= FIELD_GET(RESP_LINKSTAT_UP
, lstat
);
430 linfo
->full_duplex
= FIELD_GET(RESP_LINKSTAT_FDUPLEX
, lstat
);
431 linfo
->speed
= cgx_speed_mbps
[FIELD_GET(RESP_LINKSTAT_SPEED
, lstat
)];
432 linfo
->lmac_type_id
= cgx_get_lmac_type(cgx
, lmac_id
);
433 lmac_string
= cgx_lmactype_string
[linfo
->lmac_type_id
];
434 strncpy(linfo
->lmac_type
, lmac_string
, LMACTYPE_STR_LEN
- 1);
437 /* Hardware event handlers */
438 static inline void cgx_link_change_handler(u64 lstat
,
441 struct cgx_link_user_info
*linfo
;
442 struct cgx
*cgx
= lmac
->cgx
;
443 struct cgx_link_event event
;
447 dev
= &cgx
->pdev
->dev
;
449 link_status_user_format(lstat
, &event
.link_uinfo
, cgx
, lmac
->lmac_id
);
450 err_type
= FIELD_GET(RESP_LINKSTAT_ERRTYPE
, lstat
);
452 event
.cgx_id
= cgx
->cgx_id
;
453 event
.lmac_id
= lmac
->lmac_id
;
455 /* update the local copy of link status */
456 lmac
->link_info
= event
.link_uinfo
;
457 linfo
= &lmac
->link_info
;
459 /* Ensure callback doesn't get unregistered until we finish it */
460 spin_lock(&lmac
->event_cb_lock
);
462 if (!lmac
->event_cb
.notify_link_chg
) {
463 dev_dbg(dev
, "cgx port %d:%d Link change handler null",
464 cgx
->cgx_id
, lmac
->lmac_id
);
465 if (err_type
!= CGX_ERR_NONE
) {
466 dev_err(dev
, "cgx port %d:%d Link error %d\n",
467 cgx
->cgx_id
, lmac
->lmac_id
, err_type
);
469 dev_info(dev
, "cgx port %d:%d Link is %s %d Mbps\n",
470 cgx
->cgx_id
, lmac
->lmac_id
,
471 linfo
->link_up
? "UP" : "DOWN", linfo
->speed
);
475 if (lmac
->event_cb
.notify_link_chg(&event
, lmac
->event_cb
.data
))
476 dev_err(dev
, "event notification failure\n");
478 spin_unlock(&lmac
->event_cb_lock
);
481 static inline bool cgx_cmdresp_is_linkevent(u64 event
)
485 id
= FIELD_GET(EVTREG_ID
, event
);
486 if (id
== CGX_CMD_LINK_BRING_UP
||
487 id
== CGX_CMD_LINK_BRING_DOWN
)
493 static inline bool cgx_event_is_linkevent(u64 event
)
495 if (FIELD_GET(EVTREG_ID
, event
) == CGX_EVT_LINK_CHANGE
)
501 static inline int cgx_fwi_get_mkex_prfl_sz(u64
*prfl_sz
,
508 req
= FIELD_SET(CMDREG_ID
, CGX_CMD_GET_MKEX_PRFL_SIZE
, req
);
509 err
= cgx_fwi_cmd_generic(req
, &resp
, cgx
, 0);
511 *prfl_sz
= FIELD_GET(RESP_MKEX_PRFL_SIZE
, resp
);
516 static inline int cgx_fwi_get_mkex_prfl_addr(u64
*prfl_addr
,
523 req
= FIELD_SET(CMDREG_ID
, CGX_CMD_GET_MKEX_PRFL_ADDR
, req
);
524 err
= cgx_fwi_cmd_generic(req
, &resp
, cgx
, 0);
526 *prfl_addr
= FIELD_GET(RESP_MKEX_PRFL_ADDR
, resp
);
531 int cgx_get_mkex_prfl_info(u64
*addr
, u64
*size
)
539 cgx_dev
= list_first_entry(&cgx_list
, struct cgx
, cgx_list
);
543 err
= cgx_fwi_get_mkex_prfl_sz(size
, cgx_dev
);
547 err
= cgx_fwi_get_mkex_prfl_addr(addr
, cgx_dev
);
553 EXPORT_SYMBOL(cgx_get_mkex_prfl_info
);
555 static irqreturn_t
cgx_fwi_event_handler(int irq
, void *data
)
557 struct lmac
*lmac
= data
;
563 event
= cgx_read(cgx
, lmac
->lmac_id
, CGX_EVENT_REG
);
565 if (!FIELD_GET(EVTREG_ACK
, event
))
568 switch (FIELD_GET(EVTREG_EVT_TYPE
, event
)) {
569 case CGX_EVT_CMD_RESP
:
570 /* Copy the response. Since only one command is active at a
571 * time, there is no way a response can get overwritten
574 /* Ensure response is updated before thread context starts */
577 /* There wont be separate events for link change initiated from
578 * software; Hence report the command responses as events
580 if (cgx_cmdresp_is_linkevent(event
))
581 cgx_link_change_handler(event
, lmac
);
583 /* Release thread waiting for completion */
584 lmac
->cmd_pend
= false;
585 wake_up_interruptible(&lmac
->wq_cmd_cmplt
);
588 if (cgx_event_is_linkevent(event
))
589 cgx_link_change_handler(event
, lmac
);
593 /* Any new event or command response will be posted by firmware
594 * only after the current status is acked.
595 * Ack the interrupt register as well.
597 cgx_write(lmac
->cgx
, lmac
->lmac_id
, CGX_EVENT_REG
, 0);
598 cgx_write(lmac
->cgx
, lmac
->lmac_id
, CGXX_CMRX_INT
, FW_CGX_INT
);
603 /* APIs for PHY management using CGX firmware interface */
605 /* callback registration for hardware events like link change */
606 int cgx_lmac_evh_register(struct cgx_event_cb
*cb
, void *cgxd
, int lmac_id
)
608 struct cgx
*cgx
= cgxd
;
611 lmac
= lmac_pdata(lmac_id
, cgx
);
615 lmac
->event_cb
= *cb
;
619 EXPORT_SYMBOL(cgx_lmac_evh_register
);
621 int cgx_lmac_evh_unregister(void *cgxd
, int lmac_id
)
625 struct cgx
*cgx
= cgxd
;
627 lmac
= lmac_pdata(lmac_id
, cgx
);
631 spin_lock_irqsave(&lmac
->event_cb_lock
, flags
);
632 lmac
->event_cb
.notify_link_chg
= NULL
;
633 lmac
->event_cb
.data
= NULL
;
634 spin_unlock_irqrestore(&lmac
->event_cb_lock
, flags
);
638 EXPORT_SYMBOL(cgx_lmac_evh_unregister
);
640 static int cgx_fwi_link_change(struct cgx
*cgx
, int lmac_id
, bool enable
)
646 req
= FIELD_SET(CMDREG_ID
, CGX_CMD_LINK_BRING_UP
, req
);
648 req
= FIELD_SET(CMDREG_ID
, CGX_CMD_LINK_BRING_DOWN
, req
);
650 return cgx_fwi_cmd_generic(req
, &resp
, cgx
, lmac_id
);
653 static inline int cgx_fwi_read_version(u64
*resp
, struct cgx
*cgx
)
657 req
= FIELD_SET(CMDREG_ID
, CGX_CMD_GET_FW_VER
, req
);
658 return cgx_fwi_cmd_generic(req
, resp
, cgx
, 0);
661 static int cgx_lmac_verify_fwi_version(struct cgx
*cgx
)
663 struct device
*dev
= &cgx
->pdev
->dev
;
664 int major_ver
, minor_ver
;
668 if (!cgx
->lmac_count
)
671 err
= cgx_fwi_read_version(&resp
, cgx
);
675 major_ver
= FIELD_GET(RESP_MAJOR_VER
, resp
);
676 minor_ver
= FIELD_GET(RESP_MINOR_VER
, resp
);
677 dev_dbg(dev
, "Firmware command interface version = %d.%d\n",
678 major_ver
, minor_ver
);
679 if (major_ver
!= CGX_FIRMWARE_MAJOR_VER
||
680 minor_ver
!= CGX_FIRMWARE_MINOR_VER
)
686 static void cgx_lmac_linkup_work(struct work_struct
*work
)
688 struct cgx
*cgx
= container_of(work
, struct cgx
, cgx_cmd_work
);
689 struct device
*dev
= &cgx
->pdev
->dev
;
692 /* Do Link up for all the lmacs */
693 for (i
= 0; i
< cgx
->lmac_count
; i
++) {
694 err
= cgx_fwi_link_change(cgx
, i
, true);
696 dev_info(dev
, "cgx port %d:%d Link up command failed\n",
701 int cgx_lmac_linkup_start(void *cgxd
)
703 struct cgx
*cgx
= cgxd
;
708 queue_work(cgx
->cgx_cmd_workq
, &cgx
->cgx_cmd_work
);
712 EXPORT_SYMBOL(cgx_lmac_linkup_start
);
714 static int cgx_lmac_init(struct cgx
*cgx
)
719 cgx
->lmac_count
= cgx_read(cgx
, 0, CGXX_CMRX_RX_LMACS
) & 0x7;
720 if (cgx
->lmac_count
> MAX_LMAC_PER_CGX
)
721 cgx
->lmac_count
= MAX_LMAC_PER_CGX
;
723 for (i
= 0; i
< cgx
->lmac_count
; i
++) {
724 lmac
= kcalloc(1, sizeof(struct lmac
), GFP_KERNEL
);
727 lmac
->name
= kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL
);
730 sprintf(lmac
->name
, "cgx_fwi_%d_%d", cgx
->cgx_id
, i
);
733 init_waitqueue_head(&lmac
->wq_cmd_cmplt
);
734 mutex_init(&lmac
->cmd_lock
);
735 spin_lock_init(&lmac
->event_cb_lock
);
736 err
= request_irq(pci_irq_vector(cgx
->pdev
,
737 CGX_LMAC_FWI
+ i
* 9),
738 cgx_fwi_event_handler
, 0, lmac
->name
, lmac
);
742 /* Enable interrupt */
743 cgx_write(cgx
, lmac
->lmac_id
, CGXX_CMRX_INT_ENA_W1S
,
747 cgx
->lmac_idmap
[i
] = lmac
;
750 return cgx_lmac_verify_fwi_version(cgx
);
753 static int cgx_lmac_exit(struct cgx
*cgx
)
758 if (cgx
->cgx_cmd_workq
) {
759 flush_workqueue(cgx
->cgx_cmd_workq
);
760 destroy_workqueue(cgx
->cgx_cmd_workq
);
761 cgx
->cgx_cmd_workq
= NULL
;
764 /* Free all lmac related resources */
765 for (i
= 0; i
< cgx
->lmac_count
; i
++) {
766 lmac
= cgx
->lmac_idmap
[i
];
769 free_irq(pci_irq_vector(cgx
->pdev
, CGX_LMAC_FWI
+ i
* 9), lmac
);
777 static int cgx_probe(struct pci_dev
*pdev
, const struct pci_device_id
*id
)
779 struct device
*dev
= &pdev
->dev
;
783 cgx
= devm_kzalloc(dev
, sizeof(*cgx
), GFP_KERNEL
);
788 pci_set_drvdata(pdev
, cgx
);
790 err
= pci_enable_device(pdev
);
792 dev_err(dev
, "Failed to enable PCI device\n");
793 pci_set_drvdata(pdev
, NULL
);
797 err
= pci_request_regions(pdev
, DRV_NAME
);
799 dev_err(dev
, "PCI request regions failed 0x%x\n", err
);
800 goto err_disable_device
;
803 /* MAP configuration registers */
804 cgx
->reg_base
= pcim_iomap(pdev
, PCI_CFG_REG_BAR_NUM
, 0);
805 if (!cgx
->reg_base
) {
806 dev_err(dev
, "CGX: Cannot map CSR memory space, aborting\n");
808 goto err_release_regions
;
812 err
= pci_alloc_irq_vectors(pdev
, nvec
, nvec
, PCI_IRQ_MSIX
);
813 if (err
< 0 || err
!= nvec
) {
814 dev_err(dev
, "Request for %d msix vectors failed, err %d\n",
816 goto err_release_regions
;
819 cgx
->cgx_id
= (pci_resource_start(pdev
, PCI_CFG_REG_BAR_NUM
) >> 24)
822 /* init wq for processing linkup requests */
823 INIT_WORK(&cgx
->cgx_cmd_work
, cgx_lmac_linkup_work
);
824 cgx
->cgx_cmd_workq
= alloc_workqueue("cgx_cmd_workq", 0, 0);
825 if (!cgx
->cgx_cmd_workq
) {
826 dev_err(dev
, "alloc workqueue failed for cgx cmd");
828 goto err_release_regions
;
831 list_add(&cgx
->cgx_list
, &cgx_list
);
833 cgx_link_usertable_init();
835 err
= cgx_lmac_init(cgx
);
837 goto err_release_lmac
;
843 list_del(&cgx
->cgx_list
);
845 pci_release_regions(pdev
);
847 pci_disable_device(pdev
);
848 pci_set_drvdata(pdev
, NULL
);
852 static void cgx_remove(struct pci_dev
*pdev
)
854 struct cgx
*cgx
= pci_get_drvdata(pdev
);
857 list_del(&cgx
->cgx_list
);
858 pci_free_irq_vectors(pdev
);
859 pci_release_regions(pdev
);
860 pci_disable_device(pdev
);
861 pci_set_drvdata(pdev
, NULL
);
864 struct pci_driver cgx_driver
= {
866 .id_table
= cgx_id_table
,
868 .remove
= cgx_remove
,