u8 cgx_id;
u8 lmac_count;
struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
+ struct work_struct cgx_cmd_work;
+ struct workqueue_struct *cgx_cmd_workq;
struct list_head cgx_list;
};
/* Convert firmware lmac type encoding to string */
static char *cgx_lmactype_string[LMAC_MODE_MAX];
+/* CGX PHY management internal APIs */
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
+
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
}
EXPORT_SYMBOL(cgx_lmac_evh_unregister);
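+
+/* Ask firmware to bring the link on the given LMAC up or down */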
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
+{
+ u64 req = 0;
+ u64 resp;
+
+ if (enable)
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
+ else
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
u64 req = 0;
return 0;
}
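+
+/* Work handler: request a firmware link-up for every LMAC on this CGX */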
+static void cgx_lmac_linkup_work(struct work_struct *work)
+{
+ struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
+ struct device *dev = &cgx->pdev->dev;
+ int i, err;
+
+ /* Do Link up for all the lmacs */
+ for (i = 0; i < cgx->lmac_count; i++) {
+ err = cgx_fwi_link_change(cgx, i, true);
+ if (err)
+ dev_info(dev, "cgx port %d:%d Link up command failed\n",
+ cgx->cgx_id, i);
+ }
+}
+
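+/* Queue deferred link-up of all LMACs on this CGX device; called by the
+ * RVU AF driver once its event handlers are registered
+ */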
+int cgx_lmac_linkup_start(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -ENODEV;
+
+ queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_linkup_start);
+
static int cgx_lmac_exit(struct cgx *cgx)
{
struct lmac *lmac;
int i;

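+ /* Flush pending link-up work and destroy the workqueue before freeing LMAC resources */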
+ if (cgx->cgx_cmd_workq) {
+ flush_workqueue(cgx->cgx_cmd_workq);
+ destroy_workqueue(cgx->cgx_cmd_workq);
+ cgx->cgx_cmd_workq = NULL;
+ }
+
/* Free all lmac related resources */
for (i = 0; i < cgx->lmac_count; i++) {
lmac = cgx->lmac_idmap[i];
cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
& CGX_ID_MASK;
+ /* init wq for processing linkup requests */
+ INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
+ cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
+ if (!cgx->cgx_cmd_workq) {
+ dev_err(dev, "alloc workqueue failed for cgx cmd\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
list_add(&cgx->cgx_list, &cgx_list);
cgx_link_usertable_init();
for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
err = cgx_lmac_evh_register(&cb, cgxd, lmac);
if (err)
int rvu_cgx_init(struct rvu *rvu)
{
int cgx, err;
+ void *cgxd;
/* CGX port IDs start from 0 and are not necessarily contiguous.
* Hence we allocate resources based on the maximum port id value.
if (err)
return err;
+ /* Ensure event handler registration is completed before
+ * we turn on the links
+ */
+ mb();
+
+ /* Do link up for all CGX ports */
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
+ err = cgx_lmac_linkup_start(cgxd);
+ if (err)
+ dev_err(rvu->dev,
+ "Link up process failed to start on cgx %d\n",
+ cgx);
+ }
+
return 0;
}