git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
net: hns3: Add mtu setting support for vf
authorYunsheng Lin <linyunsheng@huawei.com>
Sun, 18 Nov 2018 03:19:13 +0000 (03:19 +0000)
committerKleber Sacilotto de Souza <kleber.souza@canonical.com>
Mon, 14 Jan 2019 09:28:55 +0000 (09:28 +0000)
BugLink: https://bugs.launchpad.net/bugs/1810457
The patch adds MTU setting support for the VF; currently the VF and
PF share the same hardware MTU setting. The MTU set by a VF must be
less than or equal to the PF's MTU, and the MTU set by the PF must be
greater than or equal to every VF's MTU.

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Jian Shen <shenjian15@huawei.com>
Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(backported from commit 818f167587f402aedcf406ba57d0caff739dcad8)
[ dannf: Trivial context fix in hclgevf_main.c ]
Signed-off-by: dann frazier <dann.frazier@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c

index fd2338f0c34ecba3030f24b698c2fb4517a00848..4d9cf39da48c7abc77fd7fa734770db8719d699f 100644 (file)
@@ -38,6 +38,7 @@ enum HCLGE_MBX_OPCODE {
        HCLGE_MBX_QUEUE_RESET,          /* (VF -> PF) reset queue */
        HCLGE_MBX_KEEP_ALIVE,           /* (VF -> PF) send keep alive cmd */
        HCLGE_MBX_SET_ALIVE,            /* (VF -> PF) set alive state */
+       HCLGE_MBX_SET_MTU,              /* (VF -> PF) set mtu */
 };
 
 /* below are per-VF mac-vlan subcodes */
index 2ab0d6a415e8207128da4b2a268f72c4680a9bfc..9ff026db2825f17da03f5169234fa147300e0c63 100644 (file)
@@ -1166,6 +1166,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
        for (i = 0; i < num_vport; i++) {
                vport->back = hdev;
                vport->vport_id = i;
+               vport->mps = HCLGE_MAC_DEFAULT_FRAME;
 
                if (i == 0)
                        ret = hclge_vport_setup(vport, tqp_main_vport);
@@ -2921,6 +2922,10 @@ static void hclge_update_vport_alive(struct hclge_dev *hdev)
 
                if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
                        clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+
+               /* If vf is not alive, set to default value */
+               if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+                       vport->mps = HCLGE_MAC_DEFAULT_FRAME;
        }
 }
 
@@ -6400,8 +6405,6 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
        struct hclge_config_max_frm_size_cmd *req;
        struct hclge_desc desc;
 
-       new_mps = max(new_mps, HCLGE_MAC_DEFAULT_FRAME);
-
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
 
        req = (struct hclge_config_max_frm_size_cmd *)desc.data;
@@ -6414,28 +6417,56 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
+
+       return hclge_set_vport_mtu(vport, new_mtu);
+}
+
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
+{
        struct hclge_dev *hdev = vport->back;
-       int max_frm_size, ret;
+       int i, max_frm_size, ret = 0;
 
        max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
        if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
            max_frm_size > HCLGE_MAC_MAX_FRAME)
                return -EINVAL;
 
+       max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
+       mutex_lock(&hdev->vport_lock);
+       /* VF's mps must fit within hdev->mps */
+       if (vport->vport_id && max_frm_size > hdev->mps) {
+               mutex_unlock(&hdev->vport_lock);
+               return -EINVAL;
+       } else if (vport->vport_id) {
+               vport->mps = max_frm_size;
+               mutex_unlock(&hdev->vport_lock);
+               return 0;
+       }
+
+       /* PF's mps must be greater than VF's mps */
+       for (i = 1; i < hdev->num_alloc_vport; i++)
+               if (max_frm_size < hdev->vport[i].mps) {
+                       mutex_unlock(&hdev->vport_lock);
+                       return -EINVAL;
+               }
+
        ret = hclge_set_mac_mtu(hdev, max_frm_size);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Change mtu fail, ret =%d\n", ret);
-               return ret;
+               goto out;
        }
 
        hdev->mps = max_frm_size;
+       vport->mps = max_frm_size;
 
        ret = hclge_buffer_alloc(hdev);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "Allocate buffer fail, ret =%d\n", ret);
 
+out:
+       mutex_unlock(&hdev->vport_lock);
        return ret;
 }
 
@@ -7064,6 +7095,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
        ae_dev->priv = hdev;
        hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
 
+       mutex_init(&hdev->vport_lock);
+
        ret = hclge_pci_init(hdev);
        if (ret) {
                dev_err(&pdev->dev, "PCI init failed\n");
@@ -7363,6 +7396,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
        hclge_destroy_cmd_queue(&hdev->hw);
        hclge_misc_irq_uninit(hdev);
        hclge_pci_uninit(hdev);
+       mutex_destroy(&hdev->vport_lock);
        ae_dev->priv = NULL;
 }
 
index 5617770c9eb8b4d45f65b48bd5b9a75e29215da3..5f24dd41d7eb292871170fa0a0bc5f294bc14147 100644 (file)
@@ -678,6 +678,8 @@ struct hclge_dev {
 
        u32 pkt_buf_size; /* Total pf buf size for tx/rx */
        u32 mps; /* Max packet size */
+       /* vport_lock protect resource shared by vports */
+       struct mutex vport_lock;
 
        struct hclge_vlan_type_cfg vlan_type_cfg;
 
@@ -761,6 +763,7 @@ struct hclge_vport {
 
        unsigned long state;
        unsigned long last_active_jiffies;
+       u32 mps; /* Max packet size */
 };
 
 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -810,4 +813,5 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev);
 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
 int hclge_vport_start(struct hclge_vport *vport);
 void hclge_vport_stop(struct hclge_vport *vport);
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
 #endif
index b64f4424837db91b29a17cc55f403df718f03015..e16a730a5f5452485c23990954d2a1097cc70831 100644 (file)
@@ -401,6 +401,18 @@ static void hclge_vf_keep_alive(struct hclge_vport *vport,
        vport->last_active_jiffies = jiffies;
 }
 
+static int hclge_set_vf_mtu(struct hclge_vport *vport,
+                           struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+       int ret;
+       u32 mtu;
+
+       memcpy(&mtu, &mbx_req->msg[2], sizeof(mtu));
+       ret = hclge_set_vport_mtu(vport, mtu);
+
+       return hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
+}
+
 static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
 {
        u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
@@ -515,6 +527,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
                case HCLGE_MBX_KEEP_ALIVE:
                        hclge_vf_keep_alive(vport, req);
                        break;
+               case HCLGE_MBX_SET_MTU:
+                       ret = hclge_set_vf_mtu(vport, req);
+                       if (ret)
+                               dev_err(&hdev->pdev->dev,
+                                       "VF fail(%d) to set mtu\n", ret);
+                       break;
                default:
                        dev_err(&hdev->pdev->dev,
                                "un-supported mailbox message, code = %d\n",
index 646b92de5623047567a5842decc76a320c8d16a8..355b1e50ce9e5048514b5ad7a07eda2b47315f13 100644 (file)
@@ -1081,6 +1081,14 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
                                    2, true, NULL, 0);
 }
 
+static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
+{
+       struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+       return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
+                                   sizeof(new_mtu), true, NULL, 0);
+}
+
 static int hclgevf_notify_client(struct hclgevf_dev *hdev,
                                 enum hnae3_reset_notify_type type)
 {
@@ -2505,6 +2513,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
        .get_hw_reset_stat = hclgevf_get_hw_reset_stat,
        .ae_dev_resetting = hclgevf_ae_dev_resetting,
        .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
+       .set_mtu = hclgevf_set_mtu,
 };
 
 static struct hnae3_ae_algo ae_algovf = {