]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blobdiff - drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
net: hns3: Refine the MSIX allocation for PF
[mirror_ubuntu-bionic-kernel.git] / drivers / net / ethernet / hisilicon / hns3 / hns3pf / hclge_main.c
index 18027bc543c152781ebfff7a7651eb204a17d9d4..2c309dec01f3328376db019bee3c2c92b1c3e228 100644 (file)
@@ -1,11 +1,5 @@
-/*
- * Copyright (c) 2016-2017 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
 
 #include <linux/acpi.h>
 #include <linux/device.h>
@@ -793,9 +787,10 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
                        count += 1;
                        handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
-               } else {
-                       count = -EOPNOTSUPP;
                }
+
+               count++;
+               handle->flags |= HNAE3_SUPPORT_SERDES_LOOPBACK;
        } else if (stringset == ETH_SS_STATS) {
                count = ARRAY_SIZE(g_mac_stats_string) +
                        ARRAY_SIZE(g_all_32bit_stats_string) +
@@ -938,18 +933,22 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
        hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
 
        if (hnae3_dev_roce_supported(hdev)) {
+               hdev->roce_base_msix_offset =
+               hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+                               HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
                hdev->num_roce_msi =
-               hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
-                              HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+               hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+                               HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
 
                /* PF should have NIC vectors and Roce vectors,
                 * NIC vectors are queued before Roce vectors.
                 */
-               hdev->num_msi = hdev->num_roce_msi  + HCLGE_ROCE_VECTOR_OFFSET;
+               hdev->num_msi = hdev->num_roce_msi  +
+                               hdev->roce_base_msix_offset;
        } else {
                hdev->num_msi =
-               hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
-                              HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+               hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+                               HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
        }
 
        return 0;
@@ -1038,38 +1037,38 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
        req = (struct hclge_cfg_param_cmd *)desc[0].data;
 
        /* get the configuration */
-       cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
-                                            HCLGE_CFG_VMDQ_M,
-                                            HCLGE_CFG_VMDQ_S);
-       cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
-                                    HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
-       cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
-                                          HCLGE_CFG_TQP_DESC_N_M,
-                                          HCLGE_CFG_TQP_DESC_N_S);
-
-       cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
-                                      HCLGE_CFG_PHY_ADDR_M,
-                                      HCLGE_CFG_PHY_ADDR_S);
-       cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
-                                        HCLGE_CFG_MEDIA_TP_M,
-                                        HCLGE_CFG_MEDIA_TP_S);
-       cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
-                                        HCLGE_CFG_RX_BUF_LEN_M,
-                                        HCLGE_CFG_RX_BUF_LEN_S);
+       cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+                                             HCLGE_CFG_VMDQ_M,
+                                             HCLGE_CFG_VMDQ_S);
+       cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+                                     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
+       cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+                                           HCLGE_CFG_TQP_DESC_N_M,
+                                           HCLGE_CFG_TQP_DESC_N_S);
+
+       cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
+                                       HCLGE_CFG_PHY_ADDR_M,
+                                       HCLGE_CFG_PHY_ADDR_S);
+       cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
+                                         HCLGE_CFG_MEDIA_TP_M,
+                                         HCLGE_CFG_MEDIA_TP_S);
+       cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
+                                         HCLGE_CFG_RX_BUF_LEN_M,
+                                         HCLGE_CFG_RX_BUF_LEN_S);
        /* get mac_address */
        mac_addr_tmp = __le32_to_cpu(req->param[2]);
-       mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
-                                          HCLGE_CFG_MAC_ADDR_H_M,
-                                          HCLGE_CFG_MAC_ADDR_H_S);
+       mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
+                                           HCLGE_CFG_MAC_ADDR_H_M,
+                                           HCLGE_CFG_MAC_ADDR_H_S);
 
        mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
 
-       cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
-                                           HCLGE_CFG_DEFAULT_SPEED_M,
-                                           HCLGE_CFG_DEFAULT_SPEED_S);
-       cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
-                                          HCLGE_CFG_RSS_SIZE_M,
-                                          HCLGE_CFG_RSS_SIZE_S);
+       cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
+                                            HCLGE_CFG_DEFAULT_SPEED_M,
+                                            HCLGE_CFG_DEFAULT_SPEED_S);
+       cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
+                                           HCLGE_CFG_RSS_SIZE_M,
+                                           HCLGE_CFG_RSS_SIZE_S);
 
        for (i = 0; i < ETH_ALEN; i++)
                cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
@@ -1077,9 +1076,9 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
        req = (struct hclge_cfg_param_cmd *)desc[1].data;
        cfg->numa_node_map = __le32_to_cpu(req->param[0]);
 
-       cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]),
-                                           HCLGE_CFG_SPEED_ABILITY_M,
-                                           HCLGE_CFG_SPEED_ABILITY_S);
+       cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
+                                            HCLGE_CFG_SPEED_ABILITY_M,
+                                            HCLGE_CFG_SPEED_ABILITY_S);
 }
 
 /* hclge_get_cfg: query the static parameter from flash
@@ -1098,22 +1097,22 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
                req = (struct hclge_cfg_param_cmd *)desc[i].data;
                hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
                                           true);
-               hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
-                              HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
+               hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
+                               HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
                /* Len should be united by 4 bytes when send to hardware */
-               hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
-                              HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
+               hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
+                               HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
                req->offset = cpu_to_le32(offset);
        }
 
        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
        if (ret) {
-               dev_err(&hdev->pdev->dev,
-                       "get config failed %d.\n", ret);
+               dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
                return ret;
        }
 
        hclge_parse_cfg(hcfg, desc);
+
        return 0;
 }
 
@@ -1130,13 +1129,10 @@ static int hclge_get_cap(struct hclge_dev *hdev)
 
        /* get pf resource */
        ret = hclge_query_pf_resource(hdev);
-       if (ret) {
-               dev_err(&hdev->pdev->dev,
-                       "query pf resource error %d.\n", ret);
-               return ret;
-       }
+       if (ret)
+               dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
 
-       return 0;
+       return ret;
 }
 
 static int hclge_configure(struct hclge_dev *hdev)
@@ -1189,7 +1185,7 @@ static int hclge_configure(struct hclge_dev *hdev)
 
        /* Currently not support uncontiuous tc */
        for (i = 0; i < hdev->tm_info.num_tc; i++)
-               hnae_set_bit(hdev->hw_tc_map, i, 1);
+               hnae3_set_bit(hdev->hw_tc_map, i, 1);
 
        hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
 
@@ -1208,13 +1204,13 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
        req = (struct hclge_cfg_tso_status_cmd *)desc.data;
 
        tso_mss = 0;
-       hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
-                      HCLGE_TSO_MSS_MIN_S, tso_mss_min);
+       hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
+                       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
        req->tso_mss_min = cpu_to_le16(tso_mss);
 
        tso_mss = 0;
-       hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
-                      HCLGE_TSO_MSS_MIN_S, tso_mss_max);
+       hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
+                       HCLGE_TSO_MSS_MIN_S, tso_mss_max);
        req->tso_mss_max = cpu_to_le16(tso_mss);
 
        return hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -1265,13 +1261,10 @@ static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
        req->tqp_vid = cpu_to_le16(tqp_vid);
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (ret) {
-               dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
-                       ret);
-               return ret;
-       }
+       if (ret)
+               dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
 
-       return 0;
+       return ret;
 }
 
 static int  hclge_assign_tqp(struct hclge_vport *vport,
@@ -1330,12 +1323,10 @@ static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
                return -ENOMEM;
 
        ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
-       if (ret) {
+       if (ret)
                dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
-               return -EINVAL;
-       }
 
-       return 0;
+       return ret;
 }
 
 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
@@ -1487,13 +1478,11 @@ static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
        }
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (ret) {
+       if (ret)
                dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
                        ret);
-               return ret;
-       }
 
-       return 0;
+       return ret;
 }
 
 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
@@ -1501,13 +1490,10 @@ static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
 {
        int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
 
-       if (ret) {
-               dev_err(&hdev->pdev->dev,
-                       "tx buffer alloc failed %d\n", ret);
-               return ret;
-       }
+       if (ret)
+               dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
 
-       return 0;
+       return ret;
 }
 
 static int hclge_get_tc_num(struct hclge_dev *hdev)
@@ -1825,17 +1811,13 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
                            (1 << HCLGE_TC0_PRI_BUF_EN_B));
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (ret) {
+       if (ret)
                dev_err(&hdev->pdev->dev,
                        "rx private buffer alloc cmd failed %d\n", ret);
-               return ret;
-       }
 
-       return 0;
+       return ret;
 }
 
-#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
-
 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
                                   struct hclge_pkt_buf_alloc *buf_alloc)
 {
@@ -1863,25 +1845,21 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
                        req->tc_wl[j].high =
                                cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
                        req->tc_wl[j].high |=
-                               cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
-                                           HCLGE_RX_PRIV_EN_B);
+                               cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
                        req->tc_wl[j].low =
                                cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
                        req->tc_wl[j].low |=
-                               cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
-                                           HCLGE_RX_PRIV_EN_B);
+                                cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
                }
        }
 
        /* Send 2 descriptor at one time */
        ret = hclge_cmd_send(&hdev->hw, desc, 2);
-       if (ret) {
+       if (ret)
                dev_err(&hdev->pdev->dev,
                        "rx private waterline config cmd failed %d\n",
                        ret);
-               return ret;
-       }
-       return 0;
+       return ret;
 }
 
 static int hclge_common_thrd_config(struct hclge_dev *hdev,
@@ -1911,24 +1889,20 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
                        req->com_thrd[j].high =
                                cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
                        req->com_thrd[j].high |=
-                               cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
-                                           HCLGE_RX_PRIV_EN_B);
+                                cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
                        req->com_thrd[j].low =
                                cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
                        req->com_thrd[j].low |=
-                               cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
-                                           HCLGE_RX_PRIV_EN_B);
+                                cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
                }
        }
 
        /* Send 2 descriptors at one time */
        ret = hclge_cmd_send(&hdev->hw, desc, 2);
-       if (ret) {
+       if (ret)
                dev_err(&hdev->pdev->dev,
                        "common threshold config cmd failed %d\n", ret);
-               return ret;
-       }
-       return 0;
+       return ret;
 }
 
 static int hclge_common_wl_config(struct hclge_dev *hdev,
@@ -1943,23 +1917,17 @@ static int hclge_common_wl_config(struct hclge_dev *hdev,
 
        req = (struct hclge_rx_com_wl *)desc.data;
        req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
-       req->com_wl.high |=
-               cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
-                           HCLGE_RX_PRIV_EN_B);
+       req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
 
        req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
-       req->com_wl.low |=
-               cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
-                           HCLGE_RX_PRIV_EN_B);
+       req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (ret) {
+       if (ret)
                dev_err(&hdev->pdev->dev,
                        "common waterline config cmd failed %d\n", ret);
-               return ret;
-       }
 
-       return 0;
+       return ret;
 }
 
 int hclge_buffer_alloc(struct hclge_dev *hdev)
@@ -2074,7 +2042,7 @@ static int hclge_init_msi(struct hclge_dev *hdev)
        hdev->num_msi_left = vectors;
        hdev->base_msi_vector = pdev->irq;
        hdev->roce_base_vector = hdev->base_msi_vector +
-                               HCLGE_ROCE_VECTOR_OFFSET;
+                               hdev->roce_base_msix_offset;
 
        hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
                                           sizeof(u16), GFP_KERNEL);
@@ -2118,48 +2086,48 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
 
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
 
-       hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
+       hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
 
        switch (speed) {
        case HCLGE_MAC_SPEED_10M:
-               hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-                              HCLGE_CFG_SPEED_S, 6);
+               hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+                               HCLGE_CFG_SPEED_S, 6);
                break;
        case HCLGE_MAC_SPEED_100M:
-               hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-                              HCLGE_CFG_SPEED_S, 7);
+               hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+                               HCLGE_CFG_SPEED_S, 7);
                break;
        case HCLGE_MAC_SPEED_1G:
-               hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-                              HCLGE_CFG_SPEED_S, 0);
+               hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+                               HCLGE_CFG_SPEED_S, 0);
                break;
        case HCLGE_MAC_SPEED_10G:
-               hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-                              HCLGE_CFG_SPEED_S, 1);
+               hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+                               HCLGE_CFG_SPEED_S, 1);
                break;
        case HCLGE_MAC_SPEED_25G:
-               hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-                              HCLGE_CFG_SPEED_S, 2);
+               hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+                               HCLGE_CFG_SPEED_S, 2);
                break;
        case HCLGE_MAC_SPEED_40G:
-               hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-                              HCLGE_CFG_SPEED_S, 3);
+               hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+                               HCLGE_CFG_SPEED_S, 3);
                break;
        case HCLGE_MAC_SPEED_50G:
-               hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-                              HCLGE_CFG_SPEED_S, 4);
+               hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+                               HCLGE_CFG_SPEED_S, 4);
                break;
        case HCLGE_MAC_SPEED_100G:
-               hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
-                              HCLGE_CFG_SPEED_S, 5);
+               hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+                               HCLGE_CFG_SPEED_S, 5);
                break;
        default:
                dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
                return -EINVAL;
        }
 
-       hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
-                    1);
+       hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
+                     1);
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
@@ -2201,18 +2169,16 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
                return ret;
        }
 
-       *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
-       speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
-                                  HCLGE_QUERY_SPEED_S);
+       *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
+       speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
+                                   HCLGE_QUERY_SPEED_S);
 
        ret = hclge_parse_speed(speed_tmp, speed);
-       if (ret) {
+       if (ret)
                dev_err(&hdev->pdev->dev,
                        "could not parse speed(=%d), %d\n", speed_tmp, ret);
-               return -EIO;
-       }
 
-       return 0;
+       return ret;
 }
 
 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
@@ -2225,17 +2191,15 @@ static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
 
        req = (struct hclge_config_auto_neg_cmd *)desc.data;
-       hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
+       hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
        req->cfg_an_cmd_flag = cpu_to_le32(flag);
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (ret) {
+       if (ret)
                dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
                        ret);
-               return ret;
-       }
 
-       return 0;
+       return ret;
 }
 
 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
@@ -2269,8 +2233,8 @@ static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
        req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
 
-       hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
-                    mask_vlan ? 1 : 0);
+       hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
+                     mask_vlan ? 1 : 0);
        ether_addr_copy(req->mac_mask, mac_mask);
 
        status = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2288,8 +2252,10 @@ static int hclge_mac_init(struct hclge_dev *hdev)
        struct net_device *netdev = handle->kinfo.netdev;
        struct hclge_mac *mac = &hdev->hw.mac;
        u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+       struct hclge_vport *vport;
        int mtu;
        int ret;
+       int i;
 
        ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
        if (ret) {
@@ -2301,7 +2267,6 @@ static int hclge_mac_init(struct hclge_dev *hdev)
        mac->link = 0;
 
        /* Initialize the MTA table work mode */
-       hdev->accept_mta_mc     = true;
        hdev->enable_mta        = true;
        hdev->mta_mac_sel_type  = HCLGE_MAC_ADDR_47_36;
 
@@ -2314,11 +2279,17 @@ static int hclge_mac_init(struct hclge_dev *hdev)
                return ret;
        }
 
-       ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
-       if (ret) {
-               dev_err(&hdev->pdev->dev,
-                       "set mta filter mode fail ret=%d\n", ret);
-               return ret;
+       for (i = 0; i < hdev->num_alloc_vport; i++) {
+               vport = &hdev->vport[i];
+               vport->accept_mta_mc = false;
+
+               memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow));
+               ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false);
+               if (ret) {
+                       dev_err(&hdev->pdev->dev,
+                               "set mta filter mode fail ret=%d\n", ret);
+                       return ret;
+               }
        }
 
        ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
@@ -2334,13 +2305,11 @@ static int hclge_mac_init(struct hclge_dev *hdev)
                mtu = ETH_DATA_LEN;
 
        ret = hclge_set_mtu(handle, mtu);
-       if (ret) {
+       if (ret)
                dev_err(&hdev->pdev->dev,
                        "set mtu failed ret=%d\n", ret);
-               return ret;
-       }
 
-       return 0;
+       return ret;
 }
 
 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
@@ -2379,7 +2348,7 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev)
        }
 
        req = (struct hclge_link_status_cmd *)desc.data;
-       link_status = req->status & HCLGE_LINK_STATUS;
+       link_status = req->status & HCLGE_LINK_STATUS_UP_M;
 
        return !!link_status;
 }
@@ -2407,7 +2376,9 @@ static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
 
 static void hclge_update_link_status(struct hclge_dev *hdev)
 {
+       struct hnae3_client *rclient = hdev->roce_client;
        struct hnae3_client *client = hdev->nic_client;
+       struct hnae3_handle *rhandle;
        struct hnae3_handle *handle;
        int state;
        int i;
@@ -2419,6 +2390,10 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
                for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
                        handle = &hdev->vport[i].nic;
                        client->ops->link_status_change(handle, state);
+                       rhandle = &hdev->vport[i].roce;
+                       if (rclient && rclient->ops->link_status_change)
+                               rclient->ops->link_status_change(rhandle,
+                                                                state);
                }
                hdev->hw.mac.link = state;
        }
@@ -2498,7 +2473,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
        u32 cmdq_src_reg;
 
        /* fetch the events from their corresponding regs */
-       rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+       rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
        cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
 
        /* Assumption: If by any chance reset and mailbox events are reported
@@ -2510,12 +2485,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 
        /* check for vector0 reset event sources */
        if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+               set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
                set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
                *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
                return HCLGE_VECTOR0_EVENT_RST;
        }
 
        if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+               set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
                set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
                *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
                return HCLGE_VECTOR0_EVENT_RST;
@@ -2550,6 +2527,15 @@ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
        }
 }
 
+static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
+{
+       hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
+                               BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
+                               BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
+                               BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
+       hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
+}
+
 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
 {
        writel(enable ? 1 : 0, vector->addr);
@@ -2580,22 +2566,30 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
                 * mbx messages reported by this interrupt.
                 */
                hclge_mbx_task_schedule(hdev);
-
+               break;
        default:
-               dev_dbg(&hdev->pdev->dev,
-                       "received unknown or unhandled event of vector0\n");
+               dev_warn(&hdev->pdev->dev,
+                        "received unknown or unhandled event of vector0\n");
                break;
        }
 
-       /* we should clear the source of interrupt */
-       hclge_clear_event_cause(hdev, event_cause, clearval);
-       hclge_enable_vector(&hdev->misc_vector, true);
+       /* clear the source of interrupt if it is not cause by reset */
+       if (event_cause != HCLGE_VECTOR0_EVENT_RST) {
+               hclge_clear_event_cause(hdev, event_cause, clearval);
+               hclge_enable_vector(&hdev->misc_vector, true);
+       }
 
        return IRQ_HANDLED;
 }
 
 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
 {
+       if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
+               dev_warn(&hdev->pdev->dev,
+                        "vector(vector_id %d) has been freed.\n", vector_id);
+               return;
+       }
+
        hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
        hdev->num_msi_left += 1;
        hdev->num_msi_used -= 1;
@@ -2642,23 +2636,53 @@ static int hclge_notify_client(struct hclge_dev *hdev,
                               enum hnae3_reset_notify_type type)
 {
        struct hnae3_client *client = hdev->nic_client;
+       struct hnae3_handle *handle;
+       int ret;
        u16 i;
 
        if (!client->ops->reset_notify)
                return -EOPNOTSUPP;
 
        for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
-               struct hnae3_handle *handle = &hdev->vport[i].nic;
-               int ret;
-
+               handle = &hdev->vport[i].nic;
                ret = client->ops->reset_notify(handle, type);
-               if (ret)
+               if (ret) {
+                       dev_err(&hdev->pdev->dev,
+                               "notify nic client failed %d", ret);
                        return ret;
+               }
        }
 
        return 0;
 }
 
+static int hclge_notify_roce_client(struct hclge_dev *hdev,
+                                   enum hnae3_reset_notify_type type)
+{
+       struct hnae3_client *client = hdev->roce_client;
+       struct hnae3_handle *handle;
+       int ret = 0;
+       u16 i;
+
+       if (!client)
+               return 0;
+
+       if (!client->ops->reset_notify)
+               return -EOPNOTSUPP;
+
+       for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+               handle = &hdev->vport[i].roce;
+               ret = client->ops->reset_notify(handle, type);
+               if (ret) {
+                       dev_err(&hdev->pdev->dev,
+                               "notify roce client failed %d", ret);
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
 static int hclge_reset_wait(struct hclge_dev *hdev)
 {
 #define HCLGE_RESET_WATI_MS    100
@@ -2687,7 +2711,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
        }
 
        val = hclge_read_dev(&hdev->hw, reg);
-       while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
+       while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
                msleep(HCLGE_RESET_WATI_MS);
                val = hclge_read_dev(&hdev->hw, reg);
                cnt++;
@@ -2709,8 +2733,7 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
        int ret;
 
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
-       hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
-       hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
+       hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
        req->fun_reset_vfid = func_id;
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2729,13 +2752,13 @@ static void hclge_do_reset(struct hclge_dev *hdev)
        switch (hdev->reset_type) {
        case HNAE3_GLOBAL_RESET:
                val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
-               hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
+               hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
                hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
                dev_info(&pdev->dev, "Global Reset requested\n");
                break;
        case HNAE3_CORE_RESET:
                val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
-               hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
+               hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
                hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
                dev_info(&pdev->dev, "Core Reset requested\n");
                break;
@@ -2777,18 +2800,50 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
        return rst_level;
 }
 
+static void hclge_clear_reset_cause(struct hclge_dev *hdev)
+{
+       u32 clearval = 0;
+
+       switch (hdev->reset_type) {
+       case HNAE3_IMP_RESET:
+               clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+               break;
+       case HNAE3_GLOBAL_RESET:
+               clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+               break;
+       case HNAE3_CORE_RESET:
+               clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
+               break;
+       default:
+               break;
+       }
+
+       if (!clearval)
+               return;
+
+       hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
+       hclge_enable_vector(&hdev->misc_vector, true);
+}
+
 static void hclge_reset(struct hclge_dev *hdev)
 {
+       struct hnae3_handle *handle;
+
        /* perform reset of the stack & ae device for a client */
+       handle = &hdev->vport[0].nic;
 
+       hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+       hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+
+       rtnl_lock();
        hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
 
        if (!hclge_reset_wait(hdev)) {
-               rtnl_lock();
                hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
                hclge_reset_ae_dev(hdev->ae_dev);
                hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
-               rtnl_unlock();
+
+               hclge_clear_reset_cause(hdev);
        } else {
                /* schedule again to check pending resets later */
                set_bit(hdev->reset_type, &hdev->reset_pending);
@@ -2796,6 +2851,11 @@ static void hclge_reset(struct hclge_dev *hdev)
        }
 
        hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+       handle->last_reset_time = jiffies;
+       rtnl_unlock();
+
+       hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+       hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
 }
 
 static void hclge_reset_event(struct hnae3_handle *handle)
@@ -2808,8 +2868,13 @@ static void hclge_reset_event(struct hnae3_handle *handle)
         * know this if last reset request did not occur very recently (watchdog
         * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
         * In case of new request we reset the "reset level" to PF reset.
+        * And if it is a repeat reset request of the most recent one then we
+        * want to make sure we throttle the reset request. Therefore, we will
+        * not allow it again before 3*HZ times.
         */
-       if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
+       if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
+               return;
+       else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
                handle->reset_level = HNAE3_FUNC_RESET;
 
        dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
@@ -2821,8 +2886,6 @@ static void hclge_reset_event(struct hnae3_handle *handle)
 
        if (handle->reset_level < HNAE3_GLOBAL_RESET)
                handle->reset_level++;
-
-       handle->last_reset_time = jiffies;
 }
 
 static void hclge_reset_subtask(struct hclge_dev *hdev)
@@ -3063,23 +3126,21 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                u16 mode = 0;
 
-               hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
-               hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
-                              HCLGE_RSS_TC_SIZE_S, tc_size[i]);
-               hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
-                              HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
+               hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
+               hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
+                               HCLGE_RSS_TC_SIZE_S, tc_size[i]);
+               hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
+                               HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
 
                req->rss_tc_mode[i] = cpu_to_le16(mode);
        }
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (ret) {
+       if (ret)
                dev_err(&hdev->pdev->dev,
                        "Configure rss tc mode fail, status = %d\n", ret);
-               return ret;
-       }
 
-       return 0;
+       return ret;
 }
 
 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
@@ -3102,13 +3163,10 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
        req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
        req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (ret) {
+       if (ret)
                dev_err(&hdev->pdev->dev,
                        "Configure rss input fail, status = %d\n", ret);
-               return ret;
-       }
-
-       return 0;
+       return ret;
 }
 
 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
@@ -3444,16 +3502,16 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
        i = 0;
        for (node = ring_chain; node; node = node->next) {
                tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
-               hnae_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
-                              HCLGE_INT_TYPE_S,
-                              hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-               hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
-                              HCLGE_TQP_ID_S, node->tqp_index);
-               hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
-                              HCLGE_INT_GL_IDX_S,
-                              hnae_get_field(node->int_gl_idx,
-                                             HNAE3_RING_GL_IDX_M,
-                                             HNAE3_RING_GL_IDX_S));
+               hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
+                               HCLGE_INT_TYPE_S,
+                               hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
+               hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
+                               HCLGE_TQP_ID_S, node->tqp_index);
+               hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
+                               HCLGE_INT_GL_IDX_S,
+                               hnae3_get_field(node->int_gl_idx,
+                                               HNAE3_RING_GL_IDX_M,
+                                               HNAE3_RING_GL_IDX_S));
                req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
                if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
                        req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
@@ -3556,12 +3614,11 @@ int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
                HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (ret) {
+       if (ret)
                dev_err(&hdev->pdev->dev,
                        "Set promisc mode fail, status is %d.\n", ret);
-               return ret;
-       }
-       return 0;
+
+       return ret;
 }
 
 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -3601,20 +3658,20 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
        int ret;
 
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
-       hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
-       hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
-       hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
-       hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
-       hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
-       hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
-       hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
-       hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
-       hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
-       hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
-       hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
-       hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
-       hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
-       hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
+       hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
+       hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
+       hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
+       hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
+       hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
        req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -3642,7 +3699,7 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
 
        /* 2 Then setup the loopback flag */
        loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
-       hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
+       hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
 
        req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
 
@@ -3657,6 +3714,55 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
        return ret;
 }
 
+static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en)
+{
+#define HCLGE_SERDES_RETRY_MS  10
+#define HCLGE_SERDES_RETRY_NUM 100
+       struct hclge_serdes_lb_cmd *req;
+       struct hclge_desc desc;
+       int ret, i = 0;
+
+       req = (struct hclge_serdes_lb_cmd *)&desc.data[0];
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
+
+       if (en) {
+               req->enable = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+               req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+       } else {
+               req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+       }
+
+       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "serdes loopback set fail, ret = %d\n", ret);
+               return ret;
+       }
+
+       do {
+               msleep(HCLGE_SERDES_RETRY_MS);
+               hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
+                                          true);
+               ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+               if (ret) {
+                       dev_err(&hdev->pdev->dev,
+                               "serdes loopback get, ret = %d\n", ret);
+                       return ret;
+               }
+       } while (++i < HCLGE_SERDES_RETRY_NUM &&
+                !(req->result & HCLGE_CMD_SERDES_DONE_B));
+
+       if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
+               dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
+               return -EBUSY;
+       } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
+               dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
 static int hclge_set_loopback(struct hnae3_handle *handle,
                              enum hnae3_loop loop_mode, bool en)
 {
@@ -3668,6 +3774,9 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
        case HNAE3_MAC_INTER_LOOP_MAC:
                ret = hclge_set_mac_loopback(hdev, en);
                break;
+       case HNAE3_MAC_INTER_LOOP_SERDES:
+               ret = hclge_set_serdes_loopback(hdev, en);
+               break;
        default:
                ret = -ENOTSUPP;
                dev_err(&hdev->pdev->dev,
@@ -3730,9 +3839,6 @@ static int hclge_ae_start(struct hnae3_handle *handle)
        /* reset tqp stats */
        hclge_reset_tqp_stats(handle);
 
-       if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
-               return 0;
-
        ret = hclge_mac_start_phy(hdev);
        if (ret)
                return ret;
@@ -3748,9 +3854,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
 
        del_timer_sync(&hdev->service_timer);
        cancel_work_sync(&hdev->service_task);
+       clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
 
-       if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+       if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+               hclge_mac_stop_phy(hdev);
                return;
+       }
 
        for (i = 0; i < vport->alloc_tqps; i++)
                hclge_tqp_enable(hdev, i, 0, false);
@@ -3906,20 +4015,18 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
        req = (struct hclge_mta_filter_mode_cmd *)desc.data;
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
 
-       hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
-                    enable);
-       hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
-                      HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
+       hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
+                     enable);
+       hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
+                       HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (ret) {
+       if (ret)
                dev_err(&hdev->pdev->dev,
                        "Config mat filter mode failed for cmd_send, ret =%d.\n",
                        ret);
-               return ret;
-       }
 
-       return 0;
+       return ret;
 }
 
 int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
@@ -3933,19 +4040,17 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
        req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
 
-       hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
-                    enable);
+       hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
+                     enable);
        req->function_id = func_id;
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (ret) {
+       if (ret)
                dev_err(&hdev->pdev->dev,
                        "Config func_id enable failed for cmd_send, ret =%d.\n",
                        ret);
-               return ret;
-       }
 
-       return 0;
+       return ret;
 }
 
 static int hclge_set_mta_table_item(struct hclge_vport *vport,
@@ -3960,10 +4065,10 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport,
 
        req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
-       hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
+       hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
 
-       hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
-                      HCLGE_CFG_MTA_ITEM_IDX_S, idx);
+       hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
+                       HCLGE_CFG_MTA_ITEM_IDX_S, idx);
        req->item_idx = cpu_to_le16(item_idx);
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -3974,9 +4079,88 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport,
                return ret;
        }
 
+       if (enable)
+               set_bit(idx, vport->mta_shadow);
+       else
+               clear_bit(idx, vport->mta_shadow);
+
        return 0;
 }
 
+static int hclge_update_mta_status(struct hnae3_handle *handle)
+{
+       unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct net_device *netdev = handle->kinfo.netdev;
+       struct netdev_hw_addr *ha;
+       u16 tbl_idx;
+
+       memset(mta_status, 0, sizeof(mta_status));
+
+       /* update mta_status from mc addr list */
+       netdev_for_each_mc_addr(ha, netdev) {
+               tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr);
+               set_bit(tbl_idx, mta_status);
+       }
+
+       return hclge_update_mta_status_common(vport, mta_status,
+                                       0, HCLGE_MTA_TBL_SIZE, true);
+}
+
+int hclge_update_mta_status_common(struct hclge_vport *vport,
+                                  unsigned long *status,
+                                  u16 idx,
+                                  u16 count,
+                                  bool update_filter)
+{
+       struct hclge_dev *hdev = vport->back;
+       u16 update_max = idx + count;
+       u16 check_max;
+       int ret = 0;
+       bool used;
+       u16 i;
+
+       /* setup mta check range */
+       if (update_filter) {
+               i = 0;
+               check_max = HCLGE_MTA_TBL_SIZE;
+       } else {
+               i = idx;
+               check_max = update_max;
+       }
+
+       used = false;
+       /* check and update all mta item */
+       for (; i < check_max; i++) {
+               /* ignore unused item */
+               if (!test_bit(i, vport->mta_shadow))
+                       continue;
+
+               /* if i in update range then update it */
+               if (i >= idx && i < update_max)
+                       if (!test_bit(i - idx, status))
+                               hclge_set_mta_table_item(vport, i, false);
+
+               if (!used && test_bit(i, vport->mta_shadow))
+                       used = true;
+       }
+
+       /* no longer use mta, disable it */
+       if (vport->accept_mta_mc && update_filter && !used) {
+               ret = hclge_cfg_func_mta_filter(hdev,
+                                               vport->vport_id,
+                                               false);
+               if (ret)
+                       dev_err(&hdev->pdev->dev,
+                               "disable func mta filter fail ret=%d\n",
+                               ret);
+               else
+                       vport->accept_mta_mc = false;
+       }
+
+       return ret;
+}
+
 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
                                     struct hclge_mac_vlan_tbl_entry_cmd *req)
 {
@@ -4131,17 +4315,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
        }
 
        memset(&req, 0, sizeof(req));
-       hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
-       hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
-       hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
-       hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
-
-       hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
-       hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
-       hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
-                      HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
-       hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
-                      HCLGE_MAC_EPORT_PFID_S, 0);
+       hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+
+       hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
+                       HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
 
        req.egress_port = cpu_to_le16(egress_port);
 
@@ -4192,8 +4369,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
        }
 
        memset(&req, 0, sizeof(req));
-       hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
-       hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+       hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+       hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
        hclge_prepare_mac_addr(&req, addr);
        ret = hclge_remove_mac_vlan_tbl(vport, &req);
 
@@ -4205,7 +4382,7 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle,
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
 
-       return  hclge_add_mc_addr_common(vport, addr);
+       return hclge_add_mc_addr_common(vport, addr);
 }
 
 int hclge_add_mc_addr_common(struct hclge_vport *vport,
@@ -4225,10 +4402,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
                return -EINVAL;
        }
        memset(&req, 0, sizeof(req));
-       hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
-       hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
-       hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
-       hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+       hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+       hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+       hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
+       hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
        hclge_prepare_mac_addr(&req, addr);
        status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
        if (!status) {
@@ -4244,9 +4421,25 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
                status = hclge_add_mac_vlan_tbl(vport, &req, desc);
        }
 
-       /* Set MTA table for this MAC address */
-       tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
-       status = hclge_set_mta_table_item(vport, tbl_idx, true);
+       /* If mc mac vlan table is full, use MTA table */
+       if (status == -ENOSPC) {
+               if (!vport->accept_mta_mc) {
+                       status = hclge_cfg_func_mta_filter(hdev,
+                                                          vport->vport_id,
+                                                          true);
+                       if (status) {
+                               dev_err(&hdev->pdev->dev,
+                                       "set mta filter mode fail ret=%d\n",
+                                       status);
+                               return status;
+                       }
+                       vport->accept_mta_mc = true;
+               }
+
+               /* Set MTA table for this MAC address */
+               tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
+               status = hclge_set_mta_table_item(vport, tbl_idx, true);
+       }
 
        return status;
 }
@@ -4266,7 +4459,6 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
        struct hclge_mac_vlan_tbl_entry_cmd req;
        enum hclge_cmd_status status;
        struct hclge_desc desc[3];
-       u16 tbl_idx;
 
        /* mac addr check */
        if (!is_multicast_ether_addr(addr)) {
@@ -4277,10 +4469,10 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
        }
 
        memset(&req, 0, sizeof(req));
-       hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
-       hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
-       hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
-       hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+       hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+       hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+       hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
+       hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
        hclge_prepare_mac_addr(&req, addr);
        status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
        if (!status) {
@@ -4295,17 +4487,15 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
                        status = hclge_add_mac_vlan_tbl(vport, &req, desc);
 
        } else {
-               /* This mac addr do not exist, can't delete it */
-               dev_err(&hdev->pdev->dev,
-                       "Rm multicast mac addr failed, ret = %d.\n",
-                       status);
-               return -EIO;
+               /* Maybe this mac address is in mta table, but it cannot be
+                * deleted here because an entry of mta represents an address
+                * range rather than a specific address. the delete action to
+                * all entries will take effect in update_mta_status called by
+                * hns3_nic_set_rx_mode.
+                */
+               status = 0;
        }
 
-       /* Set MTB table for this MAC address */
-       tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
-       status = hclge_set_mta_table_item(vport, tbl_idx, false);
-
        return status;
 }
 
@@ -4465,13 +4655,11 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
        req->vlan_fe = filter_en;
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (ret) {
+       if (ret)
                dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
                        ret);
-               return ret;
-       }
 
-       return 0;
+       return ret;
 }
 
 #define HCLGE_FILTER_TYPE_VF           0
@@ -4663,19 +4851,19 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
        req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
        req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
        req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
-                       vcfg->accept_tag1 ? 1 : 0);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
-                       vcfg->accept_untag1 ? 1 : 0);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
-                       vcfg->accept_tag2 ? 1 : 0);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
-                       vcfg->accept_untag2 ? 1 : 0);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
-                    vcfg->insert_tag1_en ? 1 : 0);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
-                    vcfg->insert_tag2_en ? 1 : 0);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
+                     vcfg->accept_tag1 ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
+                     vcfg->accept_untag1 ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
+                     vcfg->accept_tag2 ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
+                     vcfg->accept_untag2 ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
+                     vcfg->insert_tag1_en ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
+                     vcfg->insert_tag2_en ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
 
        req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
        req->vf_bitmap[req->vf_offset] =
@@ -4701,14 +4889,14 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
 
        req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
-                    vcfg->strip_tag1_en ? 1 : 0);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
-                    vcfg->strip_tag2_en ? 1 : 0);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
-                    vcfg->vlan1_vlan_prionly ? 1 : 0);
-       hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
-                    vcfg->vlan2_vlan_prionly ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
+                     vcfg->strip_tag1_en ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
+                     vcfg->strip_tag2_en ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
+                     vcfg->vlan1_vlan_prionly ? 1 : 0);
+       hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
+                     vcfg->vlan2_vlan_prionly ? 1 : 0);
 
        req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
        req->vf_bitmap[req->vf_offset] =
@@ -4860,16 +5048,15 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
 
        req = (struct hclge_config_max_frm_size_cmd *)desc.data;
        req->max_frm_size = cpu_to_le16(max_frm_size);
+       req->min_frm_size = HCLGE_MAC_MIN_FRAME;
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (ret) {
+       if (ret)
                dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
-               return ret;
-       }
-
-       hdev->mps = max_frm_size;
+       else
+               hdev->mps = max_frm_size;
 
-       return 0;
+       return ret;
 }
 
 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
@@ -4904,7 +5091,7 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
 
        req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
        req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
-       hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
+       hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
@@ -4934,7 +5121,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
                return ret;
        }
 
-       return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+       return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
 }
 
 static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
@@ -5241,12 +5428,12 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
        phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
 
        retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
-       mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
-                                  HCLGE_PHY_MDIX_CTRL_S);
+       mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
+                                   HCLGE_PHY_MDIX_CTRL_S);
 
        retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
-       mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
-       is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
+       mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
+       is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
 
        phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
 
@@ -5392,7 +5579,6 @@ static int hclge_pci_init(struct hclge_dev *hdev)
 
        pci_set_master(pdev);
        hw = &hdev->hw;
-       hw->back = hdev;
        hw->io_base = pcim_iomap(pdev, 2, 0);
        if (!hw->io_base) {
                dev_err(&pdev->dev, "Can't map configuration register space\n");
@@ -5423,6 +5609,30 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
        pci_disable_device(pdev);
 }
 
+static void hclge_state_init(struct hclge_dev *hdev)
+{
+       set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
+       set_bit(HCLGE_STATE_DOWN, &hdev->state);
+       clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+       clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+       clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+       clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+}
+
+static void hclge_state_uninit(struct hclge_dev *hdev)
+{
+       set_bit(HCLGE_STATE_DOWN, &hdev->state);
+
+       if (hdev->service_timer.function)
+               del_timer_sync(&hdev->service_timer);
+       if (hdev->service_task.func)
+               cancel_work_sync(&hdev->service_task);
+       if (hdev->rst_service_task.func)
+               cancel_work_sync(&hdev->rst_service_task);
+       if (hdev->mbx_service_task.func)
+               cancel_work_sync(&hdev->mbx_service_task);
+}
+
 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 {
        struct pci_dev *pdev = ae_dev->pdev;
@@ -5438,8 +5648,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
        hdev->pdev = pdev;
        hdev->ae_dev = ae_dev;
        hdev->reset_type = HNAE3_NONE_RESET;
-       hdev->reset_request = 0;
-       hdev->reset_pending = 0;
        ae_dev->priv = hdev;
 
        ret = hclge_pci_init(hdev);
@@ -5558,15 +5766,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
        INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
        INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
 
+       hclge_clear_all_event_cause(hdev);
+
        /* Enable MISC vector(vector0) */
        hclge_enable_vector(&hdev->misc_vector, true);
 
-       set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
-       set_bit(HCLGE_STATE_DOWN, &hdev->state);
-       clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
-       clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
-       clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
-       clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+       hclge_state_init(hdev);
 
        pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
        return 0;
@@ -5660,9 +5865,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
                return ret;
        }
 
-       /* Enable MISC vector(vector0) */
-       hclge_enable_vector(&hdev->misc_vector, true);
-
        dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
                 HCLGE_DRIVER_NAME);
 
@@ -5674,22 +5876,15 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
        struct hclge_dev *hdev = ae_dev->priv;
        struct hclge_mac *mac = &hdev->hw.mac;
 
-       set_bit(HCLGE_STATE_DOWN, &hdev->state);
-
-       if (hdev->service_timer.function)
-               del_timer_sync(&hdev->service_timer);
-       if (hdev->service_task.func)
-               cancel_work_sync(&hdev->service_task);
-       if (hdev->rst_service_task.func)
-               cancel_work_sync(&hdev->rst_service_task);
-       if (hdev->mbx_service_task.func)
-               cancel_work_sync(&hdev->mbx_service_task);
+       hclge_state_uninit(hdev);
 
        if (mac->phydev)
                mdiobus_unregister(mac->mdio_bus);
 
        /* Disable MISC vector(vector0) */
        hclge_enable_vector(&hdev->misc_vector, false);
+       synchronize_irq(hdev->misc_vector.vector_irq);
+
        hclge_destroy_cmd_queue(&hdev->hw);
        hclge_misc_irq_uninit(hdev);
        hclge_pci_uninit(hdev);
@@ -5765,6 +5960,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
        u32 *rss_indir;
        int ret, i;
 
+       /* Free the old tqps, then reallocate with the new tqp number during nic setup */
        hclge_release_tqp(vport);
 
        ret = hclge_knic_setup(vport, new_tqps_num);
@@ -6009,8 +6205,8 @@ static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
 
        req = (struct hclge_set_led_state_cmd *)desc.data;
-       hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
-                      HCLGE_LED_LOCATE_STATE_S, locate_led_status);
+       hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
+                       HCLGE_LED_LOCATE_STATE_S, locate_led_status);
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
@@ -6109,6 +6305,7 @@ static const struct hnae3_ae_ops hclge_ops = {
        .rm_uc_addr = hclge_rm_uc_addr,
        .add_mc_addr = hclge_add_mc_addr,
        .rm_mc_addr = hclge_rm_mc_addr,
+       .update_mta_status = hclge_update_mta_status,
        .set_autoneg = hclge_set_autoneg,
        .get_autoneg = hclge_get_autoneg,
        .get_pauseparam = hclge_get_pauseparam,
@@ -6139,7 +6336,6 @@ static const struct hnae3_ae_ops hclge_ops = {
 
 static struct hnae3_ae_algo ae_algo = {
        .ops = &hclge_ops,
-       .name = HCLGE_NAME,
        .pdev_id_table = ae_algo_pci_tbl,
 };