net: hns3: Refine the MSIX allocation for PF
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 460f551a8a013d87354f213a298dd3523d672fc1..2c309dec01f3328376db019bee3c2c92b1c3e228 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -789,7 +789,7 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
                        handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
                }
 
-               count ++;
+               count++;
                handle->flags |= HNAE3_SUPPORT_SERDES_LOOPBACK;
        } else if (stringset == ETH_SS_STATS) {
                count = ARRAY_SIZE(g_mac_stats_string) +
@@ -933,6 +933,9 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
        hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
 
        if (hnae3_dev_roce_supported(hdev)) {
+               hdev->roce_base_msix_offset =
+               hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+                               HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
                hdev->num_roce_msi =
                hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
                                HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
@@ -940,7 +943,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
                /* PF should have NIC vectors and Roce vectors,
                 * NIC vectors are queued before Roce vectors.
                 */
-               hdev->num_msi = hdev->num_roce_msi  + HCLGE_ROCE_VECTOR_OFFSET;
+               hdev->num_msi = hdev->num_roce_msi  +
+                               hdev->roce_base_msix_offset;
        } else {
                hdev->num_msi =
                hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
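The two hunks above are the core of the MSIX rework: instead of assuming the RoCE vectors begin at the fixed HCLGE_ROCE_VECTOR_OFFSET, the PF now reads both the RoCE vector count and their starting offset out of the firmware's PF-resource reply, pulling the offset field out of msixcap_localid_ba_rocee with hnae3_get_field(). The stand-alone sketch below shows the mask-and-shift shape of that helper; the EXAMPLE_* constants are assumptions for illustration, not the driver's real HCLGE_MSIX_OFT_ROCEE_M / HCLGE_MSIX_OFT_ROCEE_S definitions, which live in headers outside this diff.

/* Illustrative only: mask/shift values are made up for the example. */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_MSIX_OFT_ROCEE_S	0
#define EXAMPLE_MSIX_OFT_ROCEE_M	(0xffffU << EXAMPLE_MSIX_OFT_ROCEE_S)

/* Same shape as hnae3_get_field(): mask the field out, then shift it down. */
static uint16_t get_field(uint16_t value, uint32_t mask, unsigned int shift)
{
	return (value & mask) >> shift;
}

int main(void)
{
	uint16_t msixcap_localid_ba_rocee = 0x0041;	/* pretend firmware reply */
	uint16_t roce_base_msix_offset =
		get_field(msixcap_localid_ba_rocee,
			  EXAMPLE_MSIX_OFT_ROCEE_M, EXAMPLE_MSIX_OFT_ROCEE_S);

	printf("RoCE MSI-X vectors start at offset %u\n", roce_base_msix_offset);
	return 0;
}

With that offset in hand, hdev->num_msi becomes num_roce_msi + roce_base_msix_offset rather than a sum built on a hard-coded constant.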
@@ -1814,8 +1818,6 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
        return ret;
 }
 
-#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
-
 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
                                   struct hclge_pkt_buf_alloc *buf_alloc)
 {
@@ -1843,13 +1845,11 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
                        req->tc_wl[j].high =
                                cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
                        req->tc_wl[j].high |=
-                               cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
-                                           HCLGE_RX_PRIV_EN_B);
+                               cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
                        req->tc_wl[j].low =
                                cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
                        req->tc_wl[j].low |=
-                               cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
-                                           HCLGE_RX_PRIV_EN_B);
+                                cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
                }
        }
 
@@ -1889,13 +1889,11 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
                        req->com_thrd[j].high =
                                cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
                        req->com_thrd[j].high |=
-                               cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
-                                           HCLGE_RX_PRIV_EN_B);
+                                cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
                        req->com_thrd[j].low =
                                cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
                        req->com_thrd[j].low |=
-                               cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
-                                           HCLGE_RX_PRIV_EN_B);
+                                cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
                }
        }
 
@@ -1919,19 +1917,16 @@ static int hclge_common_wl_config(struct hclge_dev *hdev,
 
        req = (struct hclge_rx_com_wl *)desc.data;
        req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
-       req->com_wl.high |=
-               cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
-                           HCLGE_RX_PRIV_EN_B);
+       req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
 
        req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
-       req->com_wl.low |=
-               cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
-                           HCLGE_RX_PRIV_EN_B);
+       req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "common waterline config cmd failed %d\n", ret);
+
        return ret;
 }
 
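The three watermark/threshold hunks above all make the same simplification. The deleted HCLGE_PRIV_ENABLE(a) macro evaluated to 1 for any value greater than zero, so for a non-zero watermark the old expression HCLGE_PRIV_ENABLE(x) << HCLGE_RX_PRIV_EN_B was already nothing more than BIT(HCLGE_RX_PRIV_EN_B); the patch drops the conditional and ORs the enable bit in unconditionally. A tiny check of that equivalence, with an assumed bit position since the real HCLGE_RX_PRIV_EN_B value is defined outside this diff:

#include <assert.h>
#include <stdint.h>

#define EXAMPLE_RX_PRIV_EN_B	7			/* assumed bit position */
#define BIT(b)			(1U << (b))		/* same as the kernel macro */
#define PRIV_ENABLE(a)		((a) > 0 ? 1 : 0)	/* the macro being removed */

int main(void)
{
	uint16_t wl_high = 0x60;	/* any non-zero watermark value */

	assert((PRIV_ENABLE(wl_high) << EXAMPLE_RX_PRIV_EN_B) ==
	       BIT(EXAMPLE_RX_PRIV_EN_B));
	return 0;
}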
@@ -2016,7 +2011,6 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
 
        roce->rinfo.netdev = nic->kinfo.netdev;
        roce->rinfo.roce_io_base = vport->back->hw.io_base;
-       roce->rinfo.is_reset = false;
 
        roce->pdev = nic->pdev;
        roce->ae_algo = nic->ae_algo;
@@ -2048,7 +2042,7 @@ static int hclge_init_msi(struct hclge_dev *hdev)
        hdev->num_msi_left = vectors;
        hdev->base_msi_vector = pdev->irq;
        hdev->roce_base_vector = hdev->base_msi_vector +
-                               HCLGE_ROCE_VECTOR_OFFSET;
+                               hdev->roce_base_msix_offset;
 
        hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
                                           sizeof(u16), GFP_KERNEL);
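With the offset coming from firmware, the split of the MSI-X table between NIC and RoCE is now computed instead of hard-coded: NIC vectors occupy entries [0, roce_base_msix_offset) and the RoCE vectors follow immediately, which is also where roce_base_vector points in the hunk above. A sketch with made-up numbers (the real values come from the PF resource query):

#include <stdio.h>

int main(void)
{
	/* All three inputs are assumptions for the example. */
	unsigned int base_msi_vector = 64;		/* pdev->irq after enabling MSI-X */
	unsigned int roce_base_msix_offset = 65;	/* reported by firmware */
	unsigned int num_roce_msi = 63;			/* reported by firmware */
	unsigned int num_msi = num_roce_msi + roce_base_msix_offset;

	printf("NIC  : MSI-X entries [0, %u)\n", roce_base_msix_offset);
	printf("RoCE : MSI-X entries [%u, %u)\n", roce_base_msix_offset, num_msi);
	printf("first RoCE vector = %u\n", base_msi_vector + roce_base_msix_offset);
	return 0;
}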
@@ -2382,7 +2376,9 @@ static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
 
 static void hclge_update_link_status(struct hclge_dev *hdev)
 {
+       struct hnae3_client *rclient = hdev->roce_client;
        struct hnae3_client *client = hdev->nic_client;
+       struct hnae3_handle *rhandle;
        struct hnae3_handle *handle;
        int state;
        int i;
@@ -2394,6 +2390,10 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
                for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
                        handle = &hdev->vport[i].nic;
                        client->ops->link_status_change(handle, state);
+                       rhandle = &hdev->vport[i].roce;
+                       if (rclient && rclient->ops->link_status_change)
+                               rclient->ops->link_status_change(rhandle,
+                                                                state);
                }
                hdev->hw.mac.link = state;
        }
@@ -2473,7 +2473,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
        u32 cmdq_src_reg;
 
        /* fetch the events from their corresponding regs */
-       rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+       rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
        cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
 
        /* Assumption: If by any chance reset and mailbox events are reported
@@ -2485,12 +2485,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 
        /* check for vector0 reset event sources */
        if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+               set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
                set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
                *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
                return HCLGE_VECTOR0_EVENT_RST;
        }
 
        if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+               set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
                set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
                *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
                return HCLGE_VECTOR0_EVENT_RST;
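Besides recording the pending reset, the hunk above now sets HCLGE_STATE_CMD_DISABLE as soon as a global or core reset is detected, so the command queue can be fenced off while the hardware goes through the reset. The diff does not show where that flag is consumed; the fragment below is only a minimal sketch of the intended pattern, with hypothetical names, assuming the submit path checks the flag and bails out early.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical illustration of fencing command submission during a reset. */
static bool cmd_disabled;

static void example_reset_irq(void)
{
	cmd_disabled = true;	/* mirrors set_bit(HCLGE_STATE_CMD_DISABLE, ...) */
}

static int example_cmd_send(void)
{
	if (cmd_disabled)
		return -EBUSY;	/* don't post descriptors while resetting */
	/* ... fill descriptors, ring the doorbell, wait for completion ... */
	return 0;
}

int main(void)
{
	example_reset_irq();
	printf("cmd_send during reset -> %d\n", example_cmd_send());
	return 0;
}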
@@ -2525,6 +2527,15 @@ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
        }
 }
 
+static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
+{
+       hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
+                               BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
+                               BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
+                               BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
+       hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
+}
+
 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
 {
        writel(enable ? 1 : 0, vector->addr);
@@ -2633,33 +2644,43 @@ static int hclge_notify_client(struct hclge_dev *hdev,
                return -EOPNOTSUPP;
 
        for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
-               if (hdev->roce_client) {
-                       handle = &hdev->vport[i].roce;
-                       client = hdev->roce_client;
-                       if (type == HNAE3_UNINIT_CLIENT)
-                               if (handle)
-                                       client->ops->uninit_instance(handle,
-                                                                    true);
+               handle = &hdev->vport[i].nic;
+               ret = client->ops->reset_notify(handle, type);
+               if (ret) {
+                       dev_err(&hdev->pdev->dev,
+                               "notify nic client failed %d", ret);
+                       return ret;
                }
+       }
 
-               client = hdev->nic_client;
-               handle = &hdev->vport[i].nic;
+       return 0;
+}
+
+static int hclge_notify_roce_client(struct hclge_dev *hdev,
+                                   enum hnae3_reset_notify_type type)
+{
+       struct hnae3_client *client = hdev->roce_client;
+       struct hnae3_handle *handle;
+       int ret = 0;
+       u16 i;
+
+       if (!client)
+               return 0;
 
+       if (!client->ops->reset_notify)
+               return -EOPNOTSUPP;
+
+       for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+               handle = &hdev->vport[i].roce;
                ret = client->ops->reset_notify(handle, type);
-               if (ret)
+               if (ret) {
+                       dev_err(&hdev->pdev->dev,
+                               "notify roce client failed %d", ret);
                        return ret;
-
-               if (hdev->roce_client) {
-                       handle = &hdev->vport[i].roce;
-                       client = hdev->roce_client;
-                       if (type == HNAE3_INIT_CLIENT)
-                               if (handle)
-                                       return client->ops->init_instance(
-                                                                       handle);
                }
        }
 
-       return 0;
+       return ret;
 }
 
 static int hclge_reset_wait(struct hclge_dev *hdev)
@@ -2690,8 +2711,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
        }
 
        val = hclge_read_dev(&hdev->hw, reg);
-       while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT &&
-              test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+       while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
                msleep(HCLGE_RESET_WATI_MS);
                val = hclge_read_dev(&hdev->hw, reg);
                cnt++;
@@ -2795,8 +2815,6 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
                clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
                break;
        default:
-               dev_warn(&hdev->pdev->dev, "Unsupported reset event to clear:%d",
-                        hdev->reset_type);
                break;
        }
 
@@ -2809,16 +2827,21 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
 
 static void hclge_reset(struct hclge_dev *hdev)
 {
+       struct hnae3_handle *handle;
+
        /* perform reset of the stack & ae device for a client */
+       handle = &hdev->vport[0].nic;
 
+       hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+       hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+
+       rtnl_lock();
        hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
 
        if (!hclge_reset_wait(hdev)) {
-               rtnl_lock();
                hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
                hclge_reset_ae_dev(hdev->ae_dev);
                hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
-               rtnl_unlock();
 
                hclge_clear_reset_cause(hdev);
        } else {
@@ -2828,6 +2851,11 @@ static void hclge_reset(struct hclge_dev *hdev)
        }
 
        hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+       handle->last_reset_time = jiffies;
+       rtnl_unlock();
+
+       hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+       hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
 }
 
 static void hclge_reset_event(struct hnae3_handle *handle)
@@ -2840,8 +2868,13 @@ static void hclge_reset_event(struct hnae3_handle *handle)
         * know this if last reset request did not occur very recently (watchdog
         * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
         * In case of new request we reset the "reset level" to PF reset.
+        * And if it is a repeat reset request of the most recent one then we
+        * want to make sure we throttle the reset request. Therefore, we will
+        * not allow it again before 3*HZ times.
         */
-       if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
+       if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
+               return;
+       else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
                handle->reset_level = HNAE3_FUNC_RESET;
 
        dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
@@ -2853,8 +2886,6 @@ static void hclge_reset_event(struct hnae3_handle *handle)
 
        if (handle->reset_level < HNAE3_GLOBAL_RESET)
                handle->reset_level++;
-
-       handle->last_reset_time = jiffies;
 }
 
 static void hclge_reset_subtask(struct hclge_dev *hdev)
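The reworked hclge_reset_event() above adds a throttle on top of the existing escalation rule: a repeat request within 3*HZ of the last reset is ignored, and only when more than 4*5*HZ have passed is the level wound back to a PF (function) reset; last_reset_time itself now gets updated at the end of hclge_reset(). The sketch below restates that decision logic with plain integers standing in for jiffies, so without the kernel's wrap-safe time_before()/time_after() helpers:

#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_HZ 100	/* stand-in for HZ */

enum reset_level { FUNC_RESET = 1, CORE_RESET, GLOBAL_RESET };

/* Returns true if the request should be processed at *level. */
static bool reset_request_allowed(unsigned long now, unsigned long last_reset_time,
				  enum reset_level *level)
{
	if (now < last_reset_time + 3 * EXAMPLE_HZ)
		return false;			/* repeat request: throttle it */
	if (now > last_reset_time + 4 * 5 * EXAMPLE_HZ)
		*level = FUNC_RESET;		/* stale history: restart at PF reset */
	return true;
}

int main(void)
{
	enum reset_level level = CORE_RESET;

	printf("%d\n", reset_request_allowed(150, 0, &level));	/* 0: within 3*HZ */
	printf("%d\n", reset_request_allowed(900, 0, &level));	/* 1: keeps CORE_RESET */
	printf("%d %d\n", reset_request_allowed(2500, 0, &level), level);	/* 1 1: falls back */
	return 0;
}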
@@ -3718,7 +3749,7 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en)
                                "serdes loopback get, ret = %d\n", ret);
                        return ret;
                }
-       } while (++i < HCLGE_SERDES_RETRY_NUM  &&
+       } while (++i < HCLGE_SERDES_RETRY_NUM &&
                 !(req->result & HCLGE_CMD_SERDES_DONE_B));
 
        if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
@@ -4821,17 +4852,17 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
        req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
        req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
-                       vcfg->accept_tag1 ? 1 : 0);
+                     vcfg->accept_tag1 ? 1 : 0);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
-                       vcfg->accept_untag1 ? 1 : 0);
+                     vcfg->accept_untag1 ? 1 : 0);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
-                       vcfg->accept_tag2 ? 1 : 0);
+                     vcfg->accept_tag2 ? 1 : 0);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
-                       vcfg->accept_untag2 ? 1 : 0);
+                     vcfg->accept_untag2 ? 1 : 0);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
-                    vcfg->insert_tag1_en ? 1 : 0);
+                     vcfg->insert_tag1_en ? 1 : 0);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
-                    vcfg->insert_tag2_en ? 1 : 0);
+                     vcfg->insert_tag2_en ? 1 : 0);
        hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
 
        req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
@@ -5017,6 +5048,7 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
 
        req = (struct hclge_config_max_frm_size_cmd *)desc.data;
        req->max_frm_size = cpu_to_le16(max_frm_size);
+       req->min_frm_size = HCLGE_MAC_MIN_FRAME;
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
@@ -5734,6 +5766,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
        INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
        INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
 
+       hclge_clear_all_event_cause(hdev);
+
        /* Enable MISC vector(vector0) */
        hclge_enable_vector(&hdev->misc_vector, true);
 
@@ -5849,6 +5883,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
 
        /* Disable MISC vector(vector0) */
        hclge_enable_vector(&hdev->misc_vector, false);
+       synchronize_irq(hdev->misc_vector.vector_irq);
+
        hclge_destroy_cmd_queue(&hdev->hw);
        hclge_misc_irq_uninit(hdev);
        hclge_pci_uninit(hdev);
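The uninit hunk above closes a small teardown race: after masking the misc vector, synchronize_irq() waits for any handler still running on another CPU to finish before the command queue it may be using is destroyed. A toy sketch of that ordering; the helper names are placeholders, not driver functions:

#include <stdio.h>

static void disable_misc_vector(void)    { puts("vector0 masked"); }
static void drain_inflight_handler(void) { puts("in-flight handler drained"); }
static void destroy_cmd_queue(void)      { puts("command queue destroyed"); }

int main(void)
{
	disable_misc_vector();		/* hclge_enable_vector(&hdev->misc_vector, false) */
	drain_inflight_handler();	/* synchronize_irq(hdev->misc_vector.vector_irq) */
	destroy_cmd_queue();		/* hclge_destroy_cmd_queue(&hdev->hw) */
	return 0;
}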
@@ -6170,7 +6206,7 @@ static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
 
        req = (struct hclge_set_led_state_cmd *)desc.data;
        hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
-                      HCLGE_LED_LOCATE_STATE_S, locate_led_status);
+                       HCLGE_LED_LOCATE_STATE_S, locate_led_status);
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)