qed: Introduce iWARP personality
author    Kalderon, Michal <Michal.Kalderon@cavium.com>
          Sun, 2 Jul 2017 07:29:21 +0000 (10:29 +0300)
committer David S. Miller <davem@davemloft.net>
          Mon, 3 Jul 2017 08:43:44 +0000 (01:43 -0700)
The iWARP personality introduces the need to differentiate, in several
places in the code, whether we are RoCE, iWARP or either. This leads to
the introduction of new macros for querying the personality.

Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
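
A minimal usage sketch, not part of this commit: callers select behavior
with the new personality macros instead of open-coding comparisons against
hw_info.personality. qed_example_rdma_proto() is a hypothetical helper,
shown only to illustrate how the macros introduced below are intended to
be used.

	/* Hypothetical helper, for illustration only: pick a protocol ID
	 * from the function's personality using the new query macros.
	 */
	static enum protocol_type qed_example_rdma_proto(struct qed_hwfn *p_hwfn)
	{
		/* Matches QED_PCI_ETH_IWARP and the unified QED_PCI_ETH_RDMA */
		if (QED_IS_IWARP_PERSONALITY(p_hwfn))
			return PROTOCOLID_IWARP;

		/* Matches QED_PCI_ETH_ROCE and QED_PCI_ETH_RDMA */
		if (QED_IS_ROCE_PERSONALITY(p_hwfn))
			return PROTOCOLID_ROCE;

		return PROTOCOLID_ETH;
	}

Note that QED_PCI_ETH_RDMA satisfies both the RoCE and iWARP checks, so
code that needs to tell the two apart must decide how to order its tests.
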
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_cxt.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qed/qed_main.c
include/linux/qed/common_hsi.h

index 14b08ee9e3ade1db0bf5053de37fa9e76f140e2b..22e1171c317e75322441cbe4dabcaea271114b6e 100644 (file)
@@ -210,14 +210,16 @@ struct qed_tunn_update_params {
 
 /* The PCI personality is not quite synonymous to protocol ID:
  * 1. All personalities need CORE connections
- * 2. The Ethernet personality may support also the RoCE protocol
+ * 2. The Ethernet personality may support also the RoCE/iWARP protocol
  */
 enum qed_pci_personality {
        QED_PCI_ETH,
        QED_PCI_FCOE,
        QED_PCI_ISCSI,
        QED_PCI_ETH_ROCE,
-       QED_PCI_DEFAULT /* default in shmem */
+       QED_PCI_ETH_IWARP,
+       QED_PCI_ETH_RDMA,
+       QED_PCI_DEFAULT, /* default in shmem */
 };
 
 /* All VFs are symmetric, all counters are PF + all VFs */
@@ -277,6 +279,7 @@ enum qed_dev_cap {
        QED_DEV_CAP_FCOE,
        QED_DEV_CAP_ISCSI,
        QED_DEV_CAP_ROCE,
+       QED_DEV_CAP_IWARP,
 };
 
 enum qed_wol_support {
@@ -286,7 +289,24 @@ enum qed_wol_support {
 
 struct qed_hw_info {
        /* PCI personality */
-       enum qed_pci_personality        personality;
+       enum qed_pci_personality personality;
+#define QED_IS_RDMA_PERSONALITY(dev)                       \
+       ((dev)->hw_info.personality == QED_PCI_ETH_ROCE ||  \
+        (dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
+        (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
+#define QED_IS_ROCE_PERSONALITY(dev)                      \
+       ((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
+        (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
+#define QED_IS_IWARP_PERSONALITY(dev)                      \
+       ((dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
+        (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
+#define QED_IS_L2_PERSONALITY(dev)                   \
+       ((dev)->hw_info.personality == QED_PCI_ETH || \
+        QED_IS_RDMA_PERSONALITY(dev))
+#define QED_IS_FCOE_PERSONALITY(dev) \
+       ((dev)->hw_info.personality == QED_PCI_FCOE)
+#define QED_IS_ISCSI_PERSONALITY(dev) \
+       ((dev)->hw_info.personality == QED_PCI_ISCSI)
 
        /* Resource Allocation scheme results */
        u32                             resc_start[QED_MAX_RESC];
index e201214764db298081db01b0137028780b4ce453..38716f77c21ddc7503ccd8a615ed2c403fbc23e4 100644 (file)
@@ -853,7 +853,7 @@ u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
        if (!excess_lines)
                return 0;
 
-       if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+       if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
                return 0;
 
        p_mngr = p_hwfn->p_cxt_mngr;
@@ -1033,7 +1033,7 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
        u32 lines, line, sz_left, lines_to_skip = 0;
 
        /* Special handling for RoCE that supports dynamic allocation */
-       if ((p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) &&
+       if (QED_IS_RDMA_PERSONALITY(p_hwfn) &&
            ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
                return 0;
 
@@ -1833,7 +1833,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
                tm_offset += tm_iids.pf_tids[i];
        }
 
-       if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE)
+       if (QED_IS_RDMA_PERSONALITY(p_hwfn))
                active_seg_mask = 0;
 
        STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
@@ -2344,7 +2344,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
                       last_cid_allocated - 1);
 
                if (!p_hwfn->b_rdma_enabled_in_prs) {
-                       /* Enable RoCE search */
+                       /* Enable RDMA search */
                        qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
                        p_hwfn->b_rdma_enabled_in_prs = true;
                }
index 49667ad9042da9db095694d206634c74c4a46f4a..68e61823bfc047469b9067ec414cff0b39fa684a 100644 (file)
@@ -936,7 +936,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
 
                /* EQ */
                n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
-               if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+               if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
                        num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
                                                               PROTOCOLID_ROCE,
                                                               NULL) * 2;
@@ -2057,7 +2057,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
        qed_int_get_num_sbs(p_hwfn, &sb_cnt);
 
        if (IS_ENABLED(CONFIG_QED_RDMA) &&
-           p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+           QED_IS_RDMA_PERSONALITY(p_hwfn)) {
                /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
                 * the status blocks equally between L2 / RoCE but with
                 * consideration as to how many l2 queues / cnqs we have.
@@ -2068,9 +2068,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
 
                non_l2_sbs = feat_num[QED_RDMA_CNQ];
        }
-
-       if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
-           p_hwfn->hw_info.personality == QED_PCI_ETH) {
+       if (QED_IS_L2_PERSONALITY(p_hwfn)) {
                /* Start by allocating VF queues, then PF's */
                feat_num[QED_VF_L2_QUE] = min_t(u32,
                                                RESC_NUM(p_hwfn, QED_L2_QUEUE),
@@ -2083,12 +2081,12 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
                                                         QED_VF_L2_QUE));
        }
 
-       if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
+       if (QED_IS_FCOE_PERSONALITY(p_hwfn))
                feat_num[QED_FCOE_CQ] =  min_t(u32, sb_cnt.cnt,
                                               RESC_NUM(p_hwfn,
                                                        QED_CMDQS_CQS));
 
-       if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+       if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
                feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt,
                                               RESC_NUM(p_hwfn,
                                                        QED_CMDQS_CQS));
index e57699bfbdfa415cd9eb56c04e31f2fe0ad20c80..27ea54ba7e1b9566c33d2a3378880867424aceee 100644 (file)
@@ -79,8 +79,7 @@ int qed_l2_alloc(struct qed_hwfn *p_hwfn)
        unsigned long **pp_qids;
        u32 i;
 
-       if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
-           p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+       if (!QED_IS_L2_PERSONALITY(p_hwfn))
                return 0;
 
        p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
index 17f9b0a7b55332e645ad02cfc9ed238dad38f672..be66f19d577d1cb60cf32dde546fc30b844f95ce 100644 (file)
@@ -1421,7 +1421,7 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
        if (rc)
                goto out;
 
-       if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+       if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
                qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
 
        qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
index 16cc30b11cce34602ca68973f23ba2713c713ac9..b11399606990ae5b950cc933fcceeae966375058 100644 (file)
@@ -237,6 +237,8 @@ err0:
 int qed_fill_dev_info(struct qed_dev *cdev,
                      struct qed_dev_info *dev_info)
 {
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_hw_info *hw_info = &p_hwfn->hw_info;
        struct qed_tunnel_info *tun = &cdev->tunnel;
        struct qed_ptt  *ptt;
 
@@ -260,11 +262,10 @@ int qed_fill_dev_info(struct qed_dev *cdev,
        dev_info->pci_mem_start = cdev->pci_params.mem_start;
        dev_info->pci_mem_end = cdev->pci_params.mem_end;
        dev_info->pci_irq = cdev->pci_params.irq;
-       dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
-                                   QED_PCI_ETH_ROCE);
+       dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
        dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
        dev_info->dev_type = cdev->type;
-       ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
+       ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);
 
        if (IS_PF(cdev)) {
                dev_info->fw_major = FW_MAJOR_VERSION;
@@ -274,8 +275,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
                dev_info->mf_mode = cdev->mf_mode;
                dev_info->tx_switching = true;
 
-               if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support ==
-                   QED_WOL_SUPPORT_PME)
+               if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
                        dev_info->wol_support = true;
 
                dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
@@ -304,7 +304,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
                                    &dev_info->mfw_rev, NULL);
        }
 
-       dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu;
+       dev_info->mtu = hw_info->mtu;
 
        return 0;
 }
@@ -790,7 +790,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
                                       cdev->num_hwfns;
 
        if (!IS_ENABLED(CONFIG_QED_RDMA) ||
-           QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH_ROCE)
+           !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
                return 0;
 
        for_each_hwfn(cdev, i)
@@ -931,8 +931,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
        /* In case we might support RDMA, don't allow qede to be greedy
         * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
         */
-       if (QED_LEADING_HWFN(cdev)->hw_info.personality ==
-           QED_PCI_ETH_ROCE) {
+       if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
                u16 *num_cons;
 
                num_cons = &params->eth_pf_params.num_cons;
index a567cbf8c5b465edf3aff52d0454886e3d0239c7..885ae1379b5ae4c2febd7a2a965ded4e2e8f7981 100644 (file)
@@ -778,7 +778,7 @@ enum protocol_type {
        PROTOCOLID_ROCE,
        PROTOCOLID_CORE,
        PROTOCOLID_ETH,
-       PROTOCOLID_RESERVED4,
+       PROTOCOLID_IWARP,
        PROTOCOLID_RESERVED5,
        PROTOCOLID_PREROCE,
        PROTOCOLID_COMMON,