Merge branch 'qedr' into k.o/for-next
author Doug Ledford <dledford@redhat.com>
Mon, 25 Sep 2017 15:18:35 +0000 (11:18 -0400)
committer Doug Ledford <dledford@redhat.com>
Mon, 25 Sep 2017 15:18:35 +0000 (11:18 -0400)
Signed-off-by: Doug Ledford <dledford@redhat.com>
13 files changed:
MAINTAINERS
drivers/infiniband/hw/qedr/Makefile
drivers/infiniband/hw/qedr/main.c
drivers/infiniband/hw/qedr/qedr.h
drivers/infiniband/hw/qedr/qedr_cm.c [deleted file]
drivers/infiniband/hw/qedr/qedr_cm.h [deleted file]
drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
drivers/infiniband/hw/qedr/qedr_iw_cm.c [new file with mode: 0644]
drivers/infiniband/hw/qedr/qedr_iw_cm.h [new file with mode: 0644]
drivers/infiniband/hw/qedr/qedr_roce_cm.c [new file with mode: 0644]
drivers/infiniband/hw/qedr/qedr_roce_cm.h [new file with mode: 0644]
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/hw/qedr/verbs.h

index 6671f375f7fcdd1cf9e40f62c884f1ef9592dab9..473c9b17a37c95d9b8784edc3c05aef31c41b081 100644 (file)
@@ -11068,6 +11068,7 @@ F:      drivers/net/ethernet/qlogic/qede/
 
 QLOGIC QL4xxx RDMA DRIVER
 M:     Ram Amrani <Ram.Amrani@cavium.com>
+M:     Michal Kalderon <Michal.Kalderon@cavium.com>
 M:     Ariel Elior <Ariel.Elior@cavium.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
index ba7067c77f2f6ff3e684364bcb1907e6d24e02a0..1c0bc4f78550abd7c743cda3231833bbfa075125 100644 (file)
@@ -1,3 +1,3 @@
 obj-$(CONFIG_INFINIBAND_QEDR) := qedr.o
 
-qedr-y := main.o verbs.o qedr_cm.o
+qedr-y := main.o verbs.o qedr_roce_cm.o qedr_iw_cm.o
index 97d033f51dc90c4e727cf0a9df2fda174c540a28..ca9317107ab7d9855355f5796ae86b30b5f6b829 100644 (file)
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_addr.h>
 #include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_mad.h>
 #include <linux/netdevice.h>
 #include <linux/iommu.h>
 #include <linux/pci.h>
 #include <net/addrconf.h>
+#include <linux/idr.h>
 
 #include <linux/qed/qed_chain.h>
 #include <linux/qed/qed_if.h>
 #include "qedr.h"
 #include "verbs.h"
 #include <rdma/qedr-abi.h>
+#include "qedr_iw_cm.h"
 
 MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
 MODULE_AUTHOR("QLogic Corporation");
@@ -92,8 +96,84 @@ static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
        return qdev->ndev;
 }
 
+int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
+                            struct ib_port_immutable *immutable)
+{
+       struct ib_port_attr attr;
+       int err;
+
+       err = qedr_query_port(ibdev, port_num, &attr);
+       if (err)
+               return err;
+
+       immutable->pkey_tbl_len = attr.pkey_tbl_len;
+       immutable->gid_tbl_len = attr.gid_tbl_len;
+       immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
+           RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+       immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
+       return 0;
+}
+
+int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
+                          struct ib_port_immutable *immutable)
+{
+       struct ib_port_attr attr;
+       int err;
+
+       err = qedr_query_port(ibdev, port_num, &attr);
+       if (err)
+               return err;
+
+       immutable->pkey_tbl_len = 1;
+       immutable->gid_tbl_len = 1;
+       immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+       immutable->max_mad_size = 0;
+
+       return 0;
+}
+
+int qedr_iw_register_device(struct qedr_dev *dev)
+{
+       dev->ibdev.node_type = RDMA_NODE_RNIC;
+       dev->ibdev.query_gid = qedr_iw_query_gid;
+
+       dev->ibdev.get_port_immutable = qedr_iw_port_immutable;
+
+       dev->ibdev.iwcm = kzalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
+       if (!dev->ibdev.iwcm)
+               return -ENOMEM;
+
+       dev->ibdev.iwcm->connect = qedr_iw_connect;
+       dev->ibdev.iwcm->accept = qedr_iw_accept;
+       dev->ibdev.iwcm->reject = qedr_iw_reject;
+       dev->ibdev.iwcm->create_listen = qedr_iw_create_listen;
+       dev->ibdev.iwcm->destroy_listen = qedr_iw_destroy_listen;
+       dev->ibdev.iwcm->add_ref = qedr_iw_qp_add_ref;
+       dev->ibdev.iwcm->rem_ref = qedr_iw_qp_rem_ref;
+       dev->ibdev.iwcm->get_qp = qedr_iw_get_qp;
+
+       memcpy(dev->ibdev.iwcm->ifname,
+              dev->ndev->name, sizeof(dev->ibdev.iwcm->ifname));
+
+       return 0;
+}
+
+void qedr_roce_register_device(struct qedr_dev *dev)
+{
+       dev->ibdev.node_type = RDMA_NODE_IB_CA;
+       dev->ibdev.query_gid = qedr_query_gid;
+
+       dev->ibdev.add_gid = qedr_add_gid;
+       dev->ibdev.del_gid = qedr_del_gid;
+
+       dev->ibdev.get_port_immutable = qedr_roce_port_immutable;
+}
+
 static int qedr_register_device(struct qedr_dev *dev)
 {
+       int rc;
+
        strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);
 
        dev->ibdev.node_guid = dev->attr.node_guid;
@@ -121,18 +201,21 @@ static int qedr_register_device(struct qedr_dev *dev)
                                     QEDR_UVERBS(POST_SEND) |
                                     QEDR_UVERBS(POST_RECV);
 
+       if (IS_IWARP(dev)) {
+               rc = qedr_iw_register_device(dev);
+               if (rc)
+                       return rc;
+       } else {
+               qedr_roce_register_device(dev);
+       }
+
        dev->ibdev.phys_port_cnt = 1;
        dev->ibdev.num_comp_vectors = dev->num_cnq;
-       dev->ibdev.node_type = RDMA_NODE_IB_CA;
 
        dev->ibdev.query_device = qedr_query_device;
        dev->ibdev.query_port = qedr_query_port;
        dev->ibdev.modify_port = qedr_modify_port;
 
-       dev->ibdev.query_gid = qedr_query_gid;
-       dev->ibdev.add_gid = qedr_add_gid;
-       dev->ibdev.del_gid = qedr_del_gid;
-
        dev->ibdev.alloc_ucontext = qedr_alloc_ucontext;
        dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext;
        dev->ibdev.mmap = qedr_mmap;
@@ -166,7 +249,7 @@ static int qedr_register_device(struct qedr_dev *dev)
        dev->ibdev.post_recv = qedr_post_recv;
 
        dev->ibdev.process_mad = qedr_process_mad;
-       dev->ibdev.get_port_immutable = qedr_port_immutable;
+
        dev->ibdev.get_netdev = qedr_get_netdev;
 
        dev->ibdev.dev.parent = &dev->pdev->dev;
@@ -217,6 +300,9 @@ static void qedr_free_resources(struct qedr_dev *dev)
 {
        int i;
 
+       if (IS_IWARP(dev))
+               destroy_workqueue(dev->iwarp_wq);
+
        for (i = 0; i < dev->num_cnq; i++) {
                qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
                dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
@@ -241,6 +327,12 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
 
        spin_lock_init(&dev->sgid_lock);
 
+       if (IS_IWARP(dev)) {
+               spin_lock_init(&dev->idr_lock);
+               idr_init(&dev->qpidr);
+               dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
+       }
+
        /* Allocate Status blocks for CNQ */
        dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
                                GFP_KERNEL);
@@ -716,6 +808,7 @@ static int qedr_init_hw(struct qedr_dev *dev)
        in_params->events = &events;
        in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
        in_params->max_mtu = dev->ndev->mtu;
+       dev->iwarp_max_mtu = dev->ndev->mtu;
        ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);
 
        rc = dev->ops->rdma_init(dev->cdev, in_params);
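The iWARP additions above depend on a per-device QP table: qedr_alloc_resources() initializes dev->qpidr and dev->idr_lock, and the new qedr_iw_connect() below looks QPs up by number with idr_find(). The verbs.c side that populates the table is not included in this excerpt; the following is an illustrative sketch only (the helper names are assumptions, and the code is not part of the patch) of the dev->qpidr / dev->idr_lock usage pattern the iWARP CM code relies on:

/* Illustrative sketch, not part of the commit: shows how QPs could be
 * added to and removed from dev->qpidr under dev->idr_lock.
 */
static int qedr_idr_add(struct qedr_dev *dev, void *ptr, u32 id)
{
	int rc;

	idr_preload(GFP_KERNEL);
	spin_lock_irq(&dev->idr_lock);
	rc = idr_alloc(&dev->qpidr, ptr, id, id + 1, GFP_ATOMIC);
	spin_unlock_irq(&dev->idr_lock);
	idr_preload_end();

	return rc < 0 ? rc : 0;
}

static void qedr_idr_remove(struct qedr_dev *dev, u32 id)
{
	spin_lock_irq(&dev->idr_lock);
	idr_remove(&dev->qpidr, id);
	spin_unlock_irq(&dev->idr_lock);
}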
index b2bb42e2805ddfb220b7033620d988e0877f3d6a..bcfa1f901281146113f71a7355de4068b19335c7 100644 (file)
@@ -33,6 +33,7 @@
 #define __QEDR_H__
 
 #include <linux/pci.h>
+#include <linux/idr.h>
 #include <rdma/ib_addr.h>
 #include <linux/qed/qed_if.h>
 #include <linux/qed/qed_chain.h>
@@ -43,6 +44,8 @@
 
 #define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
 #define DP_NAME(dev) ((dev)->ibdev.name)
+#define IS_IWARP(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_IWARP)
+#define IS_ROCE(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_ROCE)
 
 #define DP_DEBUG(dev, module, fmt, ...)                                        \
        pr_debug("(%s) " module ": " fmt,                               \
@@ -56,6 +59,7 @@
 #define QEDR_MSG_SQ   "  SQ"
 #define QEDR_MSG_QP   "  QP"
 #define QEDR_MSG_GSI  " GSI"
+#define QEDR_MSG_IWARP  " IW"
 
 #define QEDR_CQ_MAGIC_NUMBER   (0x11223344)
 
@@ -160,6 +164,11 @@ struct qedr_dev {
        struct qedr_cq          *gsi_sqcq;
        struct qedr_cq          *gsi_rqcq;
        struct qedr_qp          *gsi_qp;
+       enum qed_rdma_type      rdma_type;
+       spinlock_t              idr_lock; /* Protect qpidr data-structure */
+       struct idr              qpidr;
+       struct workqueue_struct *iwarp_wq;
+       u16                     iwarp_max_mtu;
 
        unsigned long enet_state;
 
@@ -317,6 +326,9 @@ struct qedr_qp_hwq_info {
        /* DB */
        void __iomem *db;
        union db_prod32 db_data;
+
+       void __iomem *iwarp_db2;
+       union db_prod32 iwarp_db2_data;
 };
 
 #define QEDR_INC_SW_IDX(p_info, index)                                 \
@@ -337,7 +349,7 @@ enum qedr_qp_err_bitmap {
 struct qedr_qp {
        struct ib_qp ibqp;      /* must be first */
        struct qedr_dev *dev;
-
+       struct qedr_iw_ep *ep;
        struct qedr_qp_hwq_info sq;
        struct qedr_qp_hwq_info rq;
 
@@ -394,6 +406,8 @@ struct qedr_qp {
        /* Relevant to qps created from user space only (applications) */
        struct qedr_userq usq;
        struct qedr_userq urq;
+       atomic_t refcnt;
+       bool destroyed;
 };
 
 struct qedr_ah {
@@ -474,6 +488,21 @@ static inline int qedr_get_dmac(struct qedr_dev *dev,
        return 0;
 }
 
+struct qedr_iw_listener {
+       struct qedr_dev *dev;
+       struct iw_cm_id *cm_id;
+       int             backlog;
+       void            *qed_handle;
+};
+
+struct qedr_iw_ep {
+       struct qedr_dev *dev;
+       struct iw_cm_id *cm_id;
+       struct qedr_qp  *qp;
+       void            *qed_context;
+       u8              during_connect;
+};
+
 static inline
 struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext)
 {
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
deleted file mode 100644 (file)
index 4689e80..0000000
+++ /dev/null
@@ -1,744 +0,0 @@
-/* QLogic qedr NIC Driver
- * Copyright (c) 2015-2016  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/dma-mapping.h>
-#include <linux/crc32.h>
-#include <linux/iommu.h>
-#include <net/ip.h>
-#include <net/ipv6.h>
-#include <net/udp.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_user_verbs.h>
-#include <rdma/iw_cm.h>
-#include <rdma/ib_umem.h>
-#include <rdma/ib_addr.h>
-#include <rdma/ib_cache.h>
-
-#include <linux/qed/qed_if.h>
-#include <linux/qed/qed_rdma_if.h>
-#include "qedr.h"
-#include "verbs.h"
-#include <rdma/qedr-abi.h>
-#include "qedr_cm.h"
-
-void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
-{
-       info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
-}
-
-void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
-                         struct ib_qp_init_attr *attrs)
-{
-       dev->gsi_qp_created = 1;
-       dev->gsi_sqcq = get_qedr_cq(attrs->send_cq);
-       dev->gsi_rqcq = get_qedr_cq(attrs->recv_cq);
-       dev->gsi_qp = qp;
-}
-
-void qedr_ll2_complete_tx_packet(void *cxt,
-                                u8 connection_handle,
-                                void *cookie,
-                                dma_addr_t first_frag_addr,
-                                bool b_last_fragment, bool b_last_packet)
-{
-       struct qedr_dev *dev = (struct qedr_dev *)cxt;
-       struct qed_roce_ll2_packet *pkt = cookie;
-       struct qedr_cq *cq = dev->gsi_sqcq;
-       struct qedr_qp *qp = dev->gsi_qp;
-       unsigned long flags;
-
-       DP_DEBUG(dev, QEDR_MSG_GSI,
-                "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n",
-                dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
-                cq->ibcq.comp_handler ? "Yes" : "No");
-
-       dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr,
-                         pkt->header.baddr);
-       kfree(pkt);
-
-       spin_lock_irqsave(&qp->q_lock, flags);
-       qedr_inc_sw_gsi_cons(&qp->sq);
-       spin_unlock_irqrestore(&qp->q_lock, flags);
-
-       if (cq->ibcq.comp_handler)
-               (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-}
-
-void qedr_ll2_complete_rx_packet(void *cxt,
-                                struct qed_ll2_comp_rx_data *data)
-{
-       struct qedr_dev *dev = (struct qedr_dev *)cxt;
-       struct qedr_cq *cq = dev->gsi_rqcq;
-       struct qedr_qp *qp = dev->gsi_qp;
-       unsigned long flags;
-
-       spin_lock_irqsave(&qp->q_lock, flags);
-
-       qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
-               -EINVAL : 0;
-       qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan;
-       /* note: length stands for data length i.e. GRH is excluded */
-       qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
-               data->length.data_length;
-       *((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) =
-               ntohl(data->opaque_data_0);
-       *((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) =
-               ntohs((u16)data->opaque_data_1);
-
-       qedr_inc_sw_gsi_cons(&qp->rq);
-
-       spin_unlock_irqrestore(&qp->q_lock, flags);
-
-       if (cq->ibcq.comp_handler)
-               (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-}
-
-void qedr_ll2_release_rx_packet(void *cxt,
-                               u8 connection_handle,
-                               void *cookie,
-                               dma_addr_t rx_buf_addr, bool b_last_packet)
-{
-       /* Do nothing... */
-}
-
-static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
-                               struct ib_qp_init_attr *attrs)
-{
-       struct qed_rdma_destroy_cq_in_params iparams;
-       struct qed_rdma_destroy_cq_out_params oparams;
-       struct qedr_cq *cq;
-
-       cq = get_qedr_cq(attrs->send_cq);
-       iparams.icid = cq->icid;
-       dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
-       dev->ops->common->chain_free(dev->cdev, &cq->pbl);
-
-       cq = get_qedr_cq(attrs->recv_cq);
-       /* if a dedicated recv_cq was used, delete it too */
-       if (iparams.icid != cq->icid) {
-               iparams.icid = cq->icid;
-               dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
-               dev->ops->common->chain_free(dev->cdev, &cq->pbl);
-       }
-}
-
-static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
-                                         struct ib_qp_init_attr *attrs)
-{
-       if (attrs->cap.max_recv_sge > QEDR_GSI_MAX_RECV_SGE) {
-               DP_ERR(dev,
-                      " create gsi qp: failed. max_recv_sge is larger the max %d>%d\n",
-                      attrs->cap.max_recv_sge, QEDR_GSI_MAX_RECV_SGE);
-               return -EINVAL;
-       }
-
-       if (attrs->cap.max_recv_wr > QEDR_GSI_MAX_RECV_WR) {
-               DP_ERR(dev,
-                      " create gsi qp: failed. max_recv_wr is too large %d>%d\n",
-                      attrs->cap.max_recv_wr, QEDR_GSI_MAX_RECV_WR);
-               return -EINVAL;
-       }
-
-       if (attrs->cap.max_send_wr > QEDR_GSI_MAX_SEND_WR) {
-               DP_ERR(dev,
-                      " create gsi qp: failed. max_send_wr is too large %d>%d\n",
-                      attrs->cap.max_send_wr, QEDR_GSI_MAX_SEND_WR);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int qedr_ll2_post_tx(struct qedr_dev *dev,
-                           struct qed_roce_ll2_packet *pkt)
-{
-       enum qed_ll2_roce_flavor_type roce_flavor;
-       struct qed_ll2_tx_pkt_info ll2_tx_pkt;
-       int rc;
-       int i;
-
-       memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt));
-
-       roce_flavor = (pkt->roce_mode == ROCE_V1) ?
-           QED_LL2_ROCE : QED_LL2_RROCE;
-
-       if (pkt->roce_mode == ROCE_V2_IPV4)
-               ll2_tx_pkt.enable_ip_cksum = 1;
-
-       ll2_tx_pkt.num_of_bds = 1 /* hdr */  + pkt->n_seg;
-       ll2_tx_pkt.vlan = 0;
-       ll2_tx_pkt.tx_dest = pkt->tx_dest;
-       ll2_tx_pkt.qed_roce_flavor = roce_flavor;
-       ll2_tx_pkt.first_frag = pkt->header.baddr;
-       ll2_tx_pkt.first_frag_len = pkt->header.len;
-       ll2_tx_pkt.cookie = pkt;
-
-       /* tx header */
-       rc = dev->ops->ll2_prepare_tx_packet(dev->rdma_ctx,
-                                            dev->gsi_ll2_handle,
-                                            &ll2_tx_pkt, 1);
-       if (rc) {
-               /* TX failed while posting header - release resources */
-               dma_free_coherent(&dev->pdev->dev, pkt->header.len,
-                                 pkt->header.vaddr, pkt->header.baddr);
-               kfree(pkt);
-
-               DP_ERR(dev, "roce ll2 tx: header failed (rc=%d)\n", rc);
-               return rc;
-       }
-
-       /* tx payload */
-       for (i = 0; i < pkt->n_seg; i++) {
-               rc = dev->ops->ll2_set_fragment_of_tx_packet(
-                       dev->rdma_ctx,
-                       dev->gsi_ll2_handle,
-                       pkt->payload[i].baddr,
-                       pkt->payload[i].len);
-
-               if (rc) {
-                       /* if failed not much to do here, partial packet has
-                        * been posted we can't free memory, will need to wait
-                        * for completion
-                        */
-                       DP_ERR(dev, "ll2 tx: payload failed (rc=%d)\n", rc);
-                       return rc;
-               }
-       }
-
-       return 0;
-}
-
-int qedr_ll2_stop(struct qedr_dev *dev)
-{
-       int rc;
-
-       if (dev->gsi_ll2_handle == QED_LL2_UNUSED_HANDLE)
-               return 0;
-
-       /* remove LL2 MAC address filter */
-       rc = dev->ops->ll2_set_mac_filter(dev->cdev,
-                                         dev->gsi_ll2_mac_address, NULL);
-
-       rc = dev->ops->ll2_terminate_connection(dev->rdma_ctx,
-                                               dev->gsi_ll2_handle);
-       if (rc)
-               DP_ERR(dev, "Failed to terminate LL2 connection (rc=%d)\n", rc);
-
-       dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
-
-       dev->gsi_ll2_handle = QED_LL2_UNUSED_HANDLE;
-
-       return rc;
-}
-
-int qedr_ll2_start(struct qedr_dev *dev,
-                  struct ib_qp_init_attr *attrs, struct qedr_qp *qp)
-{
-       struct qed_ll2_acquire_data data;
-       struct qed_ll2_cbs cbs;
-       int rc;
-
-       /* configure and start LL2 */
-       cbs.rx_comp_cb = qedr_ll2_complete_rx_packet;
-       cbs.tx_comp_cb = qedr_ll2_complete_tx_packet;
-       cbs.rx_release_cb = qedr_ll2_release_rx_packet;
-       cbs.tx_release_cb = qedr_ll2_complete_tx_packet;
-       cbs.cookie = dev;
-
-       memset(&data, 0, sizeof(data));
-       data.input.conn_type = QED_LL2_TYPE_ROCE;
-       data.input.mtu = dev->ndev->mtu;
-       data.input.rx_num_desc = attrs->cap.max_recv_wr;
-       data.input.rx_drop_ttl0_flg = true;
-       data.input.rx_vlan_removal_en = false;
-       data.input.tx_num_desc = attrs->cap.max_send_wr;
-       data.input.tx_tc = 0;
-       data.input.tx_dest = QED_LL2_TX_DEST_NW;
-       data.input.ai_err_packet_too_big = QED_LL2_DROP_PACKET;
-       data.input.ai_err_no_buf = QED_LL2_DROP_PACKET;
-       data.input.gsi_enable = 1;
-       data.p_connection_handle = &dev->gsi_ll2_handle;
-       data.cbs = &cbs;
-
-       rc = dev->ops->ll2_acquire_connection(dev->rdma_ctx, &data);
-       if (rc) {
-               DP_ERR(dev,
-                      "ll2 start: failed to acquire LL2 connection (rc=%d)\n",
-                      rc);
-               return rc;
-       }
-
-       rc = dev->ops->ll2_establish_connection(dev->rdma_ctx,
-                                               dev->gsi_ll2_handle);
-       if (rc) {
-               DP_ERR(dev,
-                      "ll2 start: failed to establish LL2 connection (rc=%d)\n",
-                      rc);
-               goto err1;
-       }
-
-       rc = dev->ops->ll2_set_mac_filter(dev->cdev, NULL, dev->ndev->dev_addr);
-       if (rc)
-               goto err2;
-
-       return 0;
-
-err2:
-       dev->ops->ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
-err1:
-       dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
-
-       return rc;
-}
-
-struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
-                                struct ib_qp_init_attr *attrs,
-                                struct qedr_qp *qp)
-{
-       int rc;
-
-       rc = qedr_check_gsi_qp_attrs(dev, attrs);
-       if (rc)
-               return ERR_PTR(rc);
-
-       rc = qedr_ll2_start(dev, attrs, qp);
-       if (rc) {
-               DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
-               return ERR_PTR(rc);
-       }
-
-       /* create QP */
-       qp->ibqp.qp_num = 1;
-       qp->rq.max_wr = attrs->cap.max_recv_wr;
-       qp->sq.max_wr = attrs->cap.max_send_wr;
-
-       qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
-                               GFP_KERNEL);
-       if (!qp->rqe_wr_id)
-               goto err;
-       qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
-                               GFP_KERNEL);
-       if (!qp->wqe_wr_id)
-               goto err;
-
-       qedr_store_gsi_qp_cq(dev, qp, attrs);
-       ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
-
-       /* the GSI CQ is handled by the driver so remove it from the FW */
-       qedr_destroy_gsi_cq(dev, attrs);
-       dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
-       dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
-
-       DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);
-
-       return &qp->ibqp;
-
-err:
-       kfree(qp->rqe_wr_id);
-
-       rc = qedr_ll2_stop(dev);
-       if (rc)
-               DP_ERR(dev, "create gsi qp: failed destroy on create\n");
-
-       return ERR_PTR(-ENOMEM);
-}
-
-int qedr_destroy_gsi_qp(struct qedr_dev *dev)
-{
-       return qedr_ll2_stop(dev);
-}
-
-#define QEDR_MAX_UD_HEADER_SIZE        (100)
-#define QEDR_GSI_QPN           (1)
-static inline int qedr_gsi_build_header(struct qedr_dev *dev,
-                                       struct qedr_qp *qp,
-                                       struct ib_send_wr *swr,
-                                       struct ib_ud_header *udh,
-                                       int *roce_mode)
-{
-       bool has_vlan = false, has_grh_ipv6 = true;
-       struct rdma_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
-       const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
-       union ib_gid sgid;
-       int send_size = 0;
-       u16 vlan_id = 0;
-       u16 ether_type;
-       struct ib_gid_attr sgid_attr;
-       int rc;
-       int ip_ver = 0;
-
-       bool has_udp = false;
-       int i;
-
-       send_size = 0;
-       for (i = 0; i < swr->num_sge; ++i)
-               send_size += swr->sg_list[i].length;
-
-       rc = ib_get_cached_gid(qp->ibqp.device, rdma_ah_get_port_num(ah_attr),
-                              grh->sgid_index, &sgid, &sgid_attr);
-       if (rc) {
-               DP_ERR(dev,
-                      "gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
-                      rdma_ah_get_port_num(ah_attr),
-                      grh->sgid_index);
-               return rc;
-       }
-
-       if (sgid_attr.ndev) {
-               vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
-               if (vlan_id < VLAN_CFI_MASK)
-                       has_vlan = true;
-
-               dev_put(sgid_attr.ndev);
-       }
-
-       if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
-               DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
-                      grh->sgid_index);
-               return -ENOENT;
-       }
-
-       has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
-       if (!has_udp) {
-               /* RoCE v1 */
-               ether_type = ETH_P_IBOE;
-               *roce_mode = ROCE_V1;
-       } else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
-               /* RoCE v2 IPv4 */
-               ip_ver = 4;
-               ether_type = ETH_P_IP;
-               has_grh_ipv6 = false;
-               *roce_mode = ROCE_V2_IPV4;
-       } else {
-               /* RoCE v2 IPv6 */
-               ip_ver = 6;
-               ether_type = ETH_P_IPV6;
-               *roce_mode = ROCE_V2_IPV6;
-       }
-
-       rc = ib_ud_header_init(send_size, false, true, has_vlan,
-                              has_grh_ipv6, ip_ver, has_udp, 0, udh);
-       if (rc) {
-               DP_ERR(dev, "gsi post send: failed to init header\n");
-               return rc;
-       }
-
-       /* ENET + VLAN headers */
-       ether_addr_copy(udh->eth.dmac_h, ah_attr->roce.dmac);
-       ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
-       if (has_vlan) {
-               udh->eth.type = htons(ETH_P_8021Q);
-               udh->vlan.tag = htons(vlan_id);
-               udh->vlan.type = htons(ether_type);
-       } else {
-               udh->eth.type = htons(ether_type);
-       }
-
-       /* BTH */
-       udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
-       udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT;
-       udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
-       udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
-       udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
-
-       /* DETH */
-       udh->deth.qkey = htonl(0x80010000);
-       udh->deth.source_qpn = htonl(QEDR_GSI_QPN);
-
-       if (has_grh_ipv6) {
-               /* GRH / IPv6 header */
-               udh->grh.traffic_class = grh->traffic_class;
-               udh->grh.flow_label = grh->flow_label;
-               udh->grh.hop_limit = grh->hop_limit;
-               udh->grh.destination_gid = grh->dgid;
-               memcpy(&udh->grh.source_gid.raw, &sgid.raw,
-                      sizeof(udh->grh.source_gid.raw));
-       } else {
-               /* IPv4 header */
-               u32 ipv4_addr;
-
-               udh->ip4.protocol = IPPROTO_UDP;
-               udh->ip4.tos = htonl(grh->flow_label);
-               udh->ip4.frag_off = htons(IP_DF);
-               udh->ip4.ttl = grh->hop_limit;
-
-               ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
-               udh->ip4.saddr = ipv4_addr;
-               ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
-               udh->ip4.daddr = ipv4_addr;
-               /* note: checksum is calculated by the device */
-       }
-
-       /* UDP */
-       if (has_udp) {
-               udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT);
-               udh->udp.dport = htons(ROCE_V2_UDP_DPORT);
-               udh->udp.csum = 0;
-               /* UDP length is untouched hence is zero */
-       }
-       return 0;
-}
-
-static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
-                                       struct qedr_qp *qp,
-                                       struct ib_send_wr *swr,
-                                       struct qed_roce_ll2_packet **p_packet)
-{
-       u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE];
-       struct qed_roce_ll2_packet *packet;
-       struct pci_dev *pdev = dev->pdev;
-       int roce_mode, header_size;
-       struct ib_ud_header udh;
-       int i, rc;
-
-       *p_packet = NULL;
-
-       rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
-       if (rc)
-               return rc;
-
-       header_size = ib_ud_header_pack(&udh, &ud_header_buffer);
-
-       packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
-       if (!packet)
-               return -ENOMEM;
-
-       packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size,
-                                                 &packet->header.baddr,
-                                                 GFP_ATOMIC);
-       if (!packet->header.vaddr) {
-               kfree(packet);
-               return -ENOMEM;
-       }
-
-       if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
-               packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
-       else
-               packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
-
-       packet->roce_mode = roce_mode;
-       memcpy(packet->header.vaddr, ud_header_buffer, header_size);
-       packet->header.len = header_size;
-       packet->n_seg = swr->num_sge;
-       for (i = 0; i < packet->n_seg; i++) {
-               packet->payload[i].baddr = swr->sg_list[i].addr;
-               packet->payload[i].len = swr->sg_list[i].length;
-       }
-
-       *p_packet = packet;
-
-       return 0;
-}
-
-int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-                      struct ib_send_wr **bad_wr)
-{
-       struct qed_roce_ll2_packet *pkt = NULL;
-       struct qedr_qp *qp = get_qedr_qp(ibqp);
-       struct qedr_dev *dev = qp->dev;
-       unsigned long flags;
-       int rc;
-
-       if (qp->state != QED_ROCE_QP_STATE_RTS) {
-               *bad_wr = wr;
-               DP_ERR(dev,
-                      "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTS\n",
-                      qp->state);
-               return -EINVAL;
-       }
-
-       if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
-               DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n",
-                      wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE);
-               rc = -EINVAL;
-               goto err;
-       }
-
-       if (wr->opcode != IB_WR_SEND) {
-               DP_ERR(dev,
-                      "gsi post send: failed due to unsupported opcode %d\n",
-                      wr->opcode);
-               rc = -EINVAL;
-               goto err;
-       }
-
-       spin_lock_irqsave(&qp->q_lock, flags);
-
-       rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
-       if (rc) {
-               spin_unlock_irqrestore(&qp->q_lock, flags);
-               goto err;
-       }
-
-       rc = qedr_ll2_post_tx(dev, pkt);
-
-       if (!rc) {
-               qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
-               qedr_inc_sw_prod(&qp->sq);
-               DP_DEBUG(qp->dev, QEDR_MSG_GSI,
-                        "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
-                        wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
-       } else {
-               DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
-               rc = -EAGAIN;
-               *bad_wr = wr;
-       }
-
-       spin_unlock_irqrestore(&qp->q_lock, flags);
-
-       if (wr->next) {
-               DP_ERR(dev,
-                      "gsi post send: failed second WR. Only one WR may be passed at a time\n");
-               *bad_wr = wr->next;
-               rc = -EINVAL;
-       }
-
-       return rc;
-
-err:
-       *bad_wr = wr;
-       return rc;
-}
-
-int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
-                      struct ib_recv_wr **bad_wr)
-{
-       struct qedr_dev *dev = get_qedr_dev(ibqp->device);
-       struct qedr_qp *qp = get_qedr_qp(ibqp);
-       unsigned long flags;
-       int rc = 0;
-
-       if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
-           (qp->state != QED_ROCE_QP_STATE_RTS)) {
-               *bad_wr = wr;
-               DP_ERR(dev,
-                      "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n",
-                      qp->state);
-               return -EINVAL;
-       }
-
-       spin_lock_irqsave(&qp->q_lock, flags);
-
-       while (wr) {
-               if (wr->num_sge > QEDR_GSI_MAX_RECV_SGE) {
-                       DP_ERR(dev,
-                              "gsi post recv: failed to post rx buffer. too many sges %d>%d\n",
-                              wr->num_sge, QEDR_GSI_MAX_RECV_SGE);
-                       goto err;
-               }
-
-               rc = dev->ops->ll2_post_rx_buffer(dev->rdma_ctx,
-                                                 dev->gsi_ll2_handle,
-                                                 wr->sg_list[0].addr,
-                                                 wr->sg_list[0].length,
-                                                 0 /* cookie */,
-                                                 1 /* notify_fw */);
-               if (rc) {
-                       DP_ERR(dev,
-                              "gsi post recv: failed to post rx buffer (rc=%d)\n",
-                              rc);
-                       goto err;
-               }
-
-               memset(&qp->rqe_wr_id[qp->rq.prod], 0,
-                      sizeof(qp->rqe_wr_id[qp->rq.prod]));
-               qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
-               qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
-
-               qedr_inc_sw_prod(&qp->rq);
-
-               wr = wr->next;
-       }
-
-       spin_unlock_irqrestore(&qp->q_lock, flags);
-
-       return rc;
-err:
-       spin_unlock_irqrestore(&qp->q_lock, flags);
-       *bad_wr = wr;
-       return -ENOMEM;
-}
-
-int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
-{
-       struct qedr_dev *dev = get_qedr_dev(ibcq->device);
-       struct qedr_cq *cq = get_qedr_cq(ibcq);
-       struct qedr_qp *qp = dev->gsi_qp;
-       unsigned long flags;
-       int i = 0;
-
-       spin_lock_irqsave(&cq->cq_lock, flags);
-
-       while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
-               memset(&wc[i], 0, sizeof(*wc));
-
-               wc[i].qp = &qp->ibqp;
-               wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
-               wc[i].opcode = IB_WC_RECV;
-               wc[i].pkey_index = 0;
-               wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
-                   IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
-               /* 0 - currently only one recv sg is supported */
-               wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
-               wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
-               ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
-               wc[i].wc_flags |= IB_WC_WITH_SMAC;
-               if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
-                       wc[i].wc_flags |= IB_WC_WITH_VLAN;
-                       wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
-               }
-
-               qedr_inc_sw_cons(&qp->rq);
-               i++;
-       }
-
-       while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
-               memset(&wc[i], 0, sizeof(*wc));
-
-               wc[i].qp = &qp->ibqp;
-               wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
-               wc[i].opcode = IB_WC_SEND;
-               wc[i].status = IB_WC_SUCCESS;
-
-               qedr_inc_sw_cons(&qp->sq);
-               i++;
-       }
-
-       spin_unlock_irqrestore(&cq->cq_lock, flags);
-
-       DP_DEBUG(dev, QEDR_MSG_GSI,
-                "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%x, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
-                num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
-                qp->sq.gsi_cons, qp->ibqp.qp_num);
-
-       return i;
-}
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.h b/drivers/infiniband/hw/qedr/qedr_cm.h
deleted file mode 100644 (file)
index a559163..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/* QLogic qedr NIC Driver
- * Copyright (c) 2015-2016  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef LINUX_QEDR_CM_H_
-#define LINUX_QEDR_CM_H_
-
-#define QEDR_GSI_MAX_RECV_WR   (4096)
-#define QEDR_GSI_MAX_SEND_WR   (4096)
-
-#define QEDR_GSI_MAX_RECV_SGE  (1)     /* LL2 FW limitation */
-
-#define QEDR_ROCE_V2_UDP_SPORT (0000)
-
-static inline u32 qedr_get_ipv4_from_gid(const u8 *gid)
-{
-       return *(u32 *)(void *)&gid[12];
-}
-
-/* RDMA CM */
-int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
-int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
-                      struct ib_recv_wr **bad_wr);
-int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-                      struct ib_send_wr **bad_wr);
-struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
-                                struct ib_qp_init_attr *attrs,
-                                struct qedr_qp *qp);
-void qedr_store_gsi_qp_cq(struct qedr_dev *dev,
-                         struct qedr_qp *qp, struct ib_qp_init_attr *attrs);
-int qedr_destroy_gsi_qp(struct qedr_dev *dev);
-void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info);
-#endif
index 5c98d2055cadfb91e33e5102b900bb6043c615a4..b7587f10e7dea0679e73a19f23113a521b1d7a2f 100644 (file)
@@ -655,8 +655,10 @@ struct rdma_sq_rdma_wqe_1st {
 #define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT      4
 #define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK  0x1
 #define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
-#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK        0x3
-#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT       6
+#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_MASK     0x1
+#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_SHIFT    6
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK        0x1
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT       7
        u8 wqe_size;
        u8 prev_wqe_size;
 };
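The qedr_hsi_rdma.h hunk above carves a READ_INV flag bit out of the former two reserved bits in the first RDMA SQ WQE descriptor. As an illustrative sketch only (the function name and the WQE's "flags" member are assumptions, not taken from the hunk), a WQE-building path could set the bit with the SET_FIELD helper this driver already uses elsewhere:

/* Illustrative only: helper name and the "flags" byte are assumptions. */
static void qedr_wqe_set_read_inv(struct rdma_sq_rdma_wqe_1st *wqe, bool read_inv)
{
	SET_FIELD(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, read_inv ? 1 : 0);
}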
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
new file mode 100644 (file)
index 0000000..2950d3f
--- /dev/null
@@ -0,0 +1,749 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+#include <net/addrconf.h>
+#include <net/route.h>
+#include <net/ip6_route.h>
+#include <net/flow.h>
+#include "qedr.h"
+#include "qedr_iw_cm.h"
+
+static inline void
+qedr_fill_sockaddr4(const struct qed_iwarp_cm_info *cm_info,
+                   struct iw_cm_event *event)
+{
+       struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
+       struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;
+
+       laddr->sin_family = AF_INET;
+       raddr->sin_family = AF_INET;
+
+       laddr->sin_port = htons(cm_info->local_port);
+       raddr->sin_port = htons(cm_info->remote_port);
+
+       laddr->sin_addr.s_addr = htonl(cm_info->local_ip[0]);
+       raddr->sin_addr.s_addr = htonl(cm_info->remote_ip[0]);
+}
+
+static inline void
+qedr_fill_sockaddr6(const struct qed_iwarp_cm_info *cm_info,
+                   struct iw_cm_event *event)
+{
+       struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
+       struct sockaddr_in6 *raddr6 =
+           (struct sockaddr_in6 *)&event->remote_addr;
+       int i;
+
+       laddr6->sin6_family = AF_INET6;
+       raddr6->sin6_family = AF_INET6;
+
+       laddr6->sin6_port = htons(cm_info->local_port);
+       raddr6->sin6_port = htons(cm_info->remote_port);
+
+       for (i = 0; i < 4; i++) {
+               laddr6->sin6_addr.in6_u.u6_addr32[i] =
+                   htonl(cm_info->local_ip[i]);
+               raddr6->sin6_addr.in6_u.u6_addr32[i] =
+                   htonl(cm_info->remote_ip[i]);
+       }
+}
+
+void
+qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
+{
+       struct qedr_iw_listener *listener = (struct qedr_iw_listener *)context;
+       struct qedr_dev *dev = listener->dev;
+       struct iw_cm_event event;
+       struct qedr_iw_ep *ep;
+
+       ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
+       if (!ep)
+               return;
+
+       ep->dev = dev;
+       ep->qed_context = params->ep_context;
+
+       memset(&event, 0, sizeof(event));
+       event.event = IW_CM_EVENT_CONNECT_REQUEST;
+       event.status = params->status;
+
+       if (!IS_ENABLED(CONFIG_IPV6) ||
+           params->cm_info->ip_version == QED_TCP_IPV4)
+               qedr_fill_sockaddr4(params->cm_info, &event);
+       else
+               qedr_fill_sockaddr6(params->cm_info, &event);
+
+       event.provider_data = (void *)ep;
+       event.private_data = (void *)params->cm_info->private_data;
+       event.private_data_len = (u8)params->cm_info->private_data_len;
+       event.ord = params->cm_info->ord;
+       event.ird = params->cm_info->ird;
+
+       listener->cm_id->event_handler(listener->cm_id, &event);
+}
+
+void
+qedr_iw_issue_event(void *context,
+                   struct qed_iwarp_cm_event_params *params,
+                   enum iw_cm_event_type event_type)
+{
+       struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+       struct iw_cm_event event;
+
+       memset(&event, 0, sizeof(event));
+       event.status = params->status;
+       event.event = event_type;
+
+       if (params->cm_info) {
+               event.ird = params->cm_info->ird;
+               event.ord = params->cm_info->ord;
+               event.private_data_len = params->cm_info->private_data_len;
+               event.private_data = (void *)params->cm_info->private_data;
+       }
+
+       if (ep->cm_id)
+               ep->cm_id->event_handler(ep->cm_id, &event);
+}
+
+void
+qedr_iw_close_event(void *context, struct qed_iwarp_cm_event_params *params)
+{
+       struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+
+       if (ep->cm_id) {
+               qedr_iw_issue_event(context, params, IW_CM_EVENT_CLOSE);
+
+               ep->cm_id->rem_ref(ep->cm_id);
+               ep->cm_id = NULL;
+       }
+}
+
+void
+qedr_iw_qp_event(void *context,
+                struct qed_iwarp_cm_event_params *params,
+                enum ib_event_type ib_event, char *str)
+{
+       struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+       struct qedr_dev *dev = ep->dev;
+       struct ib_qp *ibqp = &ep->qp->ibqp;
+       struct ib_event event;
+
+       DP_NOTICE(dev, "QP error received: %s\n", str);
+
+       if (ibqp->event_handler) {
+               event.event = ib_event;
+               event.device = ibqp->device;
+               event.element.qp = ibqp;
+               ibqp->event_handler(&event, ibqp->qp_context);
+       }
+}
+
+struct qedr_discon_work {
+       struct work_struct              work;
+       struct qedr_iw_ep               *ep;
+       enum qed_iwarp_event_type       event;
+       int                             status;
+};
+
+static void qedr_iw_disconnect_worker(struct work_struct *work)
+{
+       struct qedr_discon_work *dwork =
+           container_of(work, struct qedr_discon_work, work);
+       struct qed_rdma_modify_qp_in_params qp_params = { 0 };
+       struct qedr_iw_ep *ep = dwork->ep;
+       struct qedr_dev *dev = ep->dev;
+       struct qedr_qp *qp = ep->qp;
+       struct iw_cm_event event;
+
+       if (qp->destroyed) {
+               kfree(dwork);
+               qedr_iw_qp_rem_ref(&qp->ibqp);
+               return;
+       }
+
+       memset(&event, 0, sizeof(event));
+       event.status = dwork->status;
+       event.event = IW_CM_EVENT_DISCONNECT;
+
+       /* Success means graceful disconnect was requested. modifying
+        * to SQD is translated to graceful disconnect. O/w reset is sent
+        */
+       if (dwork->status)
+               qp_params.new_state = QED_ROCE_QP_STATE_ERR;
+       else
+               qp_params.new_state = QED_ROCE_QP_STATE_SQD;
+
+       kfree(dwork);
+
+       if (ep->cm_id)
+               ep->cm_id->event_handler(ep->cm_id, &event);
+
+       SET_FIELD(qp_params.modify_flags,
+                 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
+
+       dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params);
+
+       qedr_iw_qp_rem_ref(&qp->ibqp);
+}
+
+void
+qedr_iw_disconnect_event(void *context,
+                        struct qed_iwarp_cm_event_params *params)
+{
+       struct qedr_discon_work *work;
+       struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+       struct qedr_dev *dev = ep->dev;
+       struct qedr_qp *qp = ep->qp;
+
+       work = kzalloc(sizeof(*work), GFP_ATOMIC);
+       if (!work)
+               return;
+
+       qedr_iw_qp_add_ref(&qp->ibqp);
+       work->ep = ep;
+       work->event = params->event;
+       work->status = params->status;
+
+       INIT_WORK(&work->work, qedr_iw_disconnect_worker);
+       queue_work(dev->iwarp_wq, &work->work);
+}
+
+static void
+qedr_iw_passive_complete(void *context,
+                        struct qed_iwarp_cm_event_params *params)
+{
+       struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+       struct qedr_dev *dev = ep->dev;
+
+       /* We will only reach the following state if MPA_REJECT was called on
+        * passive. In this case there will be no associated QP.
+        */
+       if ((params->status == -ECONNREFUSED) && (!ep->qp)) {
+               DP_DEBUG(dev, QEDR_MSG_IWARP,
+                        "PASSIVE connection refused releasing ep...\n");
+               kfree(ep);
+               return;
+       }
+
+       qedr_iw_issue_event(context, params, IW_CM_EVENT_ESTABLISHED);
+
+       if (params->status < 0)
+               qedr_iw_close_event(context, params);
+}
+
+int
+qedr_iw_mpa_reply(void *context, struct qed_iwarp_cm_event_params *params)
+{
+       struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+       struct qedr_dev *dev = ep->dev;
+       struct qed_iwarp_send_rtr_in rtr_in;
+
+       rtr_in.ep_context = params->ep_context;
+
+       return dev->ops->iwarp_send_rtr(dev->rdma_ctx, &rtr_in);
+}
+
+int
+qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params)
+{
+       struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+       struct qedr_dev *dev = ep->dev;
+
+       switch (params->event) {
+       case QED_IWARP_EVENT_MPA_REQUEST:
+               qedr_iw_mpa_request(context, params);
+               break;
+       case QED_IWARP_EVENT_ACTIVE_MPA_REPLY:
+               qedr_iw_mpa_reply(context, params);
+               break;
+       case QED_IWARP_EVENT_PASSIVE_COMPLETE:
+               ep->during_connect = 0;
+               qedr_iw_passive_complete(context, params);
+               break;
+
+       case QED_IWARP_EVENT_ACTIVE_COMPLETE:
+               ep->during_connect = 0;
+               qedr_iw_issue_event(context,
+                                   params,
+                                   IW_CM_EVENT_CONNECT_REPLY);
+               if (params->status < 0) {
+                       struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+
+                       ep->cm_id->rem_ref(ep->cm_id);
+                       ep->cm_id = NULL;
+               }
+               break;
+       case QED_IWARP_EVENT_DISCONNECT:
+               qedr_iw_disconnect_event(context, params);
+               break;
+       case QED_IWARP_EVENT_CLOSE:
+               ep->during_connect = 0;
+               qedr_iw_close_event(context, params);
+               break;
+       case QED_IWARP_EVENT_RQ_EMPTY:
+               qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+                                "QED_IWARP_EVENT_RQ_EMPTY");
+               break;
+       case QED_IWARP_EVENT_IRQ_FULL:
+               qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+                                "QED_IWARP_EVENT_IRQ_FULL");
+               break;
+       case QED_IWARP_EVENT_LLP_TIMEOUT:
+               qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+                                "QED_IWARP_EVENT_LLP_TIMEOUT");
+               break;
+       case QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR:
+               qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
+                                "QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR");
+               break;
+       case QED_IWARP_EVENT_CQ_OVERFLOW:
+               qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+                                "QED_IWARP_EVENT_CQ_OVERFLOW");
+               break;
+       case QED_IWARP_EVENT_QP_CATASTROPHIC:
+               qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+                                "QED_IWARP_EVENT_QP_CATASTROPHIC");
+               break;
+       case QED_IWARP_EVENT_LOCAL_ACCESS_ERROR:
+               qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
+                                "QED_IWARP_EVENT_LOCAL_ACCESS_ERROR");
+               break;
+       case QED_IWARP_EVENT_REMOTE_OPERATION_ERROR:
+               qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+                                "QED_IWARP_EVENT_REMOTE_OPERATION_ERROR");
+               break;
+       case QED_IWARP_EVENT_TERMINATE_RECEIVED:
+               DP_NOTICE(dev, "Got terminate message\n");
+               break;
+       default:
+               DP_NOTICE(dev, "Unknown event received %d\n", params->event);
+               break;
+       };
+       return 0;
+}
+
+static u16 qedr_iw_get_vlan_ipv4(struct qedr_dev *dev, u32 *addr)
+{
+       struct net_device *ndev;
+       u16 vlan_id = 0;
+
+       ndev = ip_dev_find(&init_net, htonl(addr[0]));
+
+       if (ndev) {
+               vlan_id = rdma_vlan_dev_vlan_id(ndev);
+               dev_put(ndev);
+       }
+       if (vlan_id == 0xffff)
+               vlan_id = 0;
+       return vlan_id;
+}
+
+static u16 qedr_iw_get_vlan_ipv6(u32 *addr)
+{
+       struct net_device *ndev = NULL;
+       struct in6_addr laddr6;
+       u16 vlan_id = 0;
+       int i;
+
+       if (!IS_ENABLED(CONFIG_IPV6))
+               return vlan_id;
+
+       for (i = 0; i < 4; i++)
+               laddr6.in6_u.u6_addr32[i] = htonl(addr[i]);
+
+       rcu_read_lock();
+       for_each_netdev_rcu(&init_net, ndev) {
+               if (ipv6_chk_addr(&init_net, &laddr6, ndev, 1)) {
+                       vlan_id = rdma_vlan_dev_vlan_id(ndev);
+                       break;
+               }
+       }
+
+       rcu_read_unlock();
+       if (vlan_id == 0xffff)
+               vlan_id = 0;
+
+       return vlan_id;
+}
+
+static int
+qedr_addr4_resolve(struct qedr_dev *dev,
+                  struct sockaddr_in *src_in,
+                  struct sockaddr_in *dst_in, u8 *dst_mac)
+{
+       __be32 src_ip = src_in->sin_addr.s_addr;
+       __be32 dst_ip = dst_in->sin_addr.s_addr;
+       struct neighbour *neigh = NULL;
+       struct rtable *rt = NULL;
+       int rc = 0;
+
+       rt = ip_route_output(&init_net, dst_ip, src_ip, 0, 0);
+       if (IS_ERR(rt)) {
+               DP_ERR(dev, "ip_route_output returned error\n");
+               return -EINVAL;
+       }
+
+       neigh = dst_neigh_lookup(&rt->dst, &dst_ip);
+
+       if (neigh) {
+               rcu_read_lock();
+               if (neigh->nud_state & NUD_VALID) {
+                       ether_addr_copy(dst_mac, neigh->ha);
+                       DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac);
+               } else {
+                       neigh_event_send(neigh, NULL);
+               }
+               rcu_read_unlock();
+               neigh_release(neigh);
+       }
+
+       ip_rt_put(rt);
+
+       return rc;
+}
+
+static int
+qedr_addr6_resolve(struct qedr_dev *dev,
+                  struct sockaddr_in6 *src_in,
+                  struct sockaddr_in6 *dst_in, u8 *dst_mac)
+{
+       struct neighbour *neigh = NULL;
+       struct dst_entry *dst;
+       struct flowi6 fl6;
+       int rc = 0;
+
+       memset(&fl6, 0, sizeof(fl6));
+       fl6.daddr = dst_in->sin6_addr;
+       fl6.saddr = src_in->sin6_addr;
+
+       dst = ip6_route_output(&init_net, NULL, &fl6);
+
+       if ((!dst) || dst->error) {
+               if (dst) {
+                       dst_release(dst);
+                       DP_ERR(dev,
+                              "ip6_route_output returned dst->error = %d\n",
+                              dst->error);
+               }
+               return -EINVAL;
+       }
+       neigh = dst_neigh_lookup(dst, &dst_in);
+
+       if (neigh) {
+               rcu_read_lock();
+               if (neigh->nud_state & NUD_VALID) {
+                       ether_addr_copy(dst_mac, neigh->ha);
+                       DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac);
+               } else {
+                       neigh_event_send(neigh, NULL);
+               }
+               rcu_read_unlock();
+               neigh_release(neigh);
+       }
+
+       dst_release(dst);
+
+       return rc;
+}
+
+int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+       struct qedr_dev *dev = get_qedr_dev(cm_id->device);
+       struct qed_iwarp_connect_out out_params;
+       struct qed_iwarp_connect_in in_params;
+       struct qed_iwarp_cm_info *cm_info;
+       struct sockaddr_in6 *laddr6;
+       struct sockaddr_in6 *raddr6;
+       struct sockaddr_in *laddr;
+       struct sockaddr_in *raddr;
+       struct qedr_iw_ep *ep;
+       struct qedr_qp *qp;
+       int rc = 0;
+       int i;
+
+       qp = idr_find(&dev->qpidr, conn_param->qpn);
+       if (!qp) {
+               DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
+               return -EINVAL;
+       }
+
+       laddr = (struct sockaddr_in *)&cm_id->local_addr;
+       raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+       laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
+       raddr6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
+
+       DP_DEBUG(dev, QEDR_MSG_IWARP,
+                "Connect source address: %pISpc, remote address: %pISpc\n",
+                &cm_id->local_addr, &cm_id->remote_addr);
+
+       if (!laddr->sin_port || !raddr->sin_port)
+               return -EINVAL;
+
+       ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+       if (!ep)
+               return -ENOMEM;
+
+       ep->dev = dev;
+       ep->qp = qp;
+       qp->ep = ep;
+       cm_id->add_ref(cm_id);
+       ep->cm_id = cm_id;
+
+       in_params.event_cb = qedr_iw_event_handler;
+       in_params.cb_context = ep;
+
+       cm_info = &in_params.cm_info;
+       memset(cm_info->local_ip, 0, sizeof(cm_info->local_ip));
+       memset(cm_info->remote_ip, 0, sizeof(cm_info->remote_ip));
+
+       if (!IS_ENABLED(CONFIG_IPV6) ||
+           cm_id->remote_addr.ss_family == AF_INET) {
+               cm_info->ip_version = QED_TCP_IPV4;
+
+               cm_info->remote_ip[0] = ntohl(raddr->sin_addr.s_addr);
+               cm_info->local_ip[0] = ntohl(laddr->sin_addr.s_addr);
+               cm_info->remote_port = ntohs(raddr->sin_port);
+               cm_info->local_port = ntohs(laddr->sin_port);
+               cm_info->vlan = qedr_iw_get_vlan_ipv4(dev, cm_info->local_ip);
+
+               rc = qedr_addr4_resolve(dev, laddr, raddr,
+                                       (u8 *)in_params.remote_mac_addr);
+
+               in_params.mss = dev->iwarp_max_mtu -
+                   (sizeof(struct iphdr) + sizeof(struct tcphdr));
+
+       } else {
+               in_params.cm_info.ip_version = QED_TCP_IPV6;
+
+               for (i = 0; i < 4; i++) {
+                       cm_info->remote_ip[i] =
+                           ntohl(raddr6->sin6_addr.in6_u.u6_addr32[i]);
+                       cm_info->local_ip[i] =
+                           ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]);
+               }
+
+               cm_info->local_port = ntohs(laddr6->sin6_port);
+               cm_info->remote_port = ntohs(raddr6->sin6_port);
+
+               in_params.mss = dev->iwarp_max_mtu -
+                   (sizeof(struct ipv6hdr) + sizeof(struct tcphdr));
+
+               cm_info->vlan = qedr_iw_get_vlan_ipv6(cm_info->local_ip);
+
+               rc = qedr_addr6_resolve(dev, laddr6, raddr6,
+                                       (u8 *)in_params.remote_mac_addr);
+       }
+       if (rc)
+               goto err;
+
+       DP_DEBUG(dev, QEDR_MSG_IWARP,
+                "ord = %d ird=%d private_data=%p private_data_len=%d rq_psn=%d\n",
+                conn_param->ord, conn_param->ird, conn_param->private_data,
+                conn_param->private_data_len, qp->rq_psn);
+
+       cm_info->ord = conn_param->ord;
+       cm_info->ird = conn_param->ird;
+       cm_info->private_data = conn_param->private_data;
+       cm_info->private_data_len = conn_param->private_data_len;
+       in_params.qp = qp->qed_qp;
+       memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
+
+       ep->during_connect = 1;
+       rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
+       if (rc)
+               goto err;
+
+       return rc;
+
+err:
+       cm_id->rem_ref(cm_id);
+       kfree(ep);
+       return rc;
+}
+
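+/*
+ * iw_cm listen callback: allocate a listener object and register it with
+ * the qed core for the requested local address, port and VLAN.
+ */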
+int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
+{
+       struct qedr_dev *dev = get_qedr_dev(cm_id->device);
+       struct qedr_iw_listener *listener;
+       struct qed_iwarp_listen_in iparams;
+       struct qed_iwarp_listen_out oparams;
+       struct sockaddr_in *laddr;
+       struct sockaddr_in6 *laddr6;
+       int rc;
+       int i;
+
+       laddr = (struct sockaddr_in *)&cm_id->local_addr;
+       laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
+
+       DP_DEBUG(dev, QEDR_MSG_IWARP,
+                "Create Listener address: %pISpc\n", &cm_id->local_addr);
+
+       listener = kzalloc(sizeof(*listener), GFP_KERNEL);
+       if (!listener)
+               return -ENOMEM;
+
+       listener->dev = dev;
+       cm_id->add_ref(cm_id);
+       listener->cm_id = cm_id;
+       listener->backlog = backlog;
+
+       iparams.cb_context = listener;
+       iparams.event_cb = qedr_iw_event_handler;
+       iparams.max_backlog = backlog;
+
+       if (!IS_ENABLED(CONFIG_IPV6) ||
+           cm_id->local_addr.ss_family == AF_INET) {
+               iparams.ip_version = QED_TCP_IPV4;
+               memset(iparams.ip_addr, 0, sizeof(iparams.ip_addr));
+
+               iparams.ip_addr[0] = ntohl(laddr->sin_addr.s_addr);
+               iparams.port = ntohs(laddr->sin_port);
+               iparams.vlan = qedr_iw_get_vlan_ipv4(dev, iparams.ip_addr);
+       } else {
+               iparams.ip_version = QED_TCP_IPV6;
+
+               for (i = 0; i < 4; i++) {
+                       iparams.ip_addr[i] =
+                           ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]);
+               }
+
+               iparams.port = ntohs(laddr6->sin6_port);
+
+               iparams.vlan = qedr_iw_get_vlan_ipv6(iparams.ip_addr);
+       }
+       rc = dev->ops->iwarp_create_listen(dev->rdma_ctx, &iparams, &oparams);
+       if (rc)
+               goto err;
+
+       listener->qed_handle = oparams.handle;
+       cm_id->provider_data = listener;
+       return rc;
+
+err:
+       cm_id->rem_ref(cm_id);
+       kfree(listener);
+       return rc;
+}
+
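+/* Tear down a listener previously created by qedr_iw_create_listen(). */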
+int qedr_iw_destroy_listen(struct iw_cm_id *cm_id)
+{
+       struct qedr_iw_listener *listener = cm_id->provider_data;
+       struct qedr_dev *dev = get_qedr_dev(cm_id->device);
+       int rc = 0;
+
+       if (listener->qed_handle)
+               rc = dev->ops->iwarp_destroy_listen(dev->rdma_ctx,
+                                                   listener->qed_handle);
+
+       cm_id->rem_ref(cm_id);
+       return rc;
+}
+
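+/*
+ * iw_cm accept callback: bind the QP identified by conn_param->qpn to the
+ * endpoint of the incoming connection request and accept it in the qed core.
+ */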
+int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+       struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data;
+       struct qedr_dev *dev = ep->dev;
+       struct qedr_qp *qp;
+       struct qed_iwarp_accept_in params;
+       int rc;
+
+       DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
+
+       qp = idr_find(&dev->qpidr, conn_param->qpn);
+       if (!qp) {
+               DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
+               return -EINVAL;
+       }
+
+       ep->qp = qp;
+       qp->ep = ep;
+       cm_id->add_ref(cm_id);
+       ep->cm_id = cm_id;
+
+       params.ep_context = ep->qed_context;
+       params.cb_context = ep;
+       params.qp = ep->qp->qed_qp;
+       params.private_data = conn_param->private_data;
+       params.private_data_len = conn_param->private_data_len;
+       params.ird = conn_param->ird;
+       params.ord = conn_param->ord;
+
+       ep->during_connect = 1;
+       rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
+       if (rc)
+               goto err;
+
+       return rc;
+err:
+       ep->during_connect = 0;
+       cm_id->rem_ref(cm_id);
+       return rc;
+}
+
+int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+       struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data;
+       struct qedr_dev *dev = ep->dev;
+       struct qed_iwarp_reject_in params;
+
+       params.ep_context = ep->qed_context;
+       params.cb_context = ep;
+       params.private_data = pdata;
+       params.private_data_len = pdata_len;
+       ep->qp = NULL;
+
+       return dev->ops->iwarp_reject(dev->rdma_ctx, &params);
+}
+
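+/*
+ * QP reference counting for the iw_cm core; the QP is removed from the idr
+ * and freed once the last reference is dropped.
+ */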
+void qedr_iw_qp_add_ref(struct ib_qp *ibqp)
+{
+       struct qedr_qp *qp = get_qedr_qp(ibqp);
+
+       atomic_inc(&qp->refcnt);
+}
+
+void qedr_iw_qp_rem_ref(struct ib_qp *ibqp)
+{
+       struct qedr_qp *qp = get_qedr_qp(ibqp);
+
+       if (atomic_dec_and_test(&qp->refcnt)) {
+               spin_lock_irq(&qp->dev->idr_lock);
+               idr_remove(&qp->dev->qpidr, qp->qp_id);
+               spin_unlock_irq(&qp->dev->idr_lock);
+               kfree(qp);
+       }
+}
+
+struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibdev);
+
+       return idr_find(&dev->qpidr, qpn);
+}
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.h b/drivers/infiniband/hw/qedr/qedr_iw_cm.h
new file mode 100644 (file)
index 0000000..08f4b10
--- /dev/null
@@ -0,0 +1,49 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <rdma/iw_cm.h>
+
+int qedr_iw_connect(struct iw_cm_id *cm_id,
+                   struct iw_cm_conn_param *conn_param);
+
+int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog);
+
+int qedr_iw_destroy_listen(struct iw_cm_id *cm_id);
+
+int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+
+int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
+
+void qedr_iw_qp_add_ref(struct ib_qp *qp);
+
+void qedr_iw_qp_rem_ref(struct ib_qp *qp);
+
+struct ib_qp *qedr_iw_get_qp(struct ib_device *dev, int qpn);
diff --git a/drivers/infiniband/hw/qedr/qedr_roce_cm.c b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
new file mode 100644 (file)
index 0000000..c3c249b
--- /dev/null
@@ -0,0 +1,744 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <linux/iommu.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_rdma_if.h>
+#include "qedr.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+#include "qedr_roce_cm.h"
+
+void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
+{
+       info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
+}
+
+void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
+                         struct ib_qp_init_attr *attrs)
+{
+       dev->gsi_qp_created = 1;
+       dev->gsi_sqcq = get_qedr_cq(attrs->send_cq);
+       dev->gsi_rqcq = get_qedr_cq(attrs->recv_cq);
+       dev->gsi_qp = qp;
+}
+
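+/*
+ * LL2 TX completion callback: free the header buffer and packet descriptor,
+ * advance the software GSI SQ consumer and signal the send CQ completion
+ * handler.
+ */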
+void qedr_ll2_complete_tx_packet(void *cxt,
+                                u8 connection_handle,
+                                void *cookie,
+                                dma_addr_t first_frag_addr,
+                                bool b_last_fragment, bool b_last_packet)
+{
+       struct qedr_dev *dev = (struct qedr_dev *)cxt;
+       struct qed_roce_ll2_packet *pkt = cookie;
+       struct qedr_cq *cq = dev->gsi_sqcq;
+       struct qedr_qp *qp = dev->gsi_qp;
+       unsigned long flags;
+
+       DP_DEBUG(dev, QEDR_MSG_GSI,
+                "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n",
+                dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
+                cq->ibcq.comp_handler ? "Yes" : "No");
+
+       dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr,
+                         pkt->header.baddr);
+       kfree(pkt);
+
+       spin_lock_irqsave(&qp->q_lock, flags);
+       qedr_inc_sw_gsi_cons(&qp->sq);
+       spin_unlock_irqrestore(&qp->q_lock, flags);
+
+       if (cq->ibcq.comp_handler)
+               (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+}
+
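+/*
+ * LL2 RX completion callback: record length, VLAN and source MAC of the
+ * received GSI packet, advance the RQ consumer and signal the recv CQ.
+ */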
+void qedr_ll2_complete_rx_packet(void *cxt,
+                                struct qed_ll2_comp_rx_data *data)
+{
+       struct qedr_dev *dev = (struct qedr_dev *)cxt;
+       struct qedr_cq *cq = dev->gsi_rqcq;
+       struct qedr_qp *qp = dev->gsi_qp;
+       unsigned long flags;
+
+       spin_lock_irqsave(&qp->q_lock, flags);
+
+       qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
+               -EINVAL : 0;
+       qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan;
+       /* note: length stands for data length i.e. GRH is excluded */
+       qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
+               data->length.data_length;
+       *((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) =
+               ntohl(data->opaque_data_0);
+       *((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) =
+               ntohs((u16)data->opaque_data_1);
+
+       qedr_inc_sw_gsi_cons(&qp->rq);
+
+       spin_unlock_irqrestore(&qp->q_lock, flags);
+
+       if (cq->ibcq.comp_handler)
+               (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+}
+
+void qedr_ll2_release_rx_packet(void *cxt,
+                               u8 connection_handle,
+                               void *cookie,
+                               dma_addr_t rx_buf_addr, bool b_last_packet)
+{
+       /* Do nothing... */
+}
+
+static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
+                               struct ib_qp_init_attr *attrs)
+{
+       struct qed_rdma_destroy_cq_in_params iparams;
+       struct qed_rdma_destroy_cq_out_params oparams;
+       struct qedr_cq *cq;
+
+       cq = get_qedr_cq(attrs->send_cq);
+       iparams.icid = cq->icid;
+       dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+       dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+
+       cq = get_qedr_cq(attrs->recv_cq);
+       /* if a dedicated recv_cq was used, delete it too */
+       if (iparams.icid != cq->icid) {
+               iparams.icid = cq->icid;
+               dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+               dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+       }
+}
+
+static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
+                                         struct ib_qp_init_attr *attrs)
+{
+       if (attrs->cap.max_recv_sge > QEDR_GSI_MAX_RECV_SGE) {
+               DP_ERR(dev,
+                      " create gsi qp: failed. max_recv_sge is larger than the max %d>%d\n",
+                      attrs->cap.max_recv_sge, QEDR_GSI_MAX_RECV_SGE);
+               return -EINVAL;
+       }
+
+       if (attrs->cap.max_recv_wr > QEDR_GSI_MAX_RECV_WR) {
+               DP_ERR(dev,
+                      " create gsi qp: failed. max_recv_wr is too large %d>%d\n",
+                      attrs->cap.max_recv_wr, QEDR_GSI_MAX_RECV_WR);
+               return -EINVAL;
+       }
+
+       if (attrs->cap.max_send_wr > QEDR_GSI_MAX_SEND_WR) {
+               DP_ERR(dev,
+                      " create gsi qp: failed. max_send_wr is too large %d>%d\n",
+                      attrs->cap.max_send_wr, QEDR_GSI_MAX_SEND_WR);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
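+/*
+ * Post a GSI packet to the LL2 connection: the UD header goes out as the
+ * first fragment, followed by one fragment per payload SGE.
+ */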
+static int qedr_ll2_post_tx(struct qedr_dev *dev,
+                           struct qed_roce_ll2_packet *pkt)
+{
+       enum qed_ll2_roce_flavor_type roce_flavor;
+       struct qed_ll2_tx_pkt_info ll2_tx_pkt;
+       int rc;
+       int i;
+
+       memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt));
+
+       roce_flavor = (pkt->roce_mode == ROCE_V1) ?
+           QED_LL2_ROCE : QED_LL2_RROCE;
+
+       if (pkt->roce_mode == ROCE_V2_IPV4)
+               ll2_tx_pkt.enable_ip_cksum = 1;
+
+       ll2_tx_pkt.num_of_bds = 1 /* hdr */  + pkt->n_seg;
+       ll2_tx_pkt.vlan = 0;
+       ll2_tx_pkt.tx_dest = pkt->tx_dest;
+       ll2_tx_pkt.qed_roce_flavor = roce_flavor;
+       ll2_tx_pkt.first_frag = pkt->header.baddr;
+       ll2_tx_pkt.first_frag_len = pkt->header.len;
+       ll2_tx_pkt.cookie = pkt;
+
+       /* tx header */
+       rc = dev->ops->ll2_prepare_tx_packet(dev->rdma_ctx,
+                                            dev->gsi_ll2_handle,
+                                            &ll2_tx_pkt, 1);
+       if (rc) {
+               /* TX failed while posting header - release resources */
+               dma_free_coherent(&dev->pdev->dev, pkt->header.len,
+                                 pkt->header.vaddr, pkt->header.baddr);
+               kfree(pkt);
+
+               DP_ERR(dev, "roce ll2 tx: header failed (rc=%d)\n", rc);
+               return rc;
+       }
+
+       /* tx payload */
+       for (i = 0; i < pkt->n_seg; i++) {
+               rc = dev->ops->ll2_set_fragment_of_tx_packet(
+                       dev->rdma_ctx,
+                       dev->gsi_ll2_handle,
+                       pkt->payload[i].baddr,
+                       pkt->payload[i].len);
+
+               if (rc) {
+                       /* If this fails there is not much we can do: part of
+                        * the packet has already been posted, so the memory
+                        * cannot be freed here and must wait for the TX
+                        * completion callback.
+                        */
+                       DP_ERR(dev, "ll2 tx: payload failed (rc=%d)\n", rc);
+                       return rc;
+               }
+       }
+
+       return 0;
+}
+
+int qedr_ll2_stop(struct qedr_dev *dev)
+{
+       int rc;
+
+       if (dev->gsi_ll2_handle == QED_LL2_UNUSED_HANDLE)
+               return 0;
+
+       /* remove LL2 MAC address filter */
+       rc = dev->ops->ll2_set_mac_filter(dev->cdev,
+                                         dev->gsi_ll2_mac_address, NULL);
+
+       rc = dev->ops->ll2_terminate_connection(dev->rdma_ctx,
+                                               dev->gsi_ll2_handle);
+       if (rc)
+               DP_ERR(dev, "Failed to terminate LL2 connection (rc=%d)\n", rc);
+
+       dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
+
+       dev->gsi_ll2_handle = QED_LL2_UNUSED_HANDLE;
+
+       return rc;
+}
+
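+/*
+ * Acquire and establish the LL2 connection used for GSI traffic and install
+ * the device MAC filter; resources are released in reverse order on failure.
+ */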
+int qedr_ll2_start(struct qedr_dev *dev,
+                  struct ib_qp_init_attr *attrs, struct qedr_qp *qp)
+{
+       struct qed_ll2_acquire_data data;
+       struct qed_ll2_cbs cbs;
+       int rc;
+
+       /* configure and start LL2 */
+       cbs.rx_comp_cb = qedr_ll2_complete_rx_packet;
+       cbs.tx_comp_cb = qedr_ll2_complete_tx_packet;
+       cbs.rx_release_cb = qedr_ll2_release_rx_packet;
+       cbs.tx_release_cb = qedr_ll2_complete_tx_packet;
+       cbs.cookie = dev;
+
+       memset(&data, 0, sizeof(data));
+       data.input.conn_type = QED_LL2_TYPE_ROCE;
+       data.input.mtu = dev->ndev->mtu;
+       data.input.rx_num_desc = attrs->cap.max_recv_wr;
+       data.input.rx_drop_ttl0_flg = true;
+       data.input.rx_vlan_removal_en = false;
+       data.input.tx_num_desc = attrs->cap.max_send_wr;
+       data.input.tx_tc = 0;
+       data.input.tx_dest = QED_LL2_TX_DEST_NW;
+       data.input.ai_err_packet_too_big = QED_LL2_DROP_PACKET;
+       data.input.ai_err_no_buf = QED_LL2_DROP_PACKET;
+       data.input.gsi_enable = 1;
+       data.p_connection_handle = &dev->gsi_ll2_handle;
+       data.cbs = &cbs;
+
+       rc = dev->ops->ll2_acquire_connection(dev->rdma_ctx, &data);
+       if (rc) {
+               DP_ERR(dev,
+                      "ll2 start: failed to acquire LL2 connection (rc=%d)\n",
+                      rc);
+               return rc;
+       }
+
+       rc = dev->ops->ll2_establish_connection(dev->rdma_ctx,
+                                               dev->gsi_ll2_handle);
+       if (rc) {
+               DP_ERR(dev,
+                      "ll2 start: failed to establish LL2 connection (rc=%d)\n",
+                      rc);
+               goto err1;
+       }
+
+       rc = dev->ops->ll2_set_mac_filter(dev->cdev, NULL, dev->ndev->dev_addr);
+       if (rc)
+               goto err2;
+
+       return 0;
+
+err2:
+       dev->ops->ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
+err1:
+       dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
+
+       return rc;
+}
+
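+/*
+ * Create the GSI (QP1) QP: start LL2, allocate the software work-request
+ * tracking arrays and take over completion handling of the GSI CQs from
+ * the firmware.
+ */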
+struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
+                                struct ib_qp_init_attr *attrs,
+                                struct qedr_qp *qp)
+{
+       int rc;
+
+       rc = qedr_check_gsi_qp_attrs(dev, attrs);
+       if (rc)
+               return ERR_PTR(rc);
+
+       rc = qedr_ll2_start(dev, attrs, qp);
+       if (rc) {
+               DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
+               return ERR_PTR(rc);
+       }
+
+       /* create QP */
+       qp->ibqp.qp_num = 1;
+       qp->rq.max_wr = attrs->cap.max_recv_wr;
+       qp->sq.max_wr = attrs->cap.max_send_wr;
+
+       qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
+                               GFP_KERNEL);
+       if (!qp->rqe_wr_id)
+               goto err;
+       qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
+                               GFP_KERNEL);
+       if (!qp->wqe_wr_id)
+               goto err;
+
+       qedr_store_gsi_qp_cq(dev, qp, attrs);
+       ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
+
+       /* the GSI CQ is handled by the driver so remove it from the FW */
+       qedr_destroy_gsi_cq(dev, attrs);
+       dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
+
+       DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);
+
+       return &qp->ibqp;
+
+err:
+       kfree(qp->rqe_wr_id);
+
+       rc = qedr_ll2_stop(dev);
+       if (rc)
+               DP_ERR(dev, "create gsi qp: failed to stop ll2 during cleanup\n");
+
+       return ERR_PTR(-ENOMEM);
+}
+
+int qedr_destroy_gsi_qp(struct qedr_dev *dev)
+{
+       return qedr_ll2_stop(dev);
+}
+
+#define QEDR_MAX_UD_HEADER_SIZE        (100)
+#define QEDR_GSI_QPN           (1)
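+/*
+ * Build the UD header (Ethernet/VLAN, GRH or IPv4, BTH, DETH and optional
+ * UDP for RoCEv2) for a GSI send WR and report the detected RoCE mode.
+ */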
+static inline int qedr_gsi_build_header(struct qedr_dev *dev,
+                                       struct qedr_qp *qp,
+                                       struct ib_send_wr *swr,
+                                       struct ib_ud_header *udh,
+                                       int *roce_mode)
+{
+       bool has_vlan = false, has_grh_ipv6 = true;
+       struct rdma_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
+       const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+       union ib_gid sgid;
+       int send_size = 0;
+       u16 vlan_id = 0;
+       u16 ether_type;
+       struct ib_gid_attr sgid_attr;
+       int rc;
+       int ip_ver = 0;
+
+       bool has_udp = false;
+       int i;
+
+       send_size = 0;
+       for (i = 0; i < swr->num_sge; ++i)
+               send_size += swr->sg_list[i].length;
+
+       rc = ib_get_cached_gid(qp->ibqp.device, rdma_ah_get_port_num(ah_attr),
+                              grh->sgid_index, &sgid, &sgid_attr);
+       if (rc) {
+               DP_ERR(dev,
+                      "gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
+                      rdma_ah_get_port_num(ah_attr),
+                      grh->sgid_index);
+               return rc;
+       }
+
+       if (sgid_attr.ndev) {
+               vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
+               if (vlan_id < VLAN_CFI_MASK)
+                       has_vlan = true;
+
+               dev_put(sgid_attr.ndev);
+       }
+
+       if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
+               DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
+                      grh->sgid_index);
+               return -ENOENT;
+       }
+
+       has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
+       if (!has_udp) {
+               /* RoCE v1 */
+               ether_type = ETH_P_IBOE;
+               *roce_mode = ROCE_V1;
+       } else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
+               /* RoCE v2 IPv4 */
+               ip_ver = 4;
+               ether_type = ETH_P_IP;
+               has_grh_ipv6 = false;
+               *roce_mode = ROCE_V2_IPV4;
+       } else {
+               /* RoCE v2 IPv6 */
+               ip_ver = 6;
+               ether_type = ETH_P_IPV6;
+               *roce_mode = ROCE_V2_IPV6;
+       }
+
+       rc = ib_ud_header_init(send_size, false, true, has_vlan,
+                              has_grh_ipv6, ip_ver, has_udp, 0, udh);
+       if (rc) {
+               DP_ERR(dev, "gsi post send: failed to init header\n");
+               return rc;
+       }
+
+       /* ENET + VLAN headers */
+       ether_addr_copy(udh->eth.dmac_h, ah_attr->roce.dmac);
+       ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
+       if (has_vlan) {
+               udh->eth.type = htons(ETH_P_8021Q);
+               udh->vlan.tag = htons(vlan_id);
+               udh->vlan.type = htons(ether_type);
+       } else {
+               udh->eth.type = htons(ether_type);
+       }
+
+       /* BTH */
+       udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
+       udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT;
+       udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
+       udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
+       udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+
+       /* DETH */
+       udh->deth.qkey = htonl(0x80010000);
+       udh->deth.source_qpn = htonl(QEDR_GSI_QPN);
+
+       if (has_grh_ipv6) {
+               /* GRH / IPv6 header */
+               udh->grh.traffic_class = grh->traffic_class;
+               udh->grh.flow_label = grh->flow_label;
+               udh->grh.hop_limit = grh->hop_limit;
+               udh->grh.destination_gid = grh->dgid;
+               memcpy(&udh->grh.source_gid.raw, &sgid.raw,
+                      sizeof(udh->grh.source_gid.raw));
+       } else {
+               /* IPv4 header */
+               u32 ipv4_addr;
+
+               udh->ip4.protocol = IPPROTO_UDP;
+               udh->ip4.tos = htonl(grh->flow_label);
+               udh->ip4.frag_off = htons(IP_DF);
+               udh->ip4.ttl = grh->hop_limit;
+
+               ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
+               udh->ip4.saddr = ipv4_addr;
+               ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
+               udh->ip4.daddr = ipv4_addr;
+               /* note: checksum is calculated by the device */
+       }
+
+       /* UDP */
+       if (has_udp) {
+               udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT);
+               udh->udp.dport = htons(ROCE_V2_UDP_DPORT);
+               udh->udp.csum = 0;
+               /* UDP length is untouched hence is zero */
+       }
+       return 0;
+}
+
+static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
+                                       struct qedr_qp *qp,
+                                       struct ib_send_wr *swr,
+                                       struct qed_roce_ll2_packet **p_packet)
+{
+       u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE];
+       struct qed_roce_ll2_packet *packet;
+       struct pci_dev *pdev = dev->pdev;
+       int roce_mode, header_size;
+       struct ib_ud_header udh;
+       int i, rc;
+
+       *p_packet = NULL;
+
+       rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
+       if (rc)
+               return rc;
+
+       header_size = ib_ud_header_pack(&udh, &ud_header_buffer);
+
+       packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
+       if (!packet)
+               return -ENOMEM;
+
+       packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size,
+                                                 &packet->header.baddr,
+                                                 GFP_ATOMIC);
+       if (!packet->header.vaddr) {
+               kfree(packet);
+               return -ENOMEM;
+       }
+
+       if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
+               packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+       else
+               packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
+
+       packet->roce_mode = roce_mode;
+       memcpy(packet->header.vaddr, ud_header_buffer, header_size);
+       packet->header.len = header_size;
+       packet->n_seg = swr->num_sge;
+       for (i = 0; i < packet->n_seg; i++) {
+               packet->payload[i].baddr = swr->sg_list[i].addr;
+               packet->payload[i].len = swr->sg_list[i].length;
+       }
+
+       *p_packet = packet;
+
+       return 0;
+}
+
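+/*
+ * Post a single GSI send WR: build the UD header and packet descriptor and
+ * transmit it over LL2. Only one WR per call and only the IB_WR_SEND opcode
+ * are supported.
+ */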
+int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+                      struct ib_send_wr **bad_wr)
+{
+       struct qed_roce_ll2_packet *pkt = NULL;
+       struct qedr_qp *qp = get_qedr_qp(ibqp);
+       struct qedr_dev *dev = qp->dev;
+       unsigned long flags;
+       int rc;
+
+       if (qp->state != QED_ROCE_QP_STATE_RTS) {
+               *bad_wr = wr;
+               DP_ERR(dev,
+                      "gsi post send: failed to post. QP state is %d and not QED_ROCE_QP_STATE_RTS\n",
+                      qp->state);
+               return -EINVAL;
+       }
+
+       if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
+               DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n",
+                      wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE);
+               rc = -EINVAL;
+               goto err;
+       }
+
+       if (wr->opcode != IB_WR_SEND) {
+               DP_ERR(dev,
+                      "gsi post send: failed due to unsupported opcode %d\n",
+                      wr->opcode);
+               rc = -EINVAL;
+               goto err;
+       }
+
+       spin_lock_irqsave(&qp->q_lock, flags);
+
+       rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
+       if (rc) {
+               spin_unlock_irqrestore(&qp->q_lock, flags);
+               goto err;
+       }
+
+       rc = qedr_ll2_post_tx(dev, pkt);
+
+       if (!rc) {
+               qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
+               qedr_inc_sw_prod(&qp->sq);
+               DP_DEBUG(qp->dev, QEDR_MSG_GSI,
+                        "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
+                        wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
+       } else {
+               DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
+               rc = -EAGAIN;
+               *bad_wr = wr;
+       }
+
+       spin_unlock_irqrestore(&qp->q_lock, flags);
+
+       if (wr->next) {
+               DP_ERR(dev,
+                      "gsi post send: failed second WR. Only one WR may be passed at a time\n");
+               *bad_wr = wr->next;
+               rc = -EINVAL;
+       }
+
+       return rc;
+
+err:
+       *bad_wr = wr;
+       return rc;
+}
+
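+/*
+ * Post GSI receive buffers directly to the LL2 RX queue and remember the
+ * wr_id and SGE for completion reporting in qedr_gsi_poll_cq().
+ */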
+int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+                      struct ib_recv_wr **bad_wr)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+       struct qedr_qp *qp = get_qedr_qp(ibqp);
+       unsigned long flags;
+       int rc = 0;
+
+       if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
+           (qp->state != QED_ROCE_QP_STATE_RTS)) {
+               *bad_wr = wr;
+               DP_ERR(dev,
+                      "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n",
+                      qp->state);
+               return -EINVAL;
+       }
+
+       spin_lock_irqsave(&qp->q_lock, flags);
+
+       while (wr) {
+               if (wr->num_sge > QEDR_GSI_MAX_RECV_SGE) {
+                       DP_ERR(dev,
+                              "gsi post recv: failed to post rx buffer. too many sges %d>%d\n",
+                              wr->num_sge, QEDR_GSI_MAX_RECV_SGE);
+                       goto err;
+               }
+
+               rc = dev->ops->ll2_post_rx_buffer(dev->rdma_ctx,
+                                                 dev->gsi_ll2_handle,
+                                                 wr->sg_list[0].addr,
+                                                 wr->sg_list[0].length,
+                                                 0 /* cookie */,
+                                                 1 /* notify_fw */);
+               if (rc) {
+                       DP_ERR(dev,
+                              "gsi post recv: failed to post rx buffer (rc=%d)\n",
+                              rc);
+                       goto err;
+               }
+
+               memset(&qp->rqe_wr_id[qp->rq.prod], 0,
+                      sizeof(qp->rqe_wr_id[qp->rq.prod]));
+               qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
+               qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
+
+               qedr_inc_sw_prod(&qp->rq);
+
+               wr = wr->next;
+       }
+
+       spin_unlock_irqrestore(&qp->q_lock, flags);
+
+       return rc;
+err:
+       spin_unlock_irqrestore(&qp->q_lock, flags);
+       *bad_wr = wr;
+       return -ENOMEM;
+}
+
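+/*
+ * Poll the software-managed GSI CQ: drain RQ completions first, then SQ
+ * completions, up to num_entries work completions.
+ */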
+int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+       struct qedr_cq *cq = get_qedr_cq(ibcq);
+       struct qedr_qp *qp = dev->gsi_qp;
+       unsigned long flags;
+       int i = 0;
+
+       spin_lock_irqsave(&cq->cq_lock, flags);
+
+       while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
+               memset(&wc[i], 0, sizeof(*wc));
+
+               wc[i].qp = &qp->ibqp;
+               wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+               wc[i].opcode = IB_WC_RECV;
+               wc[i].pkey_index = 0;
+               wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
+                   IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
+               /* 0 - currently only one recv sg is supported */
+               wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
+               wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
+               ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
+               wc[i].wc_flags |= IB_WC_WITH_SMAC;
+               if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
+                       wc[i].wc_flags |= IB_WC_WITH_VLAN;
+                       wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
+               }
+
+               qedr_inc_sw_cons(&qp->rq);
+               i++;
+       }
+
+       while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
+               memset(&wc[i], 0, sizeof(*wc));
+
+               wc[i].qp = &qp->ibqp;
+               wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
+               wc[i].opcode = IB_WC_SEND;
+               wc[i].status = IB_WC_SUCCESS;
+
+               qedr_inc_sw_cons(&qp->sq);
+               i++;
+       }
+
+       spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+       DP_DEBUG(dev, QEDR_MSG_GSI,
+                "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%x, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
+                num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
+                qp->sq.gsi_cons, qp->ibqp.qp_num);
+
+       return i;
+}
diff --git a/drivers/infiniband/hw/qedr/qedr_roce_cm.h b/drivers/infiniband/hw/qedr/qedr_roce_cm.h
new file mode 100644 (file)
index 0000000..a559163
--- /dev/null
@@ -0,0 +1,60 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef LINUX_QEDR_CM_H_
+#define LINUX_QEDR_CM_H_
+
+#define QEDR_GSI_MAX_RECV_WR   (4096)
+#define QEDR_GSI_MAX_SEND_WR   (4096)
+
+#define QEDR_GSI_MAX_RECV_SGE  (1)     /* LL2 FW limitation */
+
+#define QEDR_ROCE_V2_UDP_SPORT (0000)
+
+static inline u32 qedr_get_ipv4_from_gid(const u8 *gid)
+{
+       return *(u32 *)(void *)&gid[12];
+}
+
+/* RDMA CM */
+int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+                      struct ib_recv_wr **bad_wr);
+int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+                      struct ib_send_wr **bad_wr);
+struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
+                                struct ib_qp_init_attr *attrs,
+                                struct qedr_qp *qp);
+void qedr_store_gsi_qp_cq(struct qedr_dev *dev,
+                         struct qedr_qp *qp, struct ib_qp_init_attr *attrs);
+int qedr_destroy_gsi_qp(struct qedr_dev *dev);
+void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info);
+#endif
index 769ac07c3c8eb72d3bd2bf95446236f9c07dc34b..a8a6e66767d5c7be42e2315509b56f1a58a88ecc 100644 (file)
@@ -49,7 +49,7 @@
 #include "qedr.h"
 #include "verbs.h"
 #include <rdma/qedr-abi.h>
-#include "qedr_cm.h"
+#include "qedr_roce_cm.h"
 
 #define DB_ADDR_SHIFT(addr)            ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
 
@@ -70,6 +70,20 @@ int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
        return 0;
 }
 
+int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
+                     int index, union ib_gid *sgid)
+{
+       struct qedr_dev *dev = get_qedr_dev(ibdev);
+
+       memset(sgid->raw, 0, sizeof(sgid->raw));
+       ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
+
+       DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
+                sgid->global.interface_id, sgid->global.subnet_prefix);
+
+       return 0;
+}
+
 int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
                   union ib_gid *sgid)
 {
@@ -263,8 +277,13 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
        attr->sm_lid = 0;
        attr->sm_sl = 0;
        attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
-       attr->gid_tbl_len = QEDR_MAX_SGID;
-       attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
+       if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+               attr->gid_tbl_len = 1;
+               attr->pkey_tbl_len = 1;
+       } else {
+               attr->gid_tbl_len = QEDR_MAX_SGID;
+               attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
+       }
        attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
        attr->qkey_viol_cntr = 0;
        get_link_speed_and_width(rdma_port->link_speed,
@@ -770,7 +789,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
                                       struct qedr_dev *dev,
                                       struct qedr_userq *q,
                                       u64 buf_addr, size_t buf_len,
-                                      int access, int dmasync)
+                                      int access, int dmasync,
+                                      int alloc_and_init)
 {
        u32 fw_pages;
        int rc;
@@ -791,19 +811,27 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
        if (rc)
                goto err0;
 
-       q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
-       if (IS_ERR(q->pbl_tbl)) {
-               rc = PTR_ERR(q->pbl_tbl);
-               goto err0;
-       }
-
+       if (alloc_and_init) {
+               q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
+               if (IS_ERR(q->pbl_tbl)) {
+                       rc = PTR_ERR(q->pbl_tbl);
+                       goto err0;
+               }
                qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
                                   FW_PAGE_SHIFT);
+       } else {
+               q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
+               if (!q->pbl_tbl) {
+                       rc = -ENOMEM;
+                       goto err0;
+               }
+       }
 
        return 0;
 
 err0:
        ib_umem_release(q->umem);
+       q->umem = NULL;
 
        return rc;
 }
@@ -929,7 +957,8 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
                cq->cq_type = QEDR_CQ_TYPE_USER;
 
                rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
-                                         ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
+                                         ureq.len, IB_ACCESS_LOCAL_WRITE,
+                                         1, 1);
                if (rc)
                        goto err0;
 
@@ -1222,18 +1251,34 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
        return 0;
 }
 
-static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
+static void qedr_copy_rq_uresp(struct qedr_dev *dev,
+                              struct qedr_create_qp_uresp *uresp,
                               struct qedr_qp *qp)
 {
-       uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+       /* iWARP requires two doorbells per RQ. */
+       if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+               uresp->rq_db_offset =
+                   DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
+               uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
+       } else {
+               uresp->rq_db_offset =
+                   DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+       }
+
        uresp->rq_icid = qp->icid;
 }
 
-static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
+static void qedr_copy_sq_uresp(struct qedr_dev *dev,
+                              struct qedr_create_qp_uresp *uresp,
                               struct qedr_qp *qp)
 {
        uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
-       uresp->sq_icid = qp->icid + 1;
+
+       /* iWARP uses the same cid for rq and sq */
+       if (rdma_protocol_iwarp(&dev->ibdev, 1))
+               uresp->sq_icid = qp->icid;
+       else
+               uresp->sq_icid = qp->icid + 1;
 }
 
 static int qedr_copy_qp_uresp(struct qedr_dev *dev,
@@ -1243,8 +1288,8 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev,
        int rc;
 
        memset(&uresp, 0, sizeof(uresp));
-       qedr_copy_sq_uresp(&uresp, qp);
-       qedr_copy_rq_uresp(&uresp, qp);
+       qedr_copy_sq_uresp(dev, &uresp, qp);
+       qedr_copy_rq_uresp(dev, &uresp, qp);
 
        uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
        uresp.qp_id = qp->qp_id;
@@ -1264,6 +1309,7 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
                                      struct ib_qp_init_attr *attrs)
 {
        spin_lock_init(&qp->q_lock);
+       atomic_set(&qp->refcnt, 1);
        qp->pd = pd;
        qp->qp_type = attrs->qp_type;
        qp->max_inline_data = attrs->cap.max_inline_data;
@@ -1334,6 +1380,52 @@ static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
                 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
 }
 
+static int qedr_idr_add(struct qedr_dev *dev, void *ptr, u32 id)
+{
+       int rc;
+
+       if (!rdma_protocol_iwarp(&dev->ibdev, 1))
+               return 0;
+
+       idr_preload(GFP_KERNEL);
+       spin_lock_irq(&dev->idr_lock);
+
+       rc = idr_alloc(&dev->qpidr, ptr, id, id + 1, GFP_ATOMIC);
+
+       spin_unlock_irq(&dev->idr_lock);
+       idr_preload_end();
+
+       return rc < 0 ? rc : 0;
+}
+
+static void qedr_idr_remove(struct qedr_dev *dev, u32 id)
+{
+       if (!rdma_protocol_iwarp(&dev->ibdev, 1))
+               return;
+
+       spin_lock_irq(&dev->idr_lock);
+       idr_remove(&dev->qpidr, id);
+       spin_unlock_irq(&dev->idr_lock);
+}
+
+static inline void
+qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
+                           struct qedr_qp *qp,
+                           struct qed_rdma_create_qp_out_params *out_params)
+{
+       qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
+       qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
+
+       qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
+                          &qp->usq.pbl_info, FW_PAGE_SHIFT);
+
+       qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
+       qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
+
+       qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
+                          &qp->urq.pbl_info, FW_PAGE_SHIFT);
+}
+
 static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
 {
        if (qp->usq.umem)
@@ -1357,6 +1449,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
        struct ib_ucontext *ib_ctx = NULL;
        struct qedr_ucontext *ctx = NULL;
        struct qedr_create_qp_ureq ureq;
+       int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
        int rc = -EINVAL;
 
        ib_ctx = ibpd->uobject->context;
@@ -1371,14 +1464,13 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
 
        /* SQ - read access only (0), dma sync not required (0) */
        rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
-                                 ureq.sq_len, 0, 0);
+                                 ureq.sq_len, 0, 0, alloc_and_init);
        if (rc)
                return rc;
 
        /* RQ - read access only (0), dma sync not required (0) */
        rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
-                                 ureq.rq_len, 0, 0);
-
+                                 ureq.rq_len, 0, 0, alloc_and_init);
        if (rc)
                return rc;
 
@@ -1399,6 +1491,9 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
                goto err1;
        }
 
+       if (rdma_protocol_iwarp(&dev->ibdev, 1))
+               qedr_iwarp_populate_user_qp(dev, qp, &out_params);
+
        qp->qp_id = out_params.qp_id;
        qp->icid = out_params.icid;
 
@@ -1419,6 +1514,21 @@ err1:
        return rc;
 }
 
+static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+       qp->sq.db = dev->db_addr +
+           DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+       qp->sq.db_data.data.icid = qp->icid;
+
+       qp->rq.db = dev->db_addr +
+                   DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
+       qp->rq.db_data.data.icid = qp->icid;
+       qp->rq.iwarp_db2 = dev->db_addr +
+                          DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
+       qp->rq.iwarp_db2_data.data.icid = qp->icid;
+       qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
+}
+
 static int
 qedr_roce_create_kernel_qp(struct qedr_dev *dev,
                           struct qedr_qp *qp,
@@ -1465,8 +1575,71 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
        qp->icid = out_params.icid;
 
        qedr_set_roce_db_info(dev, qp);
+       return rc;
+}
 
-       return 0;
+static int
+qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
+                           struct qedr_qp *qp,
+                           struct qed_rdma_create_qp_in_params *in_params,
+                           u32 n_sq_elems, u32 n_rq_elems)
+{
+       struct qed_rdma_create_qp_out_params out_params;
+       struct qed_chain_ext_pbl ext_pbl;
+       int rc;
+
+       in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
+                                                    QEDR_SQE_ELEMENT_SIZE,
+                                                    QED_CHAIN_MODE_PBL);
+       in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
+                                                    QEDR_RQE_ELEMENT_SIZE,
+                                                    QED_CHAIN_MODE_PBL);
+
+       qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
+                                             in_params, &out_params);
+
+       if (!qp->qed_qp)
+               return -EINVAL;
+
+       /* Now we allocate the chain */
+       ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
+       ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
+
+       rc = dev->ops->common->chain_alloc(dev->cdev,
+                                          QED_CHAIN_USE_TO_PRODUCE,
+                                          QED_CHAIN_MODE_PBL,
+                                          QED_CHAIN_CNT_TYPE_U32,
+                                          n_sq_elems,
+                                          QEDR_SQE_ELEMENT_SIZE,
+                                          &qp->sq.pbl, &ext_pbl);
+
+       if (rc)
+               goto err;
+
+       ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
+       ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
+
+       rc = dev->ops->common->chain_alloc(dev->cdev,
+                                          QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+                                          QED_CHAIN_MODE_PBL,
+                                          QED_CHAIN_CNT_TYPE_U32,
+                                          n_rq_elems,
+                                          QEDR_RQE_ELEMENT_SIZE,
+                                          &qp->rq.pbl, &ext_pbl);
+
+       if (rc)
+               goto err;
+
+       qp->qp_id = out_params.qp_id;
+       qp->icid = out_params.icid;
+
+       qedr_set_iwarp_db_info(dev, qp);
+       return rc;
+
+err:
+       dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+
+       return rc;
 }
 
 static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
@@ -1541,8 +1714,12 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev,
 
        n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
 
-       rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
-                                       n_sq_elems, n_rq_elems);
+       if (rdma_protocol_iwarp(&dev->ibdev, 1))
+               rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
+                                                n_sq_elems, n_rq_elems);
+       else
+               rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
+                                               n_sq_elems, n_rq_elems);
        if (rc)
                qedr_cleanup_kernel(dev, qp);
 
@@ -1602,6 +1779,10 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
 
        qp->ibqp.qp_num = qp->qp_id;
 
+       rc = qedr_idr_add(dev, qp, qp->qp_id);
+       if (rc)
+               goto err;
+
        return &qp->ibqp;
 
 err:
@@ -1689,10 +1870,13 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
                        /* Update doorbell (in case post_recv was
                         * done before move to RTR)
                         */
-                       wmb();
-                       writel(qp->rq.db_data.raw, qp->rq.db);
-                       /* Make sure write takes effect */
-                       mmiowb();
+
+                       if (rdma_protocol_roce(&dev->ibdev, 1)) {
+                               wmb();
+                               writel(qp->rq.db_data.raw, qp->rq.db);
+                               /* Make sure write takes effect */
+                               mmiowb();
+                       }
                        break;
                case QED_ROCE_QP_STATE_ERR:
                        break;
@@ -1786,16 +1970,18 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        else
                new_qp_state = old_qp_state;
 
-       if (!ib_modify_qp_is_ok
-           (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
-            IB_LINK_LAYER_ETHERNET)) {
-               DP_ERR(dev,
-                      "modify qp: invalid attribute mask=0x%x specified for\n"
-                      "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
-                      attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
-                      new_qp_state);
-               rc = -EINVAL;
-               goto err;
+       if (rdma_protocol_roce(&dev->ibdev, 1)) {
+               if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
+                                       ibqp->qp_type, attr_mask,
+                                       IB_LINK_LAYER_ETHERNET)) {
+                       DP_ERR(dev,
+                              "modify qp: invalid attribute mask=0x%x specified for\n"
+                              "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
+                              attr_mask, qp->qp_id, ibqp->qp_type,
+                              old_qp_state, new_qp_state);
+                       rc = -EINVAL;
+                       goto err;
+               }
        }
 
        /* Translate the masks... */
@@ -2111,15 +2297,34 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
        DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
                 qp, qp->qp_type);
 
-       if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
-           (qp->state != QED_ROCE_QP_STATE_ERR) &&
-           (qp->state != QED_ROCE_QP_STATE_INIT)) {
+       if (rdma_protocol_roce(&dev->ibdev, 1)) {
+               if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
+                   (qp->state != QED_ROCE_QP_STATE_ERR) &&
+                   (qp->state != QED_ROCE_QP_STATE_INIT)) {
 
-               attr.qp_state = IB_QPS_ERR;
-               attr_mask |= IB_QP_STATE;
+                       attr.qp_state = IB_QPS_ERR;
+                       attr_mask |= IB_QP_STATE;
 
-               /* Change the QP state to ERROR */
-               qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
+                       /* Change the QP state to ERROR */
+                       qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
+               }
+       } else {
+               /* Wait for the connect/accept to complete */
+               if (qp->ep) {
+                       int wait_count = 1;
+
+                       while (qp->ep->during_connect) {
+                               DP_DEBUG(dev, QEDR_MSG_QP,
+                                        "Still in connect/accept\n");
+
+                               msleep(100);
+                               if (wait_count++ > 200) {
+                                       DP_NOTICE(dev,
+                                                 "during connect timeout\n");
+                                       break;
+                               }
+                       }
+               }
        }
 
        if (qp->qp_type == IB_QPT_GSI)
@@ -2127,8 +2332,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
 
        qedr_free_qp_resources(dev, qp);
 
-       kfree(qp);
-
+       if (atomic_dec_and_test(&qp->refcnt)) {
+               qedr_idr_remove(dev, qp->qp_id);
+               kfree(qp);
+       }
        return rc;
 }
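
For iWARP, destroy now waits (bounded, roughly 20 s of 100 ms polls) for an in-flight connect/accept to finish before tearing the QP down, and the final free is reference-counted so the idr entry and the qedr_qp are released only by the last user. A sketch of the implied put-side pattern, where the qedr_qp_put() name is illustrative and only the atomic_dec_and_test() block appears in the patch:

/* Sketch only: last-reference teardown as implied by the hunk above. */
static void qedr_qp_put(struct qedr_dev *dev, struct qedr_qp *qp)
{
	if (atomic_dec_and_test(&qp->refcnt)) {
		qedr_idr_remove(dev, qp->qp_id);
		kfree(qp);
	}
}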
 
@@ -2740,6 +2947,7 @@ static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
        case IB_WR_SEND_WITH_INV:
                return IB_WC_SEND;
        case IB_WR_RDMA_READ:
+       case IB_WR_RDMA_READ_WITH_INV:
                return IB_WC_RDMA_READ;
        case IB_WR_ATOMIC_CMP_AND_SWP:
                return IB_WC_COMP_SWAP;
@@ -2900,11 +3108,8 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
                break;
        case IB_WR_RDMA_READ_WITH_INV:
-               DP_ERR(dev,
-                      "RDMA READ WITH INVALIDATE not supported\n");
-               *bad_wr = wr;
-               rc = -EINVAL;
-               break;
+               SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
+               /* fallthrough: handled identically to RDMA READ */
 
        case IB_WR_RDMA_READ:
                wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
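
With the READ_INV flag set on the first WQE, IB_WR_RDMA_READ_WITH_INV now shares the RDMA READ path instead of being rejected. From a kernel ULP's point of view, the newly accepted work request might be posted as sketched below; sge, local_mr, remote_addr and remote_rkey are assumed to have been set up by the caller, and the helper name is illustrative.

/* Illustrative only: RDMA READ that invalidates a local rkey on completion. */
static int post_read_with_inv(struct ib_qp *qp, struct ib_sge *sge,
			      struct ib_mr *local_mr,
			      u64 remote_addr, u32 remote_rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_rdma_wr rwr = {
		.wr = {
			.opcode		= IB_WR_RDMA_READ_WITH_INV,
			.send_flags	= IB_SEND_SIGNALED,
			.sg_list	= sge,
			.num_sge	= 1,
			.ex.invalidate_rkey = local_mr->rkey,
		},
		.remote_addr	= remote_addr,
		.rkey		= remote_rkey,
	};

	return ib_post_send(qp, &rwr.wr, &bad_wr);
}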
@@ -3014,15 +3219,17 @@ int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
        spin_lock_irqsave(&qp->q_lock, flags);
 
-       if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
-           (qp->state != QED_ROCE_QP_STATE_ERR) &&
-           (qp->state != QED_ROCE_QP_STATE_SQD)) {
-               spin_unlock_irqrestore(&qp->q_lock, flags);
-               *bad_wr = wr;
-               DP_DEBUG(dev, QEDR_MSG_CQ,
-                        "QP in wrong state! QP icid=0x%x state %d\n",
-                        qp->icid, qp->state);
-               return -EINVAL;
+       if (rdma_protocol_roce(&dev->ibdev, 1)) {
+               if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
+                   (qp->state != QED_ROCE_QP_STATE_ERR) &&
+                   (qp->state != QED_ROCE_QP_STATE_SQD)) {
+                       spin_unlock_irqrestore(&qp->q_lock, flags);
+                       *bad_wr = wr;
+                       DP_DEBUG(dev, QEDR_MSG_CQ,
+                                "QP in wrong state! QP icid=0x%x state %d\n",
+                                qp->icid, qp->state);
+                       return -EINVAL;
+               }
        }
 
        while (wr) {
@@ -3142,6 +3349,11 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                /* Make sure write sticks */
                mmiowb();
 
+               if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+                       writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
+                       mmiowb();       /* for second doorbell */
+               }
+
                wr = wr->next;
        }
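
The receive path now rings a second, iWARP-only doorbell after the existing one, with the same ordering requirements: the RQE must be visible before the doorbell, and each doorbell write is flushed before continuing. Summarised as an illustrative helper (the helper name is not from the patch; the field names follow it):

/* Illustrative helper condensing the doorbell sequence above. */
static void qedr_ring_rq_doorbells(struct qedr_dev *dev, struct qedr_qp *qp)
{
	wmb();					/* publish the RQE before ringing */
	writel(qp->rq.db_data.raw, qp->rq.db);
	mmiowb();				/* make sure the write sticks */

	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
		writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
		mmiowb();			/* same for the second doorbell */
	}
}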
 
@@ -3603,23 +3815,3 @@ int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
                 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
        return IB_MAD_RESULT_SUCCESS;
 }
-
-int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
-                       struct ib_port_immutable *immutable)
-{
-       struct ib_port_attr attr;
-       int err;
-
-       immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
-                                   RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
-
-       err = ib_query_port(ibdev, port_num, &attr);
-       if (err)
-               return err;
-
-       immutable->pkey_tbl_len = attr.pkey_tbl_len;
-       immutable->gid_tbl_len = attr.gid_tbl_len;
-       immutable->max_mad_size = IB_MGMT_MAD_SIZE;
-
-       return 0;
-}
index 0f8ab49d5a1a92576dfbcb9bd5bd30900b80f3c8..1a94425dea338e3da3b007c9fa16cd9ce0c36e7a 100644 (file)
@@ -39,6 +39,8 @@ int qedr_modify_port(struct ib_device *, u8 port, int mask,
                     struct ib_port_modify *props);
 
 int qedr_query_gid(struct ib_device *, u8 port, int index, union ib_gid *gid);
+int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
+                     int index, union ib_gid *gid);
 
 int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);