RDMA/hns: Add SRQ asynchronous event support
author Lijun Ou <oulijun@huawei.com>
Sat, 24 Nov 2018 08:49:22 +0000 (16:49 +0800)
committer Jason Gunthorpe <jgg@mellanox.com>
Wed, 5 Dec 2018 14:59:13 +0000 (07:59 -0700)
This patch implements the processing flow for SRQ asynchronous
events.

Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
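
The patch routes SRQ entries of the asynchronous event queue (AEQ) through a new
hns_roce_srq_event() helper, which looks up the SRQ by number, takes a reference,
and invokes its ->event callback; from there the event is expected to reach the
handler the verbs consumer registered when it created the SRQ. As a hedged,
illustrative sketch (not part of the patch), this is how an in-kernel consumer
could register for and act on such events using only the standard verbs API;
the example_* names are made up for the example:

    #include <rdma/ib_verbs.h>

    /* Illustrative only: invoked from the driver's AEQ path once
     * hns_roce_srq_event() -> srq->event() has translated the hardware
     * event into an ib_event. */
    static void example_srq_event_handler(struct ib_event *event, void *context)
    {
            switch (event->event) {
            case IB_EVENT_SRQ_LIMIT_REACHED:
                    /* The armed SRQ limit was hit: replenish receive WRs. */
                    break;
            case IB_EVENT_SRQ_ERR:
                    /* Catastrophic SRQ error: tear down dependent QPs. */
                    break;
            default:
                    break;
            }
    }

    static struct ib_srq *example_create_srq(struct ib_pd *pd)
    {
            struct ib_srq_init_attr init_attr = {
                    .event_handler = example_srq_event_handler,
                    .srq_context   = NULL,
                    .attr = {
                            .max_wr  = 256,
                            .max_sge = 1,
                            /* The limit event is normally armed later via
                             * ib_modify_srq(..., IB_SRQ_LIMIT). */
                    },
            };

            return ib_create_srq(pd, &init_attr);
    }
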
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/hns/hns_roce_srq.c

index 5a40746cb2e3eb39eaad58fd0366188951f03abc..779dd4c409cb821f13a73d83642dc25c932c29d3 100644 (file)
@@ -646,6 +646,12 @@ struct hns_roce_aeqe {
                        u32 rsv1;
                } qp_event;
 
+               struct {
+                       __le32 srq;
+                       u32 rsv0;
+                       u32 rsv1;
+               } srq_event;
+
                struct {
                        __le32 cq;
                        u32 rsv0;
@@ -1135,6 +1141,7 @@ void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);
 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
+void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
 int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
 int hns_roce_init(struct hns_roce_dev *hr_dev);
 void hns_roce_exit(struct hns_roce_dev *hr_dev);
index 6c9baf99894e8b82145ea35578130cc1bf7abaf4..835b78371294272dcf1a0013a58cf795208746e9 100644 (file)
@@ -4459,6 +4459,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
        int aeqe_found = 0;
        int event_type;
        int sub_type;
+       u32 srqn;
        u32 qpn;
        u32 cqn;
 
@@ -4481,6 +4482,9 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
                cqn = roce_get_field(aeqe->event.cq_event.cq,
                                     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
                                     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
+               srqn = roce_get_field(aeqe->event.srq_event.srq,
+                                    HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
+                                    HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
 
                switch (event_type) {
                case HNS_ROCE_EVENT_TYPE_PATH_MIG:
@@ -4488,13 +4492,14 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
                case HNS_ROCE_EVENT_TYPE_COMM_EST:
                case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
                case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+               case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
                case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
                case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
                        hns_roce_qp_event(hr_dev, qpn, event_type);
                        break;
                case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
-               case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
                case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+                       hns_roce_srq_event(hr_dev, srqn, event_type);
                        break;
                case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
                case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
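
A side note on the decode above (context, not part of the patch): the QP, CQ, and
SRQ numbers all overlay the same 32-bit word of the AEQE event union, which is why
the single HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM mask/shift pair serves all three
lookups. A minimal sketch of the extraction, assumed equivalent to the
roce_get_field() helper in hns_roce_common.h:

    /* Sketch of the bit-field extraction used above; assumed to match the
     * roce_get_field() macro in hns_roce_common.h. */
    static inline u32 example_get_field(__le32 origin, u32 mask, u32 shift)
    {
            return (le32_to_cpu(origin) & mask) >> shift;
    }

    /* e.g. srqn = example_get_field(aeqe->event.srq_event.srq,
     *                               HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
     *                               HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
     */
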
index 46732d25a8a808eda61ef198b469061369d9985e..463df60094e853d6a166494da993c3ed64cc5d50 100644 (file)
@@ -9,6 +9,29 @@
 #include "hns_roce_cmd.h"
 #include "hns_roce_hem.h"
 
+void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
+{
+       struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+       struct hns_roce_srq *srq;
+
+       xa_lock(&srq_table->xa);
+       srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));
+       if (srq)
+               atomic_inc(&srq->refcount);
+       xa_unlock(&srq_table->xa);
+
+       if (!srq) {
+               dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
+               return;
+       }
+
+       srq->event(srq, event_type);
+
+       if (atomic_dec_and_test(&srq->refcount))
+               complete(&srq->free);
+}
+EXPORT_SYMBOL_GPL(hns_roce_srq_event);
+
 static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
                                  enum hns_roce_event event_type)
 {
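
The diff view is truncated here; hns_roce_ib_srq_event() is the per-SRQ ->event
callback that hns_roce_srq_event() ends up calling. As a hedged sketch of what
such a callback typically does, mirroring the driver's existing QP/CQ event
helpers rather than quoting the rest of this hunk, it translates the hardware
event code into an ib_event and hands it to the consumer's registered handler:

    /* Sketch only: assumes the driver-internal types from hns_roce_device.h
     * (struct hns_roce_srq, enum hns_roce_event); the real callback body is
     * cut off in this diff view. */
    static void example_srq_event(struct hns_roce_srq *srq,
                                  enum hns_roce_event event_type)
    {
            struct ib_srq *ibsrq = &srq->ibsrq;
            struct ib_event event;

            if (!ibsrq->event_handler)
                    return;

            event.device = ibsrq->device;
            event.element.srq = ibsrq;

            switch (event_type) {
            case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
                    event.event = IB_EVENT_SRQ_LIMIT_REACHED;
                    break;
            case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
                    event.event = IB_EVENT_SRQ_ERR;
                    break;
            default:
                    return; /* unexpected type: nothing to report */
            }

            ibsrq->event_handler(&event, ibsrq->srq_context);
    }
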