IB/hfi1: Remove race conditions in user_sdma send path
author    Michael J. Ruhl <michael.j.ruhl@intel.com>
          Tue, 18 Dec 2018 21:04:18 +0000 (16:04 -0500)
committer Kleber Sacilotto de Souza <kleber.souza@canonical.com>
          Wed, 14 Aug 2019 09:18:49 +0000 (11:18 +0200)
BugLink: https://bugs.launchpad.net/bugs/1837257
commit 28a9a9e83ceae2cee25b9af9ad20d53aaa9ab951 upstream

Packet queue state is overused to determine SDMA descriptor
availability and packet queue request state.

cpu 0  ret = user_sdma_send_pkts(req, pcount);
cpu 0  if (atomic_read(&pq->n_reqs))
cpu 1  IRQ user_sdma_txreq_cb calls pq_update() (state to _INACTIVE)
cpu 0        xchg(&pq->state, SDMA_PKT_Q_ACTIVE);

At this point pq->n_reqs == 0 and pq->state is incorrectly
SDMA_PKT_Q_ACTIVE.  The close path will hang waiting for the state
to return to _INACTIVE.
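
The sketch below is a minimal user-space reproduction of this
check-then-act race, not the driver code; the pthread thread stands
in for the IRQ-context completion, and the n_reqs/state names are
mirrored from the driver for illustration only.  The join forces the
completion to land exactly in the racy window, so the stale decision
is taken deterministically:

  /* Build with: cc -pthread race.c */
  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdio.h>

  enum { Q_INACTIVE, Q_ACTIVE };

  static atomic_int n_reqs = 1;          /* one request in flight */
  static atomic_int state  = Q_INACTIVE; /* packet queue state */

  /* IRQ-side completion: like pq_update() before the fix. */
  static void *completion(void *arg)
  {
          (void)arg;
          if (atomic_fetch_sub(&n_reqs, 1) == 1)
                  atomic_store(&state, Q_INACTIVE); /* queue drained */
          return NULL;
  }

  int main(void)
  {
          pthread_t irq;

          /* cpu 0: reads n_reqs while the request still looks live */
          int busy = atomic_load(&n_reqs);

          /* cpu 1: the last completion fires in the window */
          pthread_create(&irq, NULL, completion, NULL);
          pthread_join(&irq, NULL);

          /* cpu 0: acts on the stale read; state is now wrong */
          if (busy)
                  atomic_store(&state, Q_ACTIVE);

          printf("n_reqs=%d state=%s\n", atomic_load(&n_reqs),
                 atomic_load(&state) == Q_ACTIVE ?
                 "ACTIVE (close path hangs)" : "INACTIVE");
          return 0;
  }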

This can also change the state from _DEFERRED to _ACTIVE.  However,
this is a mostly benign race.

Remove the racy code path.

Use n_reqs to determine if a packet queue is active or not.
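
As a rough user-space sketch of the fixed pattern (again hypothetical
pthread stand-ins, not the driver code): the request count itself is
the liveness signal, and the completion path wakes the waiter only
when the count reaches zero.  This mirrors atomic_dec_and_test() plus
wake_up() paired with wait_event_interruptible(pq->wait,
!atomic_read(&pq->n_reqs)); there is no separate state word left to
go stale.

  #include <pthread.h>

  struct pkt_q {
          int             n_reqs; /* outstanding requests */
          pthread_mutex_t lock;
          pthread_cond_t  wait;
  };

  /* Completion side: decrement and wake when the queue drains. */
  void pq_update(struct pkt_q *pq)
  {
          pthread_mutex_lock(&pq->lock);
          if (--pq->n_reqs == 0)
                  pthread_cond_broadcast(&pq->wait);
          pthread_mutex_unlock(&pq->lock);
  }

  /* Close side: block until every outstanding request is freed. */
  void pq_wait_idle(struct pkt_q *pq)
  {
          pthread_mutex_lock(&pq->lock);
          while (pq->n_reqs)
                  pthread_cond_wait(&pq->wait, &pq->lock);
          pthread_mutex_unlock(&pq->lock);
  }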

Cc: <stable@vger.kernel.org> # 4.14.0
Reviewed-by: Mitko Haralanov <mitko.haralanov@intel.com>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
drivers/infiniband/hw/hfi1/user_sdma.c
drivers/infiniband/hw/hfi1/user_sdma.h

index 39134dd305f50f07a5c92a778c8ee9eaa4f28810..51831bfbf90f55d94e07d1d45ff7c7e8585a6daa 100644 (file)
@@ -187,7 +187,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
        pq->ctxt = uctxt->ctxt;
        pq->subctxt = fd->subctxt;
        pq->n_max_reqs = hfi1_sdma_comp_ring_size;
-       pq->state = SDMA_PKT_Q_INACTIVE;
        atomic_set(&pq->n_reqs, 0);
        init_waitqueue_head(&pq->wait);
        atomic_set(&pq->n_locked, 0);
@@ -276,7 +275,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
                /* Wait until all requests have been freed. */
                wait_event_interruptible(
                        pq->wait,
-                       (READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
+                       !atomic_read(&pq->n_reqs));
                kfree(pq->reqs);
                kfree(pq->req_in_use);
                kmem_cache_destroy(pq->txreq_cache);
@@ -312,6 +311,13 @@ static u8 dlid_to_selector(u16 dlid)
        return mapping[hash];
 }
 
+/**
+ * hfi1_user_sdma_process_request() - Process and start a user sdma request
+ * @fd: valid file descriptor
+ * @iovec: array of io vectors to process
+ * @dim: overall iovec array size
+ * @count: number of io vector array entries processed
+ */
 int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
                                   struct iovec *iovec, unsigned long dim,
                                   unsigned long *count)
@@ -560,20 +566,12 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
                req->ahg_idx = sdma_ahg_alloc(req->sde);
 
        set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
+       pq->state = SDMA_PKT_Q_ACTIVE;
        /* Send the first N packets in the request to buy us some time */
        ret = user_sdma_send_pkts(req, pcount);
        if (unlikely(ret < 0 && ret != -EBUSY))
                goto free_req;
 
-       /*
-        * It is possible that the SDMA engine would have processed all the
-        * submitted packets by the time we get here. Therefore, only set
-        * packet queue state to ACTIVE if there are still uncompleted
-        * requests.
-        */
-       if (atomic_read(&pq->n_reqs))
-               xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
-
        /*
         * This is a somewhat blocking send implementation.
         * The driver will block the caller until all packets of the
@@ -1409,10 +1407,8 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
 
 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
 {
-       if (atomic_dec_and_test(&pq->n_reqs)) {
-               xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
+       if (atomic_dec_and_test(&pq->n_reqs))
                wake_up(&pq->wait);
-       }
 }
 
 static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
index fa7a4acdcf206b4844a0da2c573cece8c9609c6b..7d7476d8e4403a0f780d7ea04fd45a701a0016d0 100644 (file)
@@ -105,9 +105,10 @@ static inline int ahg_header_set(u32 *arr, int idx, size_t array_size,
 #define TXREQ_FLAGS_REQ_ACK   BIT(0)      /* Set the ACK bit in the header */
 #define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */
 
-#define SDMA_PKT_Q_INACTIVE BIT(0)
-#define SDMA_PKT_Q_ACTIVE   BIT(1)
-#define SDMA_PKT_Q_DEFERRED BIT(2)
+enum pkt_q_sdma_state {
+       SDMA_PKT_Q_ACTIVE,
+       SDMA_PKT_Q_DEFERRED,
+};
 
 /*
  * Maximum retry attempts to submit a TX request
@@ -135,7 +136,7 @@ struct hfi1_user_sdma_pkt_q {
        struct user_sdma_request *reqs;
        unsigned long *req_in_use;
        struct iowait busy;
-       unsigned state;
+       enum pkt_q_sdma_state state;
        wait_queue_head_t wait;
        unsigned long unpinned;
        struct mmu_rb_handler *handler;