net/rds: Give fr_state a chance to transition to FRMR_IS_FREE
author    Gerd Rausch <gerd.rausch@oracle.com>
          Tue, 16 Jul 2019 22:28:51 +0000 (15:28 -0700)
committer David S. Miller <davem@davemloft.net>
          Wed, 17 Jul 2019 19:06:51 +0000 (12:06 -0700)
In the context of FRMR (ib_frmr.c):

Memory regions make it onto the "clean_list" via "rds_ib_flush_mr_pool",
after the memory region has been posted for invalidation via
"rds_ib_post_inv".

At that point in time, "fr_state" may still be in state "FRMR_IS_INUSE",
since the only place where "fr_state" transitions to "FRMR_IS_FREE"
is in "rds_ib_mr_cqe_handler", which is triggered by a tasklet.
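
For reference, the FRMR state machine involved here is a small enum in
"net/rds/ib_mr.h", roughly along these lines (comments paraphrased):

	enum rds_ib_fr_state {
		FRMR_IS_FREE,	/* MR invalidated and ready for use */
		FRMR_IS_INUSE,	/* MR in use, or used and awaiting invalidation */
		FRMR_IS_STALE,	/* stale MR, needs to be dropped */
	};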

So if we notice that "fr_state != FRMR_IS_FREE" (see below),
we wait for "fr_inv_done" to trigger, for at most 10 msec.
Then we check again, and only put the memory region onto the drop_list
(via "rds_ib_free_frmr") if the situation remains unchanged.
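
A bounded wait of that kind would look roughly like the sketch below.
Note that the hunk further down uses a plain "wait_event" with no timeout,
so the 10 msec bound is taken from this description, not from the diff:

	/* Sketch only: wait up to 10 msec for the invalidation completion
	 * to move fr_state away from FRMR_IS_INUSE, then re-check.  The
	 * actual hunk below waits without a timeout.
	 */
	wait_event_timeout(frmr->fr_inv_done,
			   frmr->fr_state != FRMR_IS_INUSE,
			   msecs_to_jiffies(10));

	if (frmr->fr_state == FRMR_IS_FREE)
		break;			/* invalidated: safe to reuse */
	rds_ib_free_frmr(ibmr, true);	/* still in use: drop it instead */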

This avoids the problem of memory regions bouncing between "clean_list"
and "drop_list" before they even have a chance to be properly invalidated.

Signed-off-by: Gerd Rausch <gerd.rausch@oracle.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/rds/ib_frmr.c
net/rds/ib_mr.h

index 32ae26ed58a04c3e87f425f930ed172ac194e2b3..6038138d6e38a222dfc7f4f4efbb134c739c8bb6 100644 (file)
@@ -75,6 +75,7 @@ static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
                pool->max_items_soft = pool->max_items;
 
        frmr->fr_state = FRMR_IS_FREE;
+       init_waitqueue_head(&frmr->fr_inv_done);
        return ibmr;
 
 out_no_cigar:
@@ -285,6 +286,7 @@ void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
        if (frmr->fr_inv) {
                frmr->fr_state = FRMR_IS_FREE;
                frmr->fr_inv = false;
+               wake_up(&frmr->fr_inv_done);
        }
 
        atomic_inc(&ic->i_fastreg_wrs);
@@ -345,8 +347,31 @@ struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
        }
 
        do {
-               if (ibmr)
+               if (ibmr) {
+                       /* Memory regions make it onto the "clean_list" via
+                        * "rds_ib_flush_mr_pool", after the memory region has
+                        * been posted for invalidation via "rds_ib_post_inv".
+                        *
+                        * At that point in time, "fr_state" may still be
+                        * in state "FRMR_IS_INUSE", since the only place where
+                        * "fr_state" transitions to "FRMR_IS_FREE"
+                        * is in "rds_ib_mr_cqe_handler", which is
+                        * triggered by a tasklet.
+                        *
+                        * So we wait for "fr_inv_done" to trigger
+                        * and only put memory regions onto the drop_list
+                        * that failed (i.e. not marked "FRMR_IS_FREE").
+                        *
+                        * This avoids the problem of memory-regions bouncing
+                        * between "clean_list" and "drop_list" before they
+                        * even have a chance to be properly invalidated.
+                        */
+                       frmr = &ibmr->u.frmr;
+                       wait_event(frmr->fr_inv_done, frmr->fr_state != FRMR_IS_INUSE);
+                       if (frmr->fr_state == FRMR_IS_FREE)
+                               break;
                        rds_ib_free_frmr(ibmr, true);
+               }
                ibmr = rds_ib_alloc_frmr(rds_ibdev, nents);
                if (IS_ERR(ibmr))
                        return ibmr;
index 5da12c2484316cbbde5c33b254d86b061c1b187e..42daccb7b5eba6b0712ac3a5051618e50c652cee 100644 (file)
@@ -57,6 +57,7 @@ struct rds_ib_frmr {
        struct ib_mr            *mr;
        enum rds_ib_fr_state    fr_state;
        bool                    fr_inv;
+       wait_queue_head_t       fr_inv_done;
        struct ib_send_wr       fr_wr;
        unsigned int            dma_npages;
        unsigned int            sg_byte_len;