xprtrdma: Close window between waking RPC senders and posting Receives
author		Chuck Lever <chuck.lever@oracle.com>
		Wed, 9 Oct 2019 17:07:38 +0000 (13:07 -0400)
committer	Khalid Elmously <khalid.elmously@canonical.com>
		Fri, 14 Feb 2020 06:00:53 +0000 (01:00 -0500)
BugLink: https://bugs.launchpad.net/bugs/1862227
commit 2ae50ad68cd79224198b525f7bd645c9da98b6ff upstream.

A recent clean up attempted to separate Receive handling and RPC
Reply processing, in the name of clean layering.

Unfortunately, we can't do this because the Receive Queue has to be
refilled _after_ the most recent credit update from the responder
is parsed from the transport header, but _before_ we wake up the
next RPC sender. That is right in the middle of
rpcrdma_reply_handler().

Usually this isn't a problem because current responder
implementations don't vary their credit grant. The one exception is
when a connection is established: the grant goes from one to a much
larger number on the first Receive. The requester MUST post enough
Receives right then so that any outstanding requests can be sent
without risking RNR and connection loss.
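
Concretely, each reply consumes one posted Receive, so a requester can keep
only as many RPCs in flight as it has Receives posted. The following is a
minimal user-space model of that invariant (an illustrative sketch with
hypothetical names, not xprtrdma code) showing why the Receive Queue must be
refilled to the new credit grant before any further senders are released:

#include <assert.h>
#include <stdio.h>

struct requester {
	unsigned int credits;	/* most recent grant from the responder */
	unsigned int posted;	/* Receives currently on the Receive Queue */
	unsigned int in_flight;	/* requests sent but not yet answered */
};

/* A reply arrived: it consumed one posted Receive and carried a
 * (possibly larger) credit grant in its transport header.
 */
static void handle_reply(struct requester *r, unsigned int new_grant)
{
	r->posted--;		/* the Receive that caught this reply is used up */
	r->in_flight--;
	r->credits = new_grant;

	/* Refill the Receive Queue before letting more senders run:
	 * every future reply needs a Receive already waiting for it,
	 * or the responder sees RNR.
	 */
	while (r->posted < r->credits)
		r->posted++;	/* stands in for ib_post_recv() */
}

static int can_send(const struct requester *r)
{
	return r->in_flight < r->credits && r->in_flight < r->posted;
}

int main(void)
{
	/* A fresh connection starts with a grant of one. */
	struct requester r = { .credits = 1, .posted = 1, .in_flight = 0 };

	r.in_flight++;			/* first request */
	handle_reply(&r, 128);		/* first reply raises the grant */

	while (can_send(&r))		/* safe only because we refilled first */
		r.in_flight++;

	printf("in flight after refill: %u\n", r.in_flight);
	assert(r.in_flight <= r.posted);
	return 0;
}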

Fixes: 6ceea36890a0 ("xprtrdma: Refactor Receive accounting")
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h

diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 4345e6912392c5a8609e4fef17f0120fb5c0ac51..414fd4a5373c52e390899c9547e076996b3cec3f 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -1337,6 +1337,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
                xprt->cwnd = credits << RPC_CWNDSHIFT;
                spin_unlock(&xprt->transport_lock);
        }
+       rpcrdma_post_recvs(r_xprt, false);
 
        req = rpcr_to_rdmar(rqst);
        if (req->rl_reply) {
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index b78fe1da1bf7424d8722fd7de43871f8f3bd4510..baf76089a31b46dbdb12f8720ac39c19debc897d 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -82,7 +82,6 @@ rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
                     gfp_t flags);
 static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
 static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);
-static void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
 
 /* Wait for outstanding transport work to finish. ib_drain_qp
  * handles the drains in the wrong order for us, so open code
@@ -168,7 +167,6 @@ rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
                                   rdmab_addr(rep->rr_rdmabuf),
                                   wc->byte_len, DMA_FROM_DEVICE);
 
-       rpcrdma_post_recvs(r_xprt, false);
        rpcrdma_reply_handler(rep);
        return;
 
@@ -1495,8 +1493,13 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
        return 0;
 }
 
-static void
-rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
+/**
+ * rpcrdma_post_recvs - Refill the Receive Queue
+ * @r_xprt: controlling transport instance
+ * @temp: mark Receive buffers to be deleted after use
+ *
+ */
+void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
 {
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_ep *ep = &r_xprt->rx_ep;
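
Dropping the static qualifier (together with the prototype added to
xprt_rdma.h below) is what allows rpc_rdma.c to refill the Receive Queue at
the point shown in the first hunk. As a usage sketch, the temp=false call
below is the one added by this patch; a temp=true call at connection setup is
assumed here only to illustrate the @temp flag documented above and is not
part of this diff:

	/* Reply handler (rpc_rdma.c hunk above): these Receives are kept. */
	rpcrdma_post_recvs(r_xprt, false);

	/* Assumed connect-time refill: Receives posted before the first
	 * credit grant arrives are marked to be released after use.
	 */
	rpcrdma_post_recvs(r_xprt, true);
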
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 92ce09fcea742404fcacff8b0fc7f93549582e54..0438a14ddd9cf1f8de762373bf8449889a279c1c 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -477,6 +477,7 @@ void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);
 
 int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
                                struct rpcrdma_req *);
+void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
 
 /*
  * Buffer calls - xprtrdma/verbs.c