git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
SUNRPC: Improve latency for interactive tasks
author: Trond Myklebust <trond.myklebust@hammerspace.com>
Sun, 9 Sep 2018 15:37:22 +0000 (11:37 -0400)
committer: Trond Myklebust <trond.myklebust@hammerspace.com>
Sun, 30 Sep 2018 19:35:15 +0000 (15:35 -0400)
One of the intentions with the priority queues was to ensure that no
single process can hog the transport. The field task->tk_owner therefore
identifies the RPC call's origin, and is intended to allow the RPC layer
to organise queues for fairness.
This commit therefore modifies the transmit queue to group requests
by task->tk_owner, and ensures that we round robin among those groups.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
include/linux/sunrpc/xprt.h
net/sunrpc/xprt.c

index 8c2bb078f00cba5d36c119460f7db02bf74cc13e..e377620b974409304b5fe02590a3233b18c56d42 100644 (file)
@@ -89,6 +89,7 @@ struct rpc_rqst {
        };
 
        struct list_head        rq_xmit;        /* Send queue */
+       struct list_head        rq_xmit2;       /* Send queue */
 
        void                    *rq_buffer;     /* Call XDR encode buffer */
        size_t                  rq_callsize;
index 9c5a8514d264bba305b57b14c087a736d40ab662..44d0eeaddaac73c86179f4f410e7b0b44fda307c 100644 (file)
@@ -1053,12 +1053,21 @@ xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
 void
 xprt_request_enqueue_transmit(struct rpc_task *task)
 {
-       struct rpc_rqst *req = task->tk_rqstp;
+       struct rpc_rqst *pos, *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
 
        if (xprt_request_need_enqueue_transmit(task, req)) {
                spin_lock(&xprt->queue_lock);
+               list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
+                       if (pos->rq_task->tk_owner != task->tk_owner)
+                               continue;
+                       list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
+                       INIT_LIST_HEAD(&req->rq_xmit);
+                       goto out;
+               }
                list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
+               INIT_LIST_HEAD(&req->rq_xmit2);
+out:
                set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
                spin_unlock(&xprt->queue_lock);
        }
@@ -1074,8 +1083,20 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
 static void
 xprt_request_dequeue_transmit_locked(struct rpc_task *task)
 {
-       if (test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
-               list_del(&task->tk_rqstp->rq_xmit);
+       struct rpc_rqst *req = task->tk_rqstp;
+
+       if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
+               return;
+       if (!list_empty(&req->rq_xmit)) {
+               list_del(&req->rq_xmit);
+               if (!list_empty(&req->rq_xmit2)) {
+                       struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
+                                       struct rpc_rqst, rq_xmit2);
+                       list_del(&req->rq_xmit2);
+                       list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
+               }
+       } else
+               list_del(&req->rq_xmit2);
 }
 
 /**