// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */

#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG
static int rpcrdma_bc_setup_reqs(struct rpcrdma_xprt *r_xprt,
				 unsigned int count)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	unsigned int i;

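	/* Allocate twice as many rqsts as the caller asked for; see
	 * the rationale in the comment in xprt_rdma_bc_setup() below.
	 */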
	for (i = 0; i < (count << 1); i++) {
		struct rpcrdma_regbuf *rb;
		size_t size;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req))
			return PTR_ERR(req);
		rqst = &req->rl_slot;

		rqst->rq_xprt = xprt;
		INIT_LIST_HEAD(&rqst->rq_bc_list);
		__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
		spin_lock(&xprt->bc_pa_lock);
		list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
		spin_unlock(&xprt->bc_pa_lock);

		size = r_xprt->rx_data.inline_rsize;
		rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
		if (IS_ERR(rb))
			goto out_fail;
		req->rl_sendbuf = rb;
		xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base,
			     min_t(size_t, size, PAGE_SIZE));
	}
	return 0;

out_fail:
	rpcrdma_req_destroy(req);
	return -ENOMEM;
}

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int rc;

	/* The backchannel reply path returns each rpc_rqst to the
	 * bc_pa_list _after_ the reply is sent. If the server is
	 * faster than the client, it can send another backward
	 * direction request before the rpc_rqst is returned to the
	 * list. The client rejects the request in this case.
	 *
	 * Twice as many rpc_rqsts are prepared to ensure there is
	 * always an rpc_rqst available as soon as a reply is sent.
	 */
	if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
		goto out_err;

	rc = rpcrdma_bc_setup_reqs(r_xprt, reqs);
	if (rc)
		goto out_free;

	r_xprt->rx_buf.rb_bc_srv_max_requests = reqs;
	trace_xprtrdma_cb_setup(r_xprt, reqs);
	return 0;

out_free:
	xprt_rdma_bc_destroy(xprt, reqs);

out_err:
	pr_err("RPC: %s: setup backchannel transport failed\n", __func__);
	return -ENOMEM;
}

/**
 * xprt_rdma_bc_up - Create transport endpoint for backchannel service
 * @serv: server endpoint
 * @net: network namespace
 *
 * The "xprt" is an implied argument: it supplies the name of the
 * backchannel transport class.
 *
 * Returns zero on success, negative errno on failure
 */
int xprt_rdma_bc_up(struct svc_serv *serv, struct net *net)
{
	int ret;

	ret = svc_create_xprt(serv, "rdma-bc", net, PF_INET, 0, 0);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	size_t maxmsg;

	maxmsg = min_t(unsigned int, cdata->inline_rsize, cdata->inline_wsize);
	maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
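	/* Each inline buffer begins with the RPC/RDMA transport
	 * header, which consumes the first RPCRDMA_HDRLEN_MIN bytes.
	 */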
	return maxmsg - RPCRDMA_HDRLEN_MIN;
}

static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	__be32 *p;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
			req->rl_rdmabuf->rg_base);

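	/* Reserve seven XDR words (28 bytes, RPCRDMA_HDRLEN_MIN) for
	 * the reply's transport header: XID, version, credits, the
	 * rdma_msg proc, and three empty chunk lists.
	 */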
	p = xdr_reserve_space(&req->rl_stream, 28);
	if (unlikely(!p))
		return -EIO;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	*p++ = rdma_msg;
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p = xdr_zero;

	if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
				      &rqst->rq_snd_buf, rpcrdma_noch))
		return -EIO;

	trace_xprtrdma_cb_reply(rqst);
	return 0;
}

/**
 * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
 * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *	%0 if the RPC message has been sent
 *	%-ENOTCONN if the caller should reconnect and call again
 *	%-EBADSLT if no congestion control credit is available
 *	%-EIO if a permanent error occurred and the request was not
 *		sent. Do not try to send this message again.
 */
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	int rc;

	if (!xprt_connected(xprt))
		return -ENOTCONN;

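	/* An RPC congestion control credit must be obtained before
	 * the reply can be marshaled and posted.
	 */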
	if (!xprt_request_get_cong(xprt, rqst))
		return -EBADSLT;

	rc = rpcrdma_bc_marshal_reply(rqst);
	if (rc < 0)
		goto failed_marshal;

	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;
	return 0;

failed_marshal:
	if (rc != -ENOTCONN)
		return rc;
drop_connection:
	xprt_rdma_close(xprt);
	return -ENOTCONN;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpc_rqst *rqst, *tmp;

	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
		spin_unlock(&xprt->bc_pa_lock);

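		/* bc_pa_lock is released around the destroy so that
		 * the req is not freed while the list lock is held,
		 * then re-taken to continue the scan.
		 */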
		rpcrdma_req_destroy(rpcr_to_rdmar(rqst));

		spin_lock(&xprt->bc_pa_lock);
	}
	spin_unlock(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpc_xprt *xprt = rqst->rq_xprt;

	dprintk("RPC: %s: freeing rqst %p (req %p)\n",
		__func__, rqst, req);

	rpcrdma_recv_buffer_put(req->rl_reply);
	req->rl_reply = NULL;

	spin_lock(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server's
 *      forechannel currently ignores them
 *    o The ULP manages a replay cache (e.g., NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;

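	/* A zero-length decode returns a pointer to the head of the
	 * unconsumed receive stream without advancing it; what
	 * remains in the stream is the backchannel RPC call itself.
	 */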
	p = xdr_inline_decode(&rep->rr_stream, 0);
	size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC: %s: callback XID %08x, length=%u\n",
		__func__, be32_to_cpup(p), size);
	pr_info("RPC: %s: %*ph\n", __func__, size, p);
#endif

	/* Grab a free bc rqst */
	spin_lock(&xprt->bc_pa_lock);
	if (list_empty(&xprt->bc_pa_list)) {
		spin_unlock(&xprt->bc_pa_lock);
		goto out_overflow;
	}
	rqst = list_first_entry(&xprt->bc_pa_list,
				struct rpc_rqst, rq_bc_pa_list);
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);

	/* Prepare rqst */
	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_bytes_sent = 0;
	rqst->rq_xid = *p;

	rqst->rq_private_buf.len = size;

	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it is not released while the req is pointing
	 * to its buffer, and so that it can be reposted after
	 * the Upper Layer is done decoding it.
	 */
	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
	trace_xprtrdma_cb_call(rqst);

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_force_disconnect(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
	return;
}