/******************************************************************************

(c) 2007 Network Appliance, Inc. All Rights Reserved.
(c) 2009 NetApp. All Rights Reserved.

NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
#include <linux/export.h>
#include <linux/sunrpc/bc_xprt.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
	return xprt->bc_alloc_count < atomic_read(&xprt->bc_free_slots);
}

static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	atomic_add(n, &xprt->bc_free_slots);
	xprt->bc_alloc_count += n;
}

static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	atomic_sub(n, &xprt->bc_free_slots);
	return xprt->bc_alloc_count -= n;
}

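/*
 * Roughly: bc_free_slots counts preallocated slots not currently in
 * use by the callback service, while bc_alloc_count tracks the
 * rpc_rqsts parked on xprt->bc_pa_list.  A request being freed is
 * requeued only while the list is shorter than the number of free
 * slots (see xprt_need_to_requeue() above and xprt_free_bc_rqst()
 * below).
 */
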
/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
	struct xdr_buf *xbufp;

	dprintk("RPC: free allocations for req= %p\n", req);
	WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	xbufp = &req->rq_rcv_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	xbufp = &req->rq_snd_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	kfree(req);
}

static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
{
	struct page *page;
	/* Preallocate one page to back the XDR buffer */
	page = alloc_page(gfp_flags);
	if (page == NULL)
		return -ENOMEM;
	xdr_buf_init(buf, page_address(page), PAGE_SIZE);
	return 0;
}
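
/*
 * After xdr_buf_init() the single page backs head[0] entirely:
 * head[0].iov_base points at the page, and head[0].iov_len and
 * buf->buflen are both PAGE_SIZE, with the page vector and tail
 * left zeroed.
 */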

static
struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req;

	/* Pre-allocate one backchannel rpc_rqst */
	req = kzalloc(sizeof(*req), gfp_flags);
	if (req == NULL)
		return NULL;

	req->rq_xprt = xprt;
	INIT_LIST_HEAD(&req->rq_bc_list);

	/* Preallocate one XDR receive buffer */
	if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc receive xbuf\n");
		goto out_free;
	}
	req->rq_rcv_buf.len = PAGE_SIZE;

	/* Preallocate one XDR send buffer */
	if (xprt_alloc_xdr_buf(&req->rq_snd_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc snd xbuf\n");
		goto out_free;
	}
	return req;
out_free:
	xprt_free_allocation(req);
	return NULL;
}

/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel.  This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt.  The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt.  Any one of these resources may be used by an
 * incoming callback request.  It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large.  For example, a pNFS server
 * using multiple deviceids.  The list can be unbounded, but the client
 * has the ability to tell the server the maximum size of the callback
 * requests.  Each deviceID is 16 bytes, so allocate one page
 * for the arguments to have enough room to receive a number of these
 * deviceIDs (up to 256 of them fit in a 4096-byte page).  The NFS
 * client indicates to the pNFS server that its callback requests can
 * be up to 4096 bytes in size.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	if (!xprt->ops->bc_setup)
		return 0;
	return xprt->ops->bc_setup(xprt, min_reqs);
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);

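/*
 * A minimal caller sketch (hypothetical, not code in this file): an
 * NFSv4.1-style client would preallocate one rpc_rqst per negotiated
 * backchannel slot before running the session, roughly:
 *
 *	if (xprt_setup_backchannel(xprt, bc_slot_count) < 0)
 *		goto out_err;
 *
 * where "bc_slot_count" stands for whatever slot count the session
 * negotiated.
 */
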
int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	struct rpc_rqst *req;
	struct list_head tmp_list;
	int i;

	dprintk("RPC: setup backchannel transport\n");

	/*
	 * We use a temporary list to keep track of the preallocated
	 * buffers.  Once we're done building the list we splice it
	 * into the backchannel preallocation list off of the rpc_xprt
	 * struct.  This helps minimize the amount of time the list
	 * lock is held on the rpc_xprt struct.  It also makes cleanup
	 * easier in case of memory allocation errors.
	 */
	INIT_LIST_HEAD(&tmp_list);
	for (i = 0; i < min_reqs; i++) {
		/* Pre-allocate one backchannel rpc_rqst */
		req = xprt_alloc_bc_req(xprt, GFP_KERNEL);
		if (req == NULL) {
			printk(KERN_ERR "Failed to create bc rpc_rqst\n");
			goto out_free;
		}

		/* Add the allocated buffer to the tmp list */
		dprintk("RPC: adding req= %p\n", req);
		list_add(&req->rq_bc_pa_list, &tmp_list);
	}

	/*
	 * Add the temporary list to the backchannel preallocation list
	 */
	spin_lock(&xprt->bc_pa_lock);
	list_splice(&tmp_list, &xprt->bc_pa_list);
	xprt_inc_alloc_count(xprt, min_reqs);
	spin_unlock(&xprt->bc_pa_lock);

	dprintk("RPC: setup backchannel transport done\n");
	return 0;

out_free:
	/*
	 * Memory allocation failed, free the temporary list
	 */
	while (!list_empty(&tmp_list)) {
		req = list_first_entry(&tmp_list,
				struct rpc_rqst,
				rq_bc_pa_list);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
	}

	dprintk("RPC: setup backchannel transport failed\n");
	return -ENOMEM;
}

/**
 * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
 * @xprt:	the transport holding the preallocated structures
 * @max_reqs:	the maximum number of preallocated structures to destroy
 *
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	if (xprt->ops->bc_destroy)
		xprt->ops->bc_destroy(xprt, max_reqs);
}
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);

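/*
 * Teardown sketch (hypothetical): mirrors the setup call above, passing
 * the same count that was originally handed to xprt_setup_backchannel():
 *
 *	xprt_destroy_backchannel(xprt, bc_slot_count);
 */
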
void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	struct rpc_rqst *req = NULL, *tmp = NULL;

	dprintk("RPC: destroy backchannel transport\n");

	if (max_reqs == 0)
		goto out;

	spin_lock_bh(&xprt->bc_pa_lock);
	xprt_dec_alloc_count(xprt, max_reqs);
	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		dprintk("RPC: req=%p\n", req);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
		if (--max_reqs == 0)
			break;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);

out:
	dprintk("RPC: backchannel list empty= %s\n",
		list_empty(&xprt->bc_pa_list) ? "true" : "false");
}

static struct rpc_rqst *xprt_get_bc_request(struct rpc_xprt *xprt, __be32 xid,
		struct rpc_rqst *new)
{
	struct rpc_rqst *req = NULL;

	dprintk("RPC: allocate a backchannel request\n");
	if (atomic_read(&xprt->bc_free_slots) <= 0)
		goto not_found;
	if (list_empty(&xprt->bc_pa_list)) {
		if (!new)
			goto not_found;
		list_add_tail(&new->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
	}
	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
				rq_bc_pa_list);
	req->rq_reply_bytes_recvd = 0;
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));
	req->rq_xid = xid;
	req->rq_connect_cookie = xprt->connect_cookie;
	dprintk("RPC: backchannel req=%p\n", req);
not_found:
	return req;
}

/*
 * Return the preallocated rpc_rqst structure and XDR buffers
 * associated with this request.
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	xprt->ops->bc_free_rqst(req);
}
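
/*
 * Transports plug the generic helpers in this file into their
 * rpc_xprt_ops; a sketch (the ops table itself is illustrative, but
 * the field names match the xprt->ops calls used in this file):
 *
 *	static const struct rpc_xprt_ops example_bc_ops = {
 *		...
 *		.bc_setup	= xprt_setup_bc,
 *		.bc_free_rqst	= xprt_free_bc_rqst,
 *		.bc_destroy	= xprt_destroy_bc,
 *	};
 */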

void xprt_free_bc_rqst(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: free backchannel req=%p\n", req);

	req->rq_connect_cookie = xprt->connect_cookie - 1;
	smp_mb__before_atomic();
	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
	smp_mb__after_atomic();

	/*
	 * Return it to the list of preallocations so that it
	 * may be reused by a new callback request.
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	if (xprt_need_to_requeue(xprt)) {
		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
		req = NULL;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);
	if (req != NULL) {
		/*
		 * The last remaining session was destroyed while this
		 * entry was in use.  Free the entry and don't attempt
		 * to add it back to the list because there is no need
		 * to have any more preallocated entries.
		 */
		dprintk("RPC: Last session removed req=%p\n", req);
		xprt_free_allocation(req);
		return;
	}
}

/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup.  Buffer space for the send and private XDR buffers
 * has been preallocated as well.  Use xprt_lookup_bc_request() to obtain
 * one of these preallocated structures; use xprt_free_bc_request() to
 * return it.
 *
 * We know that we're called in soft-interrupt context, so take the plain
 * spin_lock: there is no need for the bottom-half-disabling variant.
 *
 * Return an available rpc_rqst, or NULL if none are available.
 */
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req, *new = NULL;

	do {
		spin_lock(&xprt->bc_pa_lock);
		list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
			if (req->rq_connect_cookie != xprt->connect_cookie)
				continue;
			if (req->rq_xid == xid)
				goto found;
		}
		req = xprt_get_bc_request(xprt, xid, new);
found:
		spin_unlock(&xprt->bc_pa_lock);
		if (new) {
			if (req != new)
				xprt_free_bc_rqst(new);
			break;
		} else if (req)
			break;
		new = xprt_alloc_bc_req(xprt, GFP_KERNEL);
	} while (new);
	return req;
}

/*
 * Add callback request to callback list.  The callback
 * service sleeps on the sv_cb_waitq waiting for new
 * requests.  Wake it up after enqueuing the request.
 */
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct svc_serv *bc_serv = xprt->bc_serv;

	spin_lock(&xprt->bc_pa_lock);
	list_del(&req->rq_bc_pa_list);
	xprt_dec_alloc_count(xprt, 1);
	spin_unlock(&xprt->bc_pa_lock);

	req->rq_private_buf.len = copied;
	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);

	dprintk("RPC: add callback request to list\n");
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
	wake_up(&bc_serv->sv_cb_waitq);
	spin_unlock(&bc_serv->sv_cb_lock);
}
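
/*
 * Illustrative (hypothetical) receive-path sketch showing how the lookup
 * and completion halves above fit together; "bc_recv_done" and "copied"
 * are placeholders, not code in this file:
 *
 *	static void bc_recv_done(struct rpc_xprt *xprt, __be32 xid,
 *				 size_t copied)
 *	{
 *		struct rpc_rqst *req;
 *
 *		req = xprt_lookup_bc_request(xprt, xid);
 *		if (!req)
 *			return;	// no preallocated slot: drop the callback
 *		// ... copy the payload into req->rq_rcv_buf ...
 *		xprt_complete_bc_request(req, copied);
 *	}
 */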