/*
 * linux/fs/lockd/clntproc.c
 *
 * RPC procedures for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>

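/*
 * Debug facility for this file, and the client-side tunables: how long
 * to back off while the server is in its grace period, how often to
 * re-poll a blocked lock, and how many times the CANCEL path retries.
 */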
#define NLMDBG_FACILITY NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT (5*HZ)
#define NLMCLNT_POLL_TIMEOUT (30*HZ)
#define NLMCLNT_MAX_RETRIES 3

static int nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static int nlm_stat_to_errno(__be32 stat);
static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
static int nlmclnt_cancel(struct nlm_host *, int, struct file_lock *);

static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;

/*
 * Cookie counter for NLM requests
 */
static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);

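/*
 * Each request gets a fresh cookie so that replies and callbacks can be
 * matched to the call that caused them.
 */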
void nlmclnt_next_cookie(struct nlm_cookie *c)
{
        u32 cookie = atomic_inc_return(&nlm_cookie);

        memcpy(c->data, &cookie, 4);
        c->len = 4;
}

static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
{
        atomic_inc(&lockowner->count);
        return lockowner;
}

static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
{
        if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
                return;
        list_del(&lockowner->list);
        spin_unlock(&lockowner->host->h_lock);
        nlmclnt_release_host(lockowner->host);
        kfree(lockowner);
}

static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
        struct nlm_lockowner *lockowner;
        list_for_each_entry(lockowner, &host->h_lockowners, list) {
                if (lockowner->pid == pid)
                        return -EBUSY;
        }
        return 0;
}

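/*
 * The NLM "pid" (svid) identifies a lock owner on the wire.  It only
 * has to be unique per host, so hand out values from a per-host
 * counter, skipping any that are still in use.
 */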
static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
        uint32_t res;
        do {
                res = host->h_pidcount++;
        } while (nlm_pidbusy(host, res) < 0);
        return res;
}

static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
        struct nlm_lockowner *lockowner;
        list_for_each_entry(lockowner, &host->h_lockowners, list) {
                if (lockowner->owner != owner)
                        continue;
                return nlm_get_lockowner(lockowner);
        }
        return NULL;
}

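/*
 * Look up (or create) the lockowner for @owner.  The allocation happens
 * with h_lock dropped, since kmalloc(GFP_KERNEL) may sleep; after
 * retaking the lock we must search again in case another task inserted
 * the same owner while we were allocating.
 */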
static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
        struct nlm_lockowner *res, *new = NULL;

        spin_lock(&host->h_lock);
        res = __nlm_find_lockowner(host, owner);
        if (res == NULL) {
                spin_unlock(&host->h_lock);
                new = kmalloc(sizeof(*new), GFP_KERNEL);
                spin_lock(&host->h_lock);
                res = __nlm_find_lockowner(host, owner);
                if (res == NULL && new != NULL) {
                        res = new;
                        atomic_set(&new->count, 1);
                        new->owner = owner;
                        new->pid = __nlm_alloc_pid(host);
                        new->host = nlm_get_host(host);
                        list_add(&new->list, &host->h_lockowners);
                        new = NULL;
                }
        }
        spin_unlock(&host->h_lock);
        kfree(new);
        return res;
}

/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
        struct nlm_args *argp = &req->a_args;
        struct nlm_lock *lock = &argp->lock;
        char *nodename = req->a_host->h_rpcclnt->cl_nodename;

        nlmclnt_next_cookie(&argp->cookie);
        memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh));
        lock->caller = nodename;
        lock->oh.data = req->a_owner;
        lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
                                (unsigned int)fl->fl_u.nfs_fl.owner->pid,
                                nodename);
        lock->svid = fl->fl_u.nfs_fl.owner->pid;
        lock->fl.fl_start = fl->fl_start;
        lock->fl.fl_end = fl->fl_end;
        lock->fl.fl_type = fl->fl_type;
}

static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
        WARN_ON_ONCE(req->a_args.lock.fl.fl_ops != NULL);
}

/**
 * nlmclnt_proc - Perform a single client-side lock request
 * @host: address of a valid nlm_host context representing the NLM server
 * @cmd: fcntl-style file lock operation to perform
 * @fl: address of arguments for the lock operation
 * @data: address of data to be sent to callback operations
 */
int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data)
{
        struct nlm_rqst *call;
        int status;
        const struct nlmclnt_operations *nlmclnt_ops = host->h_nlmclnt_ops;

        call = nlm_alloc_call(host);
        if (call == NULL)
                return -ENOMEM;

        if (nlmclnt_ops && nlmclnt_ops->nlmclnt_alloc_call)
                nlmclnt_ops->nlmclnt_alloc_call(data);

        nlmclnt_locks_init_private(fl, host);
        if (!fl->fl_u.nfs_fl.owner) {
                /* lockowner allocation has failed */
                nlmclnt_release_call(call);
                return -ENOMEM;
        }
        /* Set up the argument struct */
        nlmclnt_setlockargs(call, fl);
        call->a_callback_data = data;

        if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
                if (fl->fl_type != F_UNLCK) {
                        call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
                        status = nlmclnt_lock(call, fl);
                } else
                        status = nlmclnt_unlock(call, fl);
        } else if (IS_GETLK(cmd))
                status = nlmclnt_test(call, fl);
        else
                status = -EINVAL;
        fl->fl_ops->fl_release_private(fl);
        fl->fl_ops = NULL;

        dprintk("lockd: clnt proc returns %d\n", status);
        return status;
}
EXPORT_SYMBOL_GPL(nlmclnt_proc);

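/*
 * Illustrative call site (not part of this file): the NFS client's
 * file_operations ->lock() handlers forward fcntl requests here, e.g.
 *
 *        status = nlmclnt_proc(host, F_SETLKW, fl, NULL);
 *
 * with the nlm_host obtained via nlmclnt_init() when the filesystem
 * is mounted.
 */
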
/*
 * Allocate an NLM RPC call struct
 */
struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
{
        struct nlm_rqst *call;

        for (;;) {
                call = kzalloc(sizeof(*call), GFP_KERNEL);
                if (call != NULL) {
                        atomic_set(&call->a_count, 1);
                        locks_init_lock(&call->a_args.lock.fl);
                        locks_init_lock(&call->a_res.lock.fl);
                        call->a_host = nlm_get_host(host);
                        return call;
                }
                if (signalled())
                        break;
                printk("nlm_alloc_call: failed, waiting for memory\n");
                schedule_timeout_interruptible(5*HZ);
        }
        return NULL;
}

void nlmclnt_release_call(struct nlm_rqst *call)
{
        const struct nlmclnt_operations *nlmclnt_ops = call->a_host->h_nlmclnt_ops;

        if (!atomic_dec_and_test(&call->a_count))
                return;
        if (nlmclnt_ops && nlmclnt_ops->nlmclnt_release_call)
                nlmclnt_ops->nlmclnt_release_call(call->a_callback_data);
        nlmclnt_release_host(call->a_host);
        nlmclnt_release_lockargs(call);
        kfree(call);
}

static void nlmclnt_rpc_release(void *data)
{
        nlmclnt_release_call(data);
}

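/*
 * Sleep (interruptibly, and freezably) for up to NLMCLNT_GRACE_WAIT
 * before retrying a request that the server rejected because it is
 * still in its grace period.  Returns -EINTR if a signal arrived.
 */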
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
        DEFINE_WAIT(wait);
        int status = -EINTR;

        prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
        if (!signalled()) {
                schedule_timeout(NLMCLNT_GRACE_WAIT);
                try_to_freeze();
                if (!signalled())
                        status = 0;
        }
        finish_wait(queue, &wait);
        return status;
}

/*
 * Generic NLM call
 */
static int
nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc)
{
        struct nlm_host *host = req->a_host;
        struct rpc_clnt *clnt;
        struct nlm_args *argp = &req->a_args;
        struct nlm_res *resp = &req->a_res;
        struct rpc_message msg = {
                .rpc_argp = argp,
                .rpc_resp = resp,
                .rpc_cred = cred,
        };
        int status;

        dprintk("lockd: call procedure %d on %s\n",
                (int)proc, host->h_name);

        do {
                if (host->h_reclaiming && !argp->reclaim)
                        goto in_grace_period;

                /* If we have no RPC client yet, create one. */
                if ((clnt = nlm_bind_host(host)) == NULL)
                        return -ENOLCK;
                msg.rpc_proc = &clnt->cl_procinfo[proc];

                /* Perform the RPC call. If an error occurs, try again */
                if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
                        dprintk("lockd: rpc_call returned error %d\n", -status);
                        switch (status) {
                        case -EPROTONOSUPPORT:
                                status = -EINVAL;
                                break;
                        case -ECONNREFUSED:
                        case -ETIMEDOUT:
                        case -ENOTCONN:
                                nlm_rebind_host(host);
                                status = -EAGAIN;
                                break;
                        case -ERESTARTSYS:
                                return signalled() ? -EINTR : status;
                        default:
                                break;
                        }
                        break;
                } else
                if (resp->status == nlm_lck_denied_grace_period) {
                        dprintk("lockd: server in grace period\n");
                        if (argp->reclaim) {
                                printk(KERN_WARNING
                                       "lockd: spurious grace period reject?!\n");
                                return -ENOLCK;
                        }
                } else {
                        if (!argp->reclaim) {
                                /* We appear to be out of the grace period */
                                wake_up_all(&host->h_gracewait);
                        }
                        dprintk("lockd: server returns status %d\n",
                                ntohl(resp->status));
                        return 0;       /* Okay, call complete */
                }

in_grace_period:
                /*
                 * The server has rebooted and appears to be in the grace
                 * period during which locks are only allowed to be
                 * reclaimed.
                 * We can only back off and try again later.
                 */
                status = nlm_wait_on_grace(&host->h_gracewait);
        } while (status == 0);

        return status;
}

/*
 * Generic NLM call, async version.
 */
static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
        struct nlm_host *host = req->a_host;
        struct rpc_clnt *clnt;
        struct rpc_task_setup task_setup_data = {
                .rpc_message = msg,
                .callback_ops = tk_ops,
                .callback_data = req,
                .flags = RPC_TASK_ASYNC,
        };

        dprintk("lockd: call procedure %d on %s (async)\n",
                (int)proc, host->h_name);

        /* If we have no RPC client yet, create one. */
        clnt = nlm_bind_host(host);
        if (clnt == NULL)
                goto out_err;
        msg->rpc_proc = &clnt->cl_procinfo[proc];
        task_setup_data.rpc_client = clnt;

        /* bootstrap and kick off the async RPC call */
        return rpc_run_task(&task_setup_data);
out_err:
        tk_ops->rpc_release(req);
        return ERR_PTR(-ENOLCK);
}

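/*
 * Fire-and-forget helper: the task reference is dropped immediately,
 * and tk_ops->rpc_release (normally nlmclnt_rpc_release) frees the
 * request once the RPC completes.
 */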
static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
        struct rpc_task *task;

        task = __nlm_async_call(req, proc, msg, tk_ops);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task);
        return 0;
}

/*
 * NLM asynchronous call.
 */
int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
        struct rpc_message msg = {
                .rpc_argp = &req->a_args,
                .rpc_resp = &req->a_res,
        };
        return nlm_do_async_call(req, proc, &msg, tk_ops);
}

int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
        struct rpc_message msg = {
                .rpc_argp = &req->a_res,
        };
        return nlm_do_async_call(req, proc, &msg, tk_ops);
}

/*
 * NLM client asynchronous call.
 *
 * Note that although the calls are asynchronous, and are therefore
 * guaranteed to complete, we still always attempt to wait for
 * completion in order to be able to correctly track the lock
 * state.
 */
static int nlmclnt_async_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
        struct rpc_message msg = {
                .rpc_argp = &req->a_args,
                .rpc_resp = &req->a_res,
                .rpc_cred = cred,
        };
        struct rpc_task *task;
        int err;

        task = __nlm_async_call(req, proc, &msg, tk_ops);
        if (IS_ERR(task))
                return PTR_ERR(task);
        err = rpc_wait_for_completion_task(task);
        rpc_put_task(task);
        return err;
}

/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
        int status;

        status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);
        if (status < 0)
                goto out;

        switch (req->a_res.status) {
        case nlm_granted:
                fl->fl_type = F_UNLCK;
                break;
        case nlm_lck_denied:
                /*
                 * Report the conflicting lock back to the application.
                 */
                fl->fl_start = req->a_res.lock.fl.fl_start;
                fl->fl_end = req->a_res.lock.fl.fl_end;
                fl->fl_type = req->a_res.lock.fl.fl_type;
                fl->fl_pid = 0;
                break;
        default:
                status = nlm_stat_to_errno(req->a_res.status);
        }
out:
        nlmclnt_release_call(req);
        return status;
}

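/*
 * file_lock_operations for NLM client locks: these keep the lockowner
 * refcount and the per-host h_granted list consistent whenever the VFS
 * copies or releases a struct file_lock behind our back.
 */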
static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
        spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
        new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
        new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner);
        list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
        spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
}

static void nlmclnt_locks_release_private(struct file_lock *fl)
{
        spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
        list_del(&fl->fl_u.nfs_fl.list);
        spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
        nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
}

static const struct file_lock_operations nlmclnt_lock_ops = {
        .fl_copy_lock = nlmclnt_locks_copy_lock,
        .fl_release_private = nlmclnt_locks_release_private,
};

static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
        fl->fl_u.nfs_fl.state = 0;
        fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
        INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
        fl->fl_ops = &nlmclnt_lock_ops;
}

static int do_vfs_lock(struct file_lock *fl)
{
        return locks_lock_file_wait(fl->fl_file, fl);
}

/*
 * LOCK: Try to create a lock
 *
 *                      Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A: Implement busy-waiting
 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
        struct rpc_cred *cred = nfs_file_cred(fl->fl_file);
        struct nlm_host *host = req->a_host;
        struct nlm_res *resp = &req->a_res;
        struct nlm_wait *block = NULL;
        unsigned char fl_flags = fl->fl_flags;
        unsigned char fl_type;
        int status = -ENOLCK;

        if (nsm_monitor(host) < 0)
                goto out;
        req->a_args.state = nsm_local_state;

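        /*
         * Probe the local VFS first with FL_ACCESS set: this tests for
         * conflicting local locks without actually recording one, so we
         * can fail fast before going to the wire.
         */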
        fl->fl_flags |= FL_ACCESS;
        status = do_vfs_lock(fl);
        fl->fl_flags = fl_flags;
        if (status < 0)
                goto out;

        block = nlmclnt_prepare_block(host, fl);
again:
        /*
         * Initialise resp->status to a valid non-zero value,
         * since 0 == nlm_lck_granted
         */
        resp->status = nlm_lck_blocked;
        for (;;) {
                /* Reboot protection */
                fl->fl_u.nfs_fl.state = host->h_state;
                status = nlmclnt_call(cred, req, NLMPROC_LOCK);
                if (status < 0)
                        break;
                /* Did a reclaimer thread notify us of a server reboot? */
                if (resp->status == nlm_lck_denied_grace_period)
                        continue;
                if (resp->status != nlm_lck_blocked)
                        break;
                /* Wait on an NLM blocking lock */
                status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
                if (status < 0)
                        break;
                if (resp->status != nlm_lck_blocked)
                        break;
        }

        /* if we were interrupted while blocking, then cancel the lock request
         * and exit
         */
        if (resp->status == nlm_lck_blocked) {
                if (!req->a_args.block)
                        goto out_unlock;
                if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
                        goto out_unblock;
        }

        if (resp->status == nlm_granted) {
                down_read(&host->h_rwsem);
                /* Check whether or not the server has rebooted */
                if (fl->fl_u.nfs_fl.state != host->h_state) {
                        up_read(&host->h_rwsem);
                        goto again;
                }
                /* Ensure the resulting lock will get added to granted list */
                fl->fl_flags |= FL_SLEEP;
                if (do_vfs_lock(fl) < 0)
                        printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
                up_read(&host->h_rwsem);
                fl->fl_flags = fl_flags;
                status = 0;
        }
        if (status < 0)
                goto out_unlock;
        /*
         * EAGAIN doesn't make sense for sleeping locks, and in some
         * cases NLM_LCK_DENIED is returned for a permanent error.  So
         * turn it into an ENOLCK.
         */
        if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP))
                status = -ENOLCK;
        else
                status = nlm_stat_to_errno(resp->status);
out_unblock:
        nlmclnt_finish_block(block);
out:
        nlmclnt_release_call(req);
        return status;
out_unlock:
        /* Fatal error: ensure that we remove the lock altogether */
        dprintk("lockd: lock attempt ended in fatal error.\n"
                "       Attempting to unlock.\n");
        nlmclnt_finish_block(block);
        fl_type = fl->fl_type;
        fl->fl_type = F_UNLCK;
        down_read(&host->h_rwsem);
        do_vfs_lock(fl);
        up_read(&host->h_rwsem);
        fl->fl_type = fl_type;
        fl->fl_flags = fl_flags;
        nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
        return status;
}

/*
 * RECLAIM: Try to reclaim a lock
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl,
                struct nlm_rqst *req)
{
        int status;

        memset(req, 0, sizeof(*req));
        locks_init_lock(&req->a_args.lock.fl);
        locks_init_lock(&req->a_res.lock.fl);
        req->a_host = host;

        /* Set up the argument struct */
        nlmclnt_setlockargs(req, fl);
        req->a_args.reclaim = 1;

        status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
        if (status >= 0 && req->a_res.status == nlm_granted)
                return 0;

        printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
               "(errno %d, status %d)\n", fl->fl_pid,
               status, ntohl(req->a_res.status));

        /*
         * FIXME: This is a serious failure. We can
         *
         *  a.  Ignore the problem
         *  b.  Send the owning process some signal (Linux doesn't have
         *      SIGLOST, though...)
         *  c.  Retry the operation
         *
         * Until someone comes up with a simple implementation
         * for b or c, I'll choose option a.
         */

        return -ENOLCK;
}

/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
        struct nlm_host *host = req->a_host;
        struct nlm_res *resp = &req->a_res;
        int status;
        unsigned char fl_flags = fl->fl_flags;

        /*
         * Note: the server is supposed to either grant us the unlock
         * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
         * case, we want to unlock.
         */
        fl->fl_flags |= FL_EXISTS;
        down_read(&host->h_rwsem);
        status = do_vfs_lock(fl);
        up_read(&host->h_rwsem);
        fl->fl_flags = fl_flags;
        if (status == -ENOENT) {
                status = 0;
                goto out;
        }

        atomic_inc(&req->a_count);
        status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
                        NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
        if (status < 0)
                goto out;

        if (resp->status == nlm_granted)
                goto out;

        if (resp->status != nlm_lck_denied_nolocks)
                printk("lockd: unexpected unlock status: %d\n",
                        ntohl(resp->status));
        /* What to do now? I'm out of my depth... */
        status = -ENOLCK;
out:
        nlmclnt_release_call(req);
        return status;
}

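/*
 * ->rpc_call_prepare for UNLOCK: give the upper layer a chance to defer
 * the call.  If its nlmclnt_unlock_prepare() hook returns true, we skip
 * rpc_call_start() here and the caller's machinery is expected to start
 * the task later.
 */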
static void nlmclnt_unlock_prepare(struct rpc_task *task, void *data)
{
        struct nlm_rqst *req = data;
        const struct nlmclnt_operations *nlmclnt_ops = req->a_host->h_nlmclnt_ops;
        bool defer_call = false;

        if (nlmclnt_ops && nlmclnt_ops->nlmclnt_unlock_prepare)
                defer_call = nlmclnt_ops->nlmclnt_unlock_prepare(task, req->a_callback_data);

        if (!defer_call)
                rpc_call_start(task);
}

static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
        struct nlm_rqst *req = data;
        u32 status = ntohl(req->a_res.status);

        if (RPC_ASSASSINATED(task))
                goto die;

        if (task->tk_status < 0) {
                dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
                switch (task->tk_status) {
                case -EACCES:
                case -EIO:
                        goto die;
                default:
                        goto retry_rebind;
                }
        }
        if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
                rpc_delay(task, NLMCLNT_GRACE_WAIT);
                goto retry_unlock;
        }
        if (status != NLM_LCK_GRANTED)
                printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
        return;
retry_rebind:
        nlm_rebind_host(req->a_host);
retry_unlock:
        rpc_restart_call(task);
}

static const struct rpc_call_ops nlmclnt_unlock_ops = {
        .rpc_call_prepare = nlmclnt_unlock_prepare,
        .rpc_call_done = nlmclnt_unlock_callback,
        .rpc_release = nlmclnt_rpc_release,
};

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
{
        struct nlm_rqst *req;
        int status;

        dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
                "       Attempting to cancel lock.\n");

        req = nlm_alloc_call(host);
        if (!req)
                return -ENOMEM;
        req->a_flags = RPC_TASK_ASYNC;

        nlmclnt_setlockargs(req, fl);
        req->a_args.block = block;

        atomic_inc(&req->a_count);
        status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
                        NLMPROC_CANCEL, &nlmclnt_cancel_ops);
        if (status == 0 && req->a_res.status == nlm_lck_denied)
                status = -ENOLCK;
        nlmclnt_release_call(req);
        return status;
}

static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
        struct nlm_rqst *req = data;
        u32 status = ntohl(req->a_res.status);

        if (RPC_ASSASSINATED(task))
                goto die;

        if (task->tk_status < 0) {
                dprintk("lockd: CANCEL call error %d, retrying.\n",
                        task->tk_status);
                goto retry_cancel;
        }

        dprintk("lockd: cancel status %u (task %u)\n",
                status, task->tk_pid);

        switch (status) {
        case NLM_LCK_GRANTED:
        case NLM_LCK_DENIED_GRACE_PERIOD:
        case NLM_LCK_DENIED:
                /* Everything's good */
                break;
        case NLM_LCK_DENIED_NOLOCKS:
                dprintk("lockd: CANCEL failed (server has no locks)\n");
                goto retry_cancel;
        default:
                printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
                        status);
        }

die:
        return;

retry_cancel:
        /* Don't ever retry more than 3 times */
        if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
                goto die;
        nlm_rebind_host(req->a_host);
        rpc_restart_call(task);
        rpc_delay(task, 30 * HZ);
}

static const struct rpc_call_ops nlmclnt_cancel_ops = {
        .rpc_call_done = nlmclnt_cancel_callback,
        .rpc_release = nlmclnt_rpc_release,
};

/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(__be32 status)
{
        switch (ntohl(status)) {
        case NLM_LCK_GRANTED:
                return 0;
        case NLM_LCK_DENIED:
                return -EAGAIN;
        case NLM_LCK_DENIED_NOLOCKS:
        case NLM_LCK_DENIED_GRACE_PERIOD:
                return -ENOLCK;
        case NLM_LCK_BLOCKED:
                printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
                return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
        case NLM_DEADLCK:
                return -EDEADLK;
        case NLM_ROFS:
                return -EROFS;
        case NLM_STALE_FH:
                return -ESTALE;
        case NLM_FBIG:
                return -EOVERFLOW;
        case NLM_FAILED:
                return -ENOLCK;
#endif
        }
        printk(KERN_NOTICE "lockd: unexpected server status %d\n",
               ntohl(status));
        return -ENOLCK;
}