/*
 *  linux/fs/lockd/clntproc.c
 *
 *  RPC procedures for the client side NLM implementation
 *
 *  Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>

#define NLMDBG_FACILITY		NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT	(5*HZ)
#define NLMCLNT_POLL_TIMEOUT	(30*HZ)
#define NLMCLNT_MAX_RETRIES	3

static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static int	nlm_stat_to_errno(__be32 stat);
static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
static int	nlmclnt_cancel(struct nlm_host *, int, struct file_lock *);

static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;

/*
 * Cookie counter for NLM requests
 */
static atomic_t	nlm_cookie = ATOMIC_INIT(0x1234);

void nlmclnt_next_cookie(struct nlm_cookie *c)
{
	u32	cookie = atomic_inc_return(&nlm_cookie);

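	/*
	 * NLM cookies are opaque to the server; a 32-bit counter is
	 * plenty to match replies (and async GRANTED callbacks) back
	 * to their requests.
	 */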
	memcpy(c->data, &cookie, 4);
	c->len = 4;
}

static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
{
	atomic_inc(&lockowner->count);
	return lockowner;
}

static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlm_release_host(lockowner->host);
	kfree(lockowner);
}

static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid == pid)
			return -EBUSY;
	}
	return 0;
}

static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
	uint32_t res;
	do {
		res = host->h_pidcount++;
	} while (nlm_pidbusy(host, res) < 0);
	return res;
}

static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->owner != owner)
			continue;
		return nlm_get_lockowner(lockowner);
	}
	return NULL;
}

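/*
 * Find or create the lockowner for this (host, owner) pair.  The
 * GFP_KERNEL allocation may sleep, so h_lock is dropped around it and
 * the lookup is repeated afterwards in case another task beat us to
 * the insertion.
 */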
static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlm_find_lockowner(host, owner);
	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlm_find_lockowner(host, owner);
		if (res == NULL && new != NULL) {
			res = new;
			atomic_set(&new->count, 1);
			new->owner = owner;
			new->pid = __nlm_alloc_pid(host);
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}
	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_args	*argp = &req->a_args;
	struct nlm_lock	*lock = &argp->lock;

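	/*
	 * A fresh cookie ties the reply to this request; the file
	 * handle, caller name and "pid@hostname" owner handle tell the
	 * server exactly which client and lock owner are asking.
	 */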
	nlmclnt_next_cookie(&argp->cookie);
	memcpy(&lock->fh, NFS_FH(fl->fl_file->f_path.dentry->d_inode), sizeof(struct nfs_fh));
	lock->caller = utsname()->nodename;
	lock->oh.data = req->a_owner;
	lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
				(unsigned int)fl->fl_u.nfs_fl.owner->pid,
				utsname()->nodename);
	lock->svid = fl->fl_u.nfs_fl.owner->pid;
	lock->fl.fl_start = fl->fl_start;
	lock->fl.fl_end = fl->fl_end;
	lock->fl.fl_type = fl->fl_type;
}

static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
	BUG_ON(req->a_args.lock.fl.fl_ops != NULL);
}

/**
 * nlmclnt_proc - Perform a single client-side lock request
 * @host: address of a valid nlm_host context representing the NLM server
 * @cmd: fcntl-style file lock operation to perform
 * @fl: address of arguments for the lock operation
 *
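 * Returns zero on success or a negative errno on failure.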
 */
int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl)
{
	struct nlm_rqst		*call;
	int			status;

	nlm_get_host(host);
	call = nlm_alloc_call(host);
	if (call == NULL)
		return -ENOMEM;

	nlmclnt_locks_init_private(fl, host);
	/* Set up the argument struct */
	nlmclnt_setlockargs(call, fl);

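	/*
	 * The BKL (lock_kernel) still serializes the lock/unlock/test
	 * bookkeeping on this path, including the fl_ops teardown
	 * below.
	 */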
	lock_kernel();
	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
		if (fl->fl_type != F_UNLCK) {
			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
			status = nlmclnt_lock(call, fl);
		} else
			status = nlmclnt_unlock(call, fl);
	} else if (IS_GETLK(cmd))
		status = nlmclnt_test(call, fl);
	else
		status = -EINVAL;

	fl->fl_ops->fl_release_private(fl);
	fl->fl_ops = NULL;
	unlock_kernel();

	dprintk("lockd: clnt proc returns %d\n", status);
	return status;
}
EXPORT_SYMBOL_GPL(nlmclnt_proc);

/*
 * Allocate an NLM RPC call struct
 *
 * Note: the caller must hold a reference to host. In case of failure,
 * this reference will be released.
 */
struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
{
	struct nlm_rqst	*call;

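	/*
	 * Retry the allocation until it succeeds or a signal arrives:
	 * failing a lock request outright over a transient memory
	 * shortage would be worse than waiting a little.
	 */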
	for (;;) {
		call = kzalloc(sizeof(*call), GFP_KERNEL);
		if (call != NULL) {
			atomic_set(&call->a_count, 1);
			locks_init_lock(&call->a_args.lock.fl);
			locks_init_lock(&call->a_res.lock.fl);
			call->a_host = host;
			return call;
		}
		if (signalled())
			break;
		printk("nlm_alloc_call: failed, waiting for memory\n");
		schedule_timeout_interruptible(5*HZ);
	}
	nlm_release_host(host);
	return NULL;
}

void nlm_release_call(struct nlm_rqst *call)
{
	if (!atomic_dec_and_test(&call->a_count))
		return;
	nlm_release_host(call->a_host);
	nlmclnt_release_lockargs(call);
	kfree(call);
}

static void nlmclnt_rpc_release(void *data)
{
	lock_kernel();
	nlm_release_call(data);
	unlock_kernel();
}

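/*
 * Wait out part of the server's grace period.  Sleeps for up to
 * NLMCLNT_GRACE_WAIT and returns -EINTR if a signal was received
 * before or during the wait, zero otherwise.
 */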
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
	DEFINE_WAIT(wait);
	int status = -EINTR;

	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
	if (!signalled()) {
		schedule_timeout(NLMCLNT_GRACE_WAIT);
		try_to_freeze();
		if (!signalled())
			status = 0;
	}
	finish_wait(queue, &wait);
	return status;
}

/*
 * Generic NLM call
 */
static int
nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
		.rpc_cred	= cred,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s\n",
			(int)proc, host->h_name);

	do {
		if (host->h_reclaiming && !argp->reclaim)
			goto in_grace_period;

		/* If we have no RPC client yet, create one. */
		if ((clnt = nlm_bind_host(host)) == NULL)
			return -ENOLCK;
		msg.rpc_proc = &clnt->cl_procinfo[proc];

		/* Perform the RPC call. If an error occurs, try again */
		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				nlm_rebind_host(host);
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled() ? -EINTR : status;
			default:
				break;
			}
			break;
		} else if (resp->status == nlm_lck_denied_grace_period) {
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				printk(KERN_WARNING
					"lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			if (!argp->reclaim) {
				/* We appear to be out of the grace period */
				wake_up_all(&host->h_gracewait);
			}
			dprintk("lockd: server returns status %d\n", resp->status);
			return 0;	/* Okay, call complete */
		}

in_grace_period:
		/*
		 * The server has rebooted and appears to be in the grace
		 * period during which locks are only allowed to be
		 * reclaimed.
		 * We can only back off and try again later.
		 */
		status = nlm_wait_on_grace(&host->h_gracewait);
	} while (status == 0);

	return status;
}

/*
 * Generic NLM call, async version.
 */
static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct rpc_task_setup task_setup_data = {
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = req,
		.flags = RPC_TASK_ASYNC,
	};

	dprintk("lockd: call procedure %d on %s (async)\n",
			(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	clnt = nlm_bind_host(host);
	if (clnt == NULL)
		goto out_err;
	msg->rpc_proc = &clnt->cl_procinfo[proc];
	task_setup_data.rpc_client = clnt;

	/* bootstrap and kick off the async RPC call */
	return rpc_run_task(&task_setup_data);
out_err:
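	/*
	 * No RPC task was ever started, so nobody will invoke the
	 * ->rpc_release() callback for us; drop the caller's
	 * reference to the request by hand.
	 */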
	tk_ops->rpc_release(req);
	return ERR_PTR(-ENOLCK);
}

static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct rpc_task *task;

	task = __nlm_async_call(req, proc, msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

/*
 * NLM asynchronous call.
 */
int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

/*
 * NLM client asynchronous call.
 *
 * Note that although the calls are asynchronous, and are therefore
 *      guaranteed to complete, we still always attempt to wait for
 *      completion in order to be able to correctly track the lock
 *      state.
 */
static int nlmclnt_async_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
		.rpc_cred	= cred,
	};
	struct rpc_task *task;
	int err;

	task = __nlm_async_call(req, proc, &msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	err = rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return err;
}

/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
	int	status;

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);
	if (status < 0)
		goto out;

	switch (req->a_res.status) {
	case nlm_granted:
		fl->fl_type = F_UNLCK;
		break;
	case nlm_lck_denied:
		/*
		 * Report the conflicting lock back to the application.
		 */
		fl->fl_start = req->a_res.lock.fl.fl_start;
		fl->fl_end = req->a_res.lock.fl.fl_end;
		fl->fl_type = req->a_res.lock.fl.fl_type;
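		/*
		 * The conflicting lock is held by a process on the
		 * server, so no meaningful local pid can be reported
		 * for it.
		 */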
		fl->fl_pid = 0;
		break;
	default:
		status = nlm_stat_to_errno(req->a_res.status);
	}
out:
	nlm_release_call(req);
	return status;
}

static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
	new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner);
	list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
}

static void nlmclnt_locks_release_private(struct file_lock *fl)
{
	list_del(&fl->fl_u.nfs_fl.list);
	nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
}

static const struct file_lock_operations nlmclnt_lock_ops = {
	.fl_copy_lock = nlmclnt_locks_copy_lock,
	.fl_release_private = nlmclnt_locks_release_private,
};

static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
	BUG_ON(fl->fl_ops != NULL);
	fl->fl_u.nfs_fl.state = 0;
	fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
	INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
	fl->fl_ops = &nlmclnt_lock_ops;
}

static int do_vfs_lock(struct file_lock *fl)
{
	int res = 0;
	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
	case FL_POSIX:
		res = posix_lock_file_wait(fl->fl_file, fl);
		break;
	case FL_FLOCK:
		res = flock_lock_file_wait(fl->fl_file, fl);
		break;
	default:
		BUG();
	}
	return res;
}

/*
 * LOCK: Try to create a lock
 *
 *			Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A:	Implement busy-waiting
 *  Solution B:	Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct rpc_cred *cred = nfs_file_cred(fl->fl_file);
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	struct nlm_wait *block = NULL;
	unsigned char fl_flags = fl->fl_flags;
	unsigned char fl_type;
	int status = -ENOLCK;

	if (nsm_monitor(host) < 0)
		goto out;
	req->a_args.state = nsm_local_state;

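	/*
	 * FL_ACCESS asks the VFS only to check whether the lock could
	 * be granted locally, without actually setting it, so obvious
	 * conflicts fail fast before we go over the wire.
	 */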
	fl->fl_flags |= FL_ACCESS;
	status = do_vfs_lock(fl);
	fl->fl_flags = fl_flags;
	if (status < 0)
		goto out;

	block = nlmclnt_prepare_block(host, fl);
again:
	/*
	 * Initialise resp->status to a valid non-zero value,
	 * since 0 == nlm_lck_granted
	 */
	resp->status = nlm_lck_blocked;
	for (;;) {
		/* Reboot protection */
		fl->fl_u.nfs_fl.state = host->h_state;
		status = nlmclnt_call(cred, req, NLMPROC_LOCK);
		if (status < 0)
			break;
		/* Did a reclaimer thread notify us of a server reboot? */
		if (resp->status == nlm_lck_denied_grace_period)
			continue;
		if (resp->status != nlm_lck_blocked)
			break;
		/* Wait on an NLM blocking lock */
		status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
		if (status < 0)
			break;
		if (resp->status != nlm_lck_blocked)
			break;
	}

	/* if we were interrupted while blocking, then cancel the lock request
	 * and exit
	 */
	if (resp->status == nlm_lck_blocked) {
		if (!req->a_args.block)
			goto out_unlock;
		if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
			goto out_unblock;
	}

	if (resp->status == nlm_granted) {
		down_read(&host->h_rwsem);
		/* Check whether or not the server has rebooted */
		if (fl->fl_u.nfs_fl.state != host->h_state) {
			up_read(&host->h_rwsem);
			goto again;
		}
		/* Ensure the resulting lock will get added to granted list */
		fl->fl_flags |= FL_SLEEP;
		if (do_vfs_lock(fl) < 0)
			printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
		up_read(&host->h_rwsem);
		fl->fl_flags = fl_flags;
		status = 0;
	}
	if (status < 0)
		goto out_unlock;
	/*
	 * EAGAIN doesn't make sense for sleeping locks, and in some
	 * cases NLM_LCK_DENIED is returned for a permanent error.  So
	 * turn it into an ENOLCK.
	 */
	if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP))
		status = -ENOLCK;
	else
		status = nlm_stat_to_errno(resp->status);
out_unblock:
	nlmclnt_finish_block(block);
out:
	nlm_release_call(req);
	return status;
out_unlock:
	/* Fatal error: ensure that we remove the lock altogether */
	dprintk("lockd: lock attempt ended in fatal error.\n"
		"       Attempting to unlock.\n");
	nlmclnt_finish_block(block);
	fl_type = fl->fl_type;
	fl->fl_type = F_UNLCK;
	down_read(&host->h_rwsem);
	do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->fl_type = fl_type;
	fl->fl_flags = fl_flags;
	nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	return status;
}

/*
 * RECLAIM: Try to reclaim a lock
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
{
	struct nlm_rqst reqst, *req;
	int		status;

	req = &reqst;
	memset(req, 0, sizeof(*req));
	locks_init_lock(&req->a_args.lock.fl);
	locks_init_lock(&req->a_res.lock.fl);
	req->a_host  = host;
	req->a_flags = 0;

	/* Set up the argument struct */
	nlmclnt_setlockargs(req, fl);
	req->a_args.reclaim = 1;

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
	if (status >= 0 && req->a_res.status == nlm_granted)
		return 0;

	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
				"(errno %d, status %d)\n", fl->fl_pid,
				status, ntohl(req->a_res.status));

	/*
	 * FIXME: This is a serious failure. We can
	 *
	 *  a.	Ignore the problem
	 *  b.	Send the owning process some signal (Linux doesn't have
	 *	SIGLOST, though...)
	 *  c.	Retry the operation
	 *
	 * Until someone comes up with a simple implementation
	 * for b or c, I'll choose option a.
	 */

	return -ENOLCK;
}

/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	int status;
	unsigned char fl_flags = fl->fl_flags;

	/*
	 * Note: the server is supposed to either grant us the unlock
	 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
	 * case, we want to unlock.
	 */
	fl->fl_flags |= FL_EXISTS;
	down_read(&host->h_rwsem);
	status = do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->fl_flags = fl_flags;
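	/*
	 * FL_EXISTS makes the VFS return -ENOENT when there was no
	 * matching local lock to remove; in that case we never held
	 * the lock and can skip the UNLOCK call to the server.
	 */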
	if (status == -ENOENT) {
		status = 0;
		goto out;
	}

	atomic_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	if (status < 0)
		goto out;

	if (resp->status == nlm_granted)
		goto out;

	if (resp->status != nlm_lck_denied_nolocks)
		printk("lockd: unexpected unlock status: %d\n", resp->status);
	/* What to do now? I'm out of my depth... */
	status = -ENOLCK;
out:
	nlm_release_call(req);
	return status;
}

static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
		goto retry_rebind;
	}
	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
		rpc_delay(task, NLMCLNT_GRACE_WAIT);
		goto retry_unlock;
	}
	if (status != NLM_LCK_GRANTED)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
	return;
retry_rebind:
	lock_kernel();
	nlm_rebind_host(req->a_host);
	unlock_kernel();
retry_unlock:
	rpc_restart_call(task);
}

static const struct rpc_call_ops nlmclnt_unlock_ops = {
	.rpc_call_done = nlmclnt_unlock_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
{
	struct nlm_rqst	*req;
	int status;

	dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
		"       Attempting to cancel lock.\n");

	req = nlm_alloc_call(nlm_get_host(host));
	if (!req)
		return -ENOMEM;
	req->a_flags = RPC_TASK_ASYNC;

	nlmclnt_setlockargs(req, fl);
	req->a_args.block = block;

	atomic_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_CANCEL, &nlmclnt_cancel_ops);
	if (status == 0 && req->a_res.status == nlm_lck_denied)
		status = -ENOLCK;
	nlm_release_call(req);
	return status;
}

static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: CANCEL call error %d, retrying.\n",
					task->tk_status);
		goto retry_cancel;
	}

	dprintk("lockd: cancel status %u (task %u)\n",
			status, task->tk_pid);

	switch (status) {
	case NLM_LCK_GRANTED:
	case NLM_LCK_DENIED_GRACE_PERIOD:
	case NLM_LCK_DENIED:
		/* Everything's good */
		break;
	case NLM_LCK_DENIED_NOLOCKS:
		dprintk("lockd: CANCEL failed (server has no locks)\n");
		goto retry_cancel;
	default:
		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
			status);
	}

die:
	return;

retry_cancel:
	/* Don't ever retry more than 3 times */
	if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
		goto die;
	lock_kernel();
	nlm_rebind_host(req->a_host);
	unlock_kernel();
	rpc_restart_call(task);
	rpc_delay(task, 30 * HZ);
}

static const struct rpc_call_ops nlmclnt_cancel_ops = {
	.rpc_call_done = nlmclnt_cancel_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(__be32 status)
{
	switch (ntohl(status)) {
	case NLM_LCK_GRANTED:
		return 0;
	case NLM_LCK_DENIED:
		return -EAGAIN;
	case NLM_LCK_DENIED_NOLOCKS:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		return -ENOLCK;
	case NLM_LCK_BLOCKED:
		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
		return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
	case NLM_DEADLCK:
		return -EDEADLK;
	case NLM_ROFS:
		return -EROFS;
	case NLM_STALE_FH:
		return -ESTALE;
	case NLM_FBIG:
		return -EOVERFLOW;
	case NLM_FAILED:
		return -ENOLCK;
#endif
	}
	/* status is big-endian on the wire; convert before logging */
	printk(KERN_NOTICE "lockd: unexpected server status %d\n",
		ntohl(status));
	return -ENOLCK;
}