1 /*
2 * linux/fs/lockd/svclock.c
3 *
4 * Handling of server-side locks, mostly of the blocked variety.
5 * This is the ugliest part of lockd because we tread on very thin ice.
6 * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
7 * IMNSHO introducing the grant callback into the NLM protocol was one
8 * of the worst ideas Sun ever had. Except maybe for the idea of doing
9 * NFS file locking at all.
10 *
11 * I'm trying hard to avoid race conditions by protecting most accesses
12 * to a file's list of blocked locks through a semaphore. The global
13 * list of blocked locks is not protected in this fashion however.
14 * Therefore, some functions (such as the RPC callback for the async grant
15 * call) move blocked locks towards the head of the list *while some other
16 * process might be traversing it*. This should not be a problem in
17 * practice, because this will only cause functions traversing the list
18 * to visit some blocks twice.
19 *
20 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
21 */
22
23 #include <linux/types.h>
24 #include <linux/slab.h>
25 #include <linux/errno.h>
26 #include <linux/kernel.h>
27 #include <linux/sched.h>
28 #include <linux/sunrpc/clnt.h>
29 #include <linux/sunrpc/svc_xprt.h>
30 #include <linux/lockd/nlm.h>
31 #include <linux/lockd/lockd.h>
32 #include <linux/kthread.h>
33
34 #define NLMDBG_FACILITY NLMDBG_SVCLOCK
35
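/*
 * NLM versions 1-3 have no distinct "deadlock" status code, so when
 * NLMv4 support is not built in a deadlock is reported as a plain
 * "denied" result instead.
 */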
36 #ifdef CONFIG_LOCKD_V4
37 #define nlm_deadlock nlm4_deadlock
38 #else
39 #define nlm_deadlock nlm_lck_denied
40 #endif
41
42 static void nlmsvc_release_block(struct nlm_block *block);
43 static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
44 static void nlmsvc_remove_block(struct nlm_block *block);
45
46 static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
47 static void nlmsvc_freegrantargs(struct nlm_rqst *call);
48 static const struct rpc_call_ops nlmsvc_grant_ops;
49
50 /*
51 * The list of blocked locks to retry
52 */
53 static LIST_HEAD(nlm_blocked);
54 static DEFINE_SPINLOCK(nlm_blocked_lock);
55
56 #ifdef LOCKD_DEBUG
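/* Render an NLM cookie as a hex string for debug logging. */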
57 static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
58 {
59 /*
60 * We can get away with a static buffer because we're only
61 * called with BKL held.
62 */
63 static char buf[2*NLM_MAXCOOKIELEN+1];
64 unsigned int i, len = sizeof(buf);
65 char *p = buf;
66
67 len--; /* allow for trailing \0 */
68 if (len < 3)
69 return "???";
70 for (i = 0 ; i < cookie->len ; i++) {
71 if (len < 2) {
72 strcpy(p-3, "...");
73 break;
74 }
75 sprintf(p, "%02x", cookie->data[i]);
76 p += 2;
77 len -= 2;
78 }
79 *p = '\0';
80
81 return buf;
82 }
83 #endif
84
85 /*
86 * Insert a blocked lock into the global list
87 */
88 static void
89 nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
90 {
91 struct nlm_block *b;
92 struct list_head *pos;
93
94 dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
95 if (list_empty(&block->b_list)) {
96 kref_get(&block->b_count);
97 } else {
98 list_del_init(&block->b_list);
99 }
100
101 pos = &nlm_blocked;
102 if (when != NLM_NEVER) {
103 if ((when += jiffies) == NLM_NEVER)
104 when ++;
105 list_for_each(pos, &nlm_blocked) {
106 b = list_entry(pos, struct nlm_block, b_list);
107 if (time_after(b->b_when,when) || b->b_when == NLM_NEVER)
108 break;
109 }
110 /* On normal exit from the loop, pos == &nlm_blocked,
111 * so we will be adding to the end of the list - good
112 */
113 }
114
115 list_add_tail(&block->b_list, pos);
116 block->b_when = when;
117 }
118
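/*
 * Wrapper around nlmsvc_insert_block_locked() that takes
 * nlm_blocked_lock on behalf of the caller.
 */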
119 static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
120 {
121 spin_lock(&nlm_blocked_lock);
122 nlmsvc_insert_block_locked(block, when);
123 spin_unlock(&nlm_blocked_lock);
124 }
125
126 /*
127 * Remove a block from the global list
128 */
129 static inline void
130 nlmsvc_remove_block(struct nlm_block *block)
131 {
132 if (!list_empty(&block->b_list)) {
133 spin_lock(&nlm_blocked_lock);
134 list_del_init(&block->b_list);
135 spin_unlock(&nlm_blocked_lock);
136 nlmsvc_release_block(block);
137 }
138 }
139
140 /*
141 * Find a block for a given lock
142 */
143 static struct nlm_block *
144 nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
145 {
146 struct nlm_block *block;
147 struct file_lock *fl;
148
149 dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
150 file, lock->fl.fl_pid,
151 (long long)lock->fl.fl_start,
152 (long long)lock->fl.fl_end, lock->fl.fl_type);
153 list_for_each_entry(block, &nlm_blocked, b_list) {
154 fl = &block->b_call->a_args.lock.fl;
155 dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
156 block->b_file, fl->fl_pid,
157 (long long)fl->fl_start,
158 (long long)fl->fl_end, fl->fl_type,
159 nlmdbg_cookie2a(&block->b_call->a_args.cookie));
160 if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
161 kref_get(&block->b_count);
162 return block;
163 }
164 }
165
166 return NULL;
167 }
168
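/*
 * Two NLM cookies match when both their length and data are equal.
 */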
169 static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
170 {
171 if (a->len != b->len)
172 return 0;
173 if (memcmp(a->data, b->data, a->len))
174 return 0;
175 return 1;
176 }
177
178 /*
179 * Find a block with a given NLM cookie.
180 */
181 static inline struct nlm_block *
182 nlmsvc_find_block(struct nlm_cookie *cookie)
183 {
184 struct nlm_block *block;
185
186 list_for_each_entry(block, &nlm_blocked, b_list) {
187 if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie))
188 goto found;
189 }
190
191 return NULL;
192
193 found:
194 dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
195 kref_get(&block->b_count);
196 return block;
197 }
198
199 /*
200 * Create a block and initialize it.
201 *
202 * Note: we explicitly set the cookie of the grant reply to that of
203 * the blocked lock request. The spec explicitly mentions that the client
204 * should _not_ rely on the callback containing the same cookie as the
205 * request, but (as I found out later) that's because some implementations
206  * do just this. Never mind the standards committees, they support our
207 * logging industries.
208 *
209 * 10 years later: I hope we can safely ignore these old and broken
210 * clients by now. Let's fix this so we can uniquely identify an incoming
211 * GRANTED_RES message by cookie, without having to rely on the client's IP
212 * address. --okir
213 */
214 static struct nlm_block *
215 nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
216 struct nlm_file *file, struct nlm_lock *lock,
217 struct nlm_cookie *cookie)
218 {
219 struct nlm_block *block;
220 struct nlm_rqst *call = NULL;
221
222 call = nlm_alloc_call(host);
223 if (call == NULL)
224 return NULL;
225
226 /* Allocate memory for block, and initialize arguments */
227 block = kzalloc(sizeof(*block), GFP_KERNEL);
228 if (block == NULL)
229 goto failed;
230 kref_init(&block->b_count);
231 INIT_LIST_HEAD(&block->b_list);
232 INIT_LIST_HEAD(&block->b_flist);
233
234 if (!nlmsvc_setgrantargs(call, lock))
235 goto failed_free;
236
237 /* Set notifier function for VFS, and init args */
238 call->a_args.lock.fl.fl_flags |= FL_SLEEP;
239 call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
240 nlmclnt_next_cookie(&call->a_args.cookie);
241
242 dprintk("lockd: created block %p...\n", block);
243
244 /* Create and initialize the block */
245 block->b_daemon = rqstp->rq_server;
246 block->b_host = host;
247 block->b_file = file;
248 block->b_fl = NULL;
249 file->f_count++;
250
251 /* Add to file's list of blocks */
252 list_add(&block->b_flist, &file->f_blocks);
253
254 /* Set up RPC arguments for callback */
255 block->b_call = call;
256 call->a_flags = RPC_TASK_ASYNC;
257 call->a_block = block;
258
259 return block;
260
261 failed_free:
262 kfree(block);
263 failed:
264 nlmsvc_release_call(call);
265 return NULL;
266 }
267
268 /*
269 * Delete a block.
270 * It is the caller's responsibility to check whether the file
271 * can be closed hereafter.
272 */
273 static int nlmsvc_unlink_block(struct nlm_block *block)
274 {
275 int status;
276 dprintk("lockd: unlinking block %p...\n", block);
277
278 /* Remove block from list */
279 status = posix_unblock_lock(&block->b_call->a_args.lock.fl);
280 nlmsvc_remove_block(block);
281 return status;
282 }
283
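/*
 * kref release function, called via kref_put_mutex() with the file's
 * f_mutex held: unlink the block from the file's list of blocks, drop
 * the mutex and free everything the block still references.
 */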
284 static void nlmsvc_free_block(struct kref *kref)
285 {
286 struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
287 struct nlm_file *file = block->b_file;
288
289 dprintk("lockd: freeing block %p...\n", block);
290
291 /* Remove block from file's list of blocks */
292 list_del_init(&block->b_flist);
293 mutex_unlock(&file->f_mutex);
294
295 nlmsvc_freegrantargs(block->b_call);
296 nlmsvc_release_call(block->b_call);
297 nlm_release_file(block->b_file);
298 kfree(block->b_fl);
299 kfree(block);
300 }
301
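/*
 * Drop a reference to a block. The final put takes the file's f_mutex
 * so that nlmsvc_free_block() can safely unlink the block from the
 * file's list.
 */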
302 static void nlmsvc_release_block(struct nlm_block *block)
303 {
304 if (block != NULL)
305 kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex);
306 }
307
308 /*
309 * Loop over all blocks and delete blocks held by
310 * a matching host.
311 */
312 void nlmsvc_traverse_blocks(struct nlm_host *host,
313 struct nlm_file *file,
314 nlm_host_match_fn_t match)
315 {
316 struct nlm_block *block, *next;
317
318 restart:
319 mutex_lock(&file->f_mutex);
320 list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
321 if (!match(block->b_host, host))
322 continue;
323 /* Do not destroy blocks that are not on
324 * the global retry list - why? */
325 if (list_empty(&block->b_list))
326 continue;
327 kref_get(&block->b_count);
328 mutex_unlock(&file->f_mutex);
329 nlmsvc_unlink_block(block);
330 nlmsvc_release_block(block);
331 goto restart;
332 }
333 mutex_unlock(&file->f_mutex);
334 }
335
336 /*
337 * Initialize arguments for GRANTED call. The nlm_rqst structure
338 * has been cleared already.
339 */
340 static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
341 {
342 locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
343 memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
344 call->a_args.lock.caller = utsname()->nodename;
345 call->a_args.lock.oh.len = lock->oh.len;
346
347 /* set default data area */
348 call->a_args.lock.oh.data = call->a_owner;
349 call->a_args.lock.svid = lock->fl.fl_pid;
350
351 if (lock->oh.len > NLMCLNT_OHSIZE) {
352 void *data = kmalloc(lock->oh.len, GFP_KERNEL);
353 if (!data)
354 return 0;
355 call->a_args.lock.oh.data = (u8 *) data;
356 }
357
358 memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
359 return 1;
360 }
361
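/*
 * Undo nlmsvc_setgrantargs(): free the owner handle if it was
 * allocated separately and release the lock's private data.
 */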
362 static void nlmsvc_freegrantargs(struct nlm_rqst *call)
363 {
364 if (call->a_args.lock.oh.data != call->a_owner)
365 kfree(call->a_args.lock.oh.data);
366
367 locks_release_private(&call->a_args.lock.fl);
368 }
369
370 /*
371 * Deferred lock request handling for non-blocking lock
372 */
373 static __be32
374 nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
375 {
376 __be32 status = nlm_lck_denied_nolocks;
377
378 block->b_flags |= B_QUEUED;
379
380 nlmsvc_insert_block(block, NLM_TIMEOUT);
381
382 block->b_cache_req = &rqstp->rq_chandle;
383 if (rqstp->rq_chandle.defer) {
384 block->b_deferred_req =
385 rqstp->rq_chandle.defer(block->b_cache_req);
386 if (block->b_deferred_req != NULL)
387 status = nlm_drop_reply;
388 }
389 dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
390 block, block->b_flags, ntohl(status));
391
392 return status;
393 }
394
395 /*
396 * Attempt to establish a lock, and if it can't be granted, block it
397 * if required.
398 */
399 __be32
400 nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
401 struct nlm_host *host, struct nlm_lock *lock, int wait,
402 struct nlm_cookie *cookie, int reclaim)
403 {
404 struct nlm_block *block = NULL;
405 int error;
406 __be32 ret;
407
408 dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
409 file_inode(file->f_file)->i_sb->s_id,
410 file_inode(file->f_file)->i_ino,
411 lock->fl.fl_type, lock->fl.fl_pid,
412 (long long)lock->fl.fl_start,
413 (long long)lock->fl.fl_end,
414 wait);
415
416 /* Lock file against concurrent access */
417 mutex_lock(&file->f_mutex);
418 /* Get existing block (in case client is busy-waiting)
419 * or create new block
420 */
421 block = nlmsvc_lookup_block(file, lock);
422 if (block == NULL) {
423 block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
424 ret = nlm_lck_denied_nolocks;
425 if (block == NULL)
426 goto out;
427 lock = &block->b_call->a_args.lock;
428 } else
429 lock->fl.fl_flags &= ~FL_SLEEP;
430
431 if (block->b_flags & B_QUEUED) {
432 dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
433 block, block->b_flags);
434 if (block->b_granted) {
435 nlmsvc_unlink_block(block);
436 ret = nlm_granted;
437 goto out;
438 }
439 if (block->b_flags & B_TIMED_OUT) {
440 nlmsvc_unlink_block(block);
441 ret = nlm_lck_denied;
442 goto out;
443 }
444 ret = nlm_drop_reply;
445 goto out;
446 }
447
448 if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) {
449 ret = nlm_lck_denied_grace_period;
450 goto out;
451 }
452 if (reclaim && !locks_in_grace(SVC_NET(rqstp))) {
453 ret = nlm_lck_denied_grace_period;
454 goto out;
455 }
456
457 if (!wait)
458 lock->fl.fl_flags &= ~FL_SLEEP;
459 error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
460 lock->fl.fl_flags &= ~FL_SLEEP;
461
462 dprintk("lockd: vfs_lock_file returned %d\n", error);
463 switch (error) {
464 case 0:
465 ret = nlm_granted;
466 goto out;
467 case -EAGAIN:
468 /*
469 * If this is a blocking request for an
470 * already pending lock request then we need
471 * to put it back on lockd's block list
472 */
473 if (wait)
474 break;
475 ret = nlm_lck_denied;
476 goto out;
477 case FILE_LOCK_DEFERRED:
478 if (wait)
479 break;
480 /* Filesystem lock operation is in progress
481 Add it to the queue waiting for callback */
482 ret = nlmsvc_defer_lock_rqst(rqstp, block);
483 goto out;
484 case -EDEADLK:
485 ret = nlm_deadlock;
486 goto out;
487 default: /* includes ENOLCK */
488 ret = nlm_lck_denied_nolocks;
489 goto out;
490 }
491
492 ret = nlm_lck_blocked;
493
494 /* Append to list of blocked */
495 nlmsvc_insert_block(block, NLM_NEVER);
496 out:
497 mutex_unlock(&file->f_mutex);
498 nlmsvc_release_block(block);
499 dprintk("lockd: nlmsvc_lock returned %u\n", ret);
500 return ret;
501 }
502
503 /*
504 * Test for presence of a conflicting lock.
505 */
506 __be32
507 nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
508 struct nlm_host *host, struct nlm_lock *lock,
509 struct nlm_lock *conflock, struct nlm_cookie *cookie)
510 {
511 struct nlm_block *block = NULL;
512 int error;
513 __be32 ret;
514
515 dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
516 file_inode(file->f_file)->i_sb->s_id,
517 file_inode(file->f_file)->i_ino,
518 lock->fl.fl_type,
519 (long long)lock->fl.fl_start,
520 (long long)lock->fl.fl_end);
521
522 /* Get existing block (in case client is busy-waiting) */
523 block = nlmsvc_lookup_block(file, lock);
524
525 if (block == NULL) {
526 struct file_lock *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
527
528 if (conf == NULL)
529 return nlm_granted;
530 block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
531 if (block == NULL) {
532 kfree(conf);
533 return nlm_granted;
534 }
535 block->b_fl = conf;
536 }
537 if (block->b_flags & B_QUEUED) {
538 dprintk("lockd: nlmsvc_testlock deferred block %p flags %d fl %p\n",
539 block, block->b_flags, block->b_fl);
540 if (block->b_flags & B_TIMED_OUT) {
541 nlmsvc_unlink_block(block);
542 ret = nlm_lck_denied;
543 goto out;
544 }
545 if (block->b_flags & B_GOT_CALLBACK) {
546 nlmsvc_unlink_block(block);
547 if (block->b_fl != NULL
548 && block->b_fl->fl_type != F_UNLCK) {
549 lock->fl = *block->b_fl;
550 goto conf_lock;
551 } else {
552 ret = nlm_granted;
553 goto out;
554 }
555 }
556 ret = nlm_drop_reply;
557 goto out;
558 }
559
560 if (locks_in_grace(SVC_NET(rqstp))) {
561 ret = nlm_lck_denied_grace_period;
562 goto out;
563 }
564 error = vfs_test_lock(file->f_file, &lock->fl);
565 if (error == FILE_LOCK_DEFERRED) {
566 ret = nlmsvc_defer_lock_rqst(rqstp, block);
567 goto out;
568 }
569 if (error) {
570 ret = nlm_lck_denied_nolocks;
571 goto out;
572 }
573 if (lock->fl.fl_type == F_UNLCK) {
574 ret = nlm_granted;
575 goto out;
576 }
577
578 conf_lock:
579 dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
580 lock->fl.fl_type, (long long)lock->fl.fl_start,
581 (long long)lock->fl.fl_end);
582 conflock->caller = "somehost"; /* FIXME */
583 conflock->len = strlen(conflock->caller);
584 conflock->oh.len = 0; /* don't return OH info */
585 conflock->svid = lock->fl.fl_pid;
586 conflock->fl.fl_type = lock->fl.fl_type;
587 conflock->fl.fl_start = lock->fl.fl_start;
588 conflock->fl.fl_end = lock->fl.fl_end;
589 locks_release_private(&lock->fl);
590 ret = nlm_lck_denied;
591 out:
592 if (block)
593 nlmsvc_release_block(block);
594 return ret;
595 }
596
597 /*
598 * Remove a lock.
599 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
600 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
601 * afterwards. In this case the block will still be there, and hence
602 * must be removed.
603 */
604 __be32
605 nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
606 {
607 int error;
608
609 dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
610 file_inode(file->f_file)->i_sb->s_id,
611 file_inode(file->f_file)->i_ino,
612 lock->fl.fl_pid,
613 (long long)lock->fl.fl_start,
614 (long long)lock->fl.fl_end);
615
616 /* First, cancel any lock that might be there */
617 nlmsvc_cancel_blocked(net, file, lock);
618
619 lock->fl.fl_type = F_UNLCK;
620 error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
621
622 return (error < 0)? nlm_lck_denied_nolocks : nlm_granted;
623 }
624
625 /*
626 * Cancel a previously blocked request.
627 *
628 * A cancel request always overrides any grant that may currently
629 * be in progress.
630 * The calling procedure must check whether the file can be closed.
631 */
632 __be32
633 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
634 {
635 struct nlm_block *block;
636 int status = 0;
637
638 dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
639 file_inode(file->f_file)->i_sb->s_id,
640 file_inode(file->f_file)->i_ino,
641 lock->fl.fl_pid,
642 (long long)lock->fl.fl_start,
643 (long long)lock->fl.fl_end);
644
645 if (locks_in_grace(net))
646 return nlm_lck_denied_grace_period;
647
648 mutex_lock(&file->f_mutex);
649 block = nlmsvc_lookup_block(file, lock);
650 mutex_unlock(&file->f_mutex);
651 if (block != NULL) {
652 vfs_cancel_lock(block->b_file->f_file,
653 &block->b_call->a_args.lock.fl);
654 status = nlmsvc_unlink_block(block);
655 nlmsvc_release_block(block);
656 }
657 return status ? nlm_lck_denied : nlm_granted;
658 }
659
660 /*
661 * This is a callback from the filesystem for VFS file lock requests.
662  * It will be used if lm_grant is defined and the filesystem cannot
663  * respond to the request immediately.
664  * For a GETLK request it will copy the reply to the nlm_block.
665  * For a SETLK or SETLKW request it will get the local posix lock.
666  * In all cases it will move the block to the head of the nlm_blocked queue,
667  * where nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit
668  * the deferred rpc for GETLK and SETLK.
669 */
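/*
 * Record the filesystem's answer on a queued block: note that the
 * callback arrived and either mark the lock granted or time it out.
 */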
670 static void
671 nlmsvc_update_deferred_block(struct nlm_block *block, int result)
672 {
673 block->b_flags |= B_GOT_CALLBACK;
674 if (result == 0)
675 block->b_granted = 1;
676 else
677 block->b_flags |= B_TIMED_OUT;
678 }
679
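/*
 * This is the lm_grant callback itself: find the block matching @fl,
 * record the result and wake up lockd so nlmsvc_retry_blocked() can
 * finish the request.
 */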
680 static int nlmsvc_grant_deferred(struct file_lock *fl, int result)
681 {
682 struct nlm_block *block;
683 int rc = -ENOENT;
684
685 spin_lock(&nlm_blocked_lock);
686 list_for_each_entry(block, &nlm_blocked, b_list) {
687 if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
688 dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
689 block, block->b_flags);
690 if (block->b_flags & B_QUEUED) {
691 if (block->b_flags & B_TIMED_OUT) {
692 rc = -ENOLCK;
693 break;
694 }
695 nlmsvc_update_deferred_block(block, result);
696 } else if (result == 0)
697 block->b_granted = 1;
698
699 nlmsvc_insert_block_locked(block, 0);
700 svc_wake_up(block->b_daemon);
701 rc = 0;
702 break;
703 }
704 }
705 spin_unlock(&nlm_blocked_lock);
706 if (rc == -ENOENT)
707 printk(KERN_WARNING "lockd: grant for unknown block\n");
708 return rc;
709 }
710
711 /*
712 * Unblock a blocked lock request. This is a callback invoked from the
713 * VFS layer when a lock on which we blocked is removed.
714 *
715 * This function doesn't grant the blocked lock instantly, but rather moves
716 * the block to the head of nlm_blocked where it can be picked up by lockd.
717 */
718 static void
719 nlmsvc_notify_blocked(struct file_lock *fl)
720 {
721 struct nlm_block *block;
722
723 dprintk("lockd: VFS unblock notification for block %p\n", fl);
724 spin_lock(&nlm_blocked_lock);
725 list_for_each_entry(block, &nlm_blocked, b_list) {
726 if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
727 nlmsvc_insert_block_locked(block, 0);
728 spin_unlock(&nlm_blocked_lock);
729 svc_wake_up(block->b_daemon);
730 return;
731 }
732 }
733 spin_unlock(&nlm_blocked_lock);
734 printk(KERN_WARNING "lockd: notification for unknown block!\n");
735 }
736
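/*
 * Two locks have the same owner when both the fl_owner pointer and
 * the pid match.
 */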
737 static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2)
738 {
739 return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid;
740 }
741
742 /*
743 * Since NLM uses two "keys" for tracking locks, we need to hash them down
744 * to one for the blocked_hash. Here, we're just xor'ing the host address
745 * with the pid in order to create a key value for picking a hash bucket.
746 */
747 static unsigned long
748 nlmsvc_owner_key(struct file_lock *fl)
749 {
750 return (unsigned long)fl->fl_owner ^ (unsigned long)fl->fl_pid;
751 }
752
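/*
 * Lock manager callbacks handed to the VFS for server-side NLM locks:
 * owner comparison and hashing for conflict checks, lm_notify when a
 * lock we blocked on is removed, and lm_grant for deferred results.
 */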
753 const struct lock_manager_operations nlmsvc_lock_operations = {
754 .lm_compare_owner = nlmsvc_same_owner,
755 .lm_owner_key = nlmsvc_owner_key,
756 .lm_notify = nlmsvc_notify_blocked,
757 .lm_grant = nlmsvc_grant_deferred,
758 };
759
760 /*
761 * Try to claim a lock that was previously blocked.
762 *
763 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
764 * RPC thread when notifying the client. This seems like overkill...
765 * Here's why:
766 * - we don't want to use a synchronous RPC thread, otherwise
767 * we might find ourselves hanging on a dead portmapper.
768 * - Some lockd implementations (e.g. HP) don't react to
769 * RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
770 */
771 static void
772 nlmsvc_grant_blocked(struct nlm_block *block)
773 {
774 struct nlm_file *file = block->b_file;
775 struct nlm_lock *lock = &block->b_call->a_args.lock;
776 int error;
777 loff_t fl_start, fl_end;
778
779 dprintk("lockd: grant blocked lock %p\n", block);
780
781 kref_get(&block->b_count);
782
783 /* Unlink block request from list */
784 nlmsvc_unlink_block(block);
785
786 /* If b_granted is true this means we've been here before.
787 * Just retry the grant callback, possibly refreshing the RPC
788 * binding */
789 if (block->b_granted) {
790 nlm_rebind_host(block->b_host);
791 goto callback;
792 }
793
794 /* Try the lock operation again */
795 /* vfs_lock_file() can mangle fl_start and fl_end, but we need
796 * them unchanged for the GRANT_MSG
797 */
798 lock->fl.fl_flags |= FL_SLEEP;
799 fl_start = lock->fl.fl_start;
800 fl_end = lock->fl.fl_end;
801 error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
802 lock->fl.fl_flags &= ~FL_SLEEP;
803 lock->fl.fl_start = fl_start;
804 lock->fl.fl_end = fl_end;
805
806 switch (error) {
807 case 0:
808 break;
809 case FILE_LOCK_DEFERRED:
810 dprintk("lockd: lock still blocked error %d\n", error);
811 nlmsvc_insert_block(block, NLM_NEVER);
812 nlmsvc_release_block(block);
813 return;
814 default:
815 printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
816 -error, __func__);
817 nlmsvc_insert_block(block, 10 * HZ);
818 nlmsvc_release_block(block);
819 return;
820 }
821
822 callback:
823 /* Lock was granted by VFS. */
824 dprintk("lockd: GRANTing blocked lock.\n");
825 block->b_granted = 1;
826
827 /* keep block on the list, but don't reattempt until the RPC
828 * completes or the submission fails
829 */
830 nlmsvc_insert_block(block, NLM_NEVER);
831
832 /* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
833 * will queue up a new one if this one times out
834 */
835 error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
836 &nlmsvc_grant_ops);
837
838 /* RPC submission failed, wait a bit and retry */
839 if (error < 0)
840 nlmsvc_insert_block(block, 10 * HZ);
841 }
842
843 /*
844 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
845 * RPC call has succeeded or timed out.
846 * Like all RPC callbacks, it is invoked by the rpciod process, so it
847 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
848 * chain once more in order to have it removed by lockd itself (which can
849 * then sleep on the file semaphore without disrupting e.g. the nfs client).
850 */
851 static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
852 {
853 struct nlm_rqst *call = data;
854 struct nlm_block *block = call->a_block;
855 unsigned long timeout;
856
857 dprintk("lockd: GRANT_MSG RPC callback\n");
858
859 spin_lock(&nlm_blocked_lock);
860 /* if the block is not on a list at this point then it has
861 * been invalidated. Don't try to requeue it.
862 *
863 * FIXME: it's possible that the block is removed from the list
864 * after this check but before the nlmsvc_insert_block. In that
865 * case it will be added back. Perhaps we need better locking
866 * for nlm_blocked?
867 */
868 if (list_empty(&block->b_list))
869 goto out;
870
871 /* Technically, we should down the file semaphore here. Since we
872 * move the block towards the head of the queue only, no harm
873 * can be done, though. */
874 if (task->tk_status < 0) {
875 /* RPC error: Re-insert for retransmission */
876 timeout = 10 * HZ;
877 } else {
878 /* Call was successful, now wait for client callback */
879 timeout = 60 * HZ;
880 }
881 nlmsvc_insert_block_locked(block, timeout);
882 svc_wake_up(block->b_daemon);
883 out:
884 spin_unlock(&nlm_blocked_lock);
885 }
886
887 /*
888 * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an
889 * .rpc_release rpc_call_op
890 */
891 static void nlmsvc_grant_release(void *data)
892 {
893 struct nlm_rqst *call = data;
894 nlmsvc_release_block(call->a_block);
895 }
896
897 static const struct rpc_call_ops nlmsvc_grant_ops = {
898 .rpc_call_done = nlmsvc_grant_callback,
899 .rpc_release = nlmsvc_grant_release,
900 };
901
902 /*
903 * We received a GRANT_RES callback. Try to find the corresponding
904 * block.
905 */
906 void
907 nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
908 {
909 struct nlm_block *block;
910
911 dprintk("grant_reply: looking for cookie %x, s=%d \n",
912 *(unsigned int *)(cookie->data), status);
913 if (!(block = nlmsvc_find_block(cookie)))
914 return;
915
916 if (block) {
917 if (status == nlm_lck_denied_grace_period) {
918 /* Try again in a couple of seconds */
919 nlmsvc_insert_block(block, 10 * HZ);
920 } else {
921 /* Lock is now held by client, or has been rejected.
922 * In both cases, the block should be removed. */
923 nlmsvc_unlink_block(block);
924 }
925 }
926 nlmsvc_release_block(block);
927 }
928
929 /* Helper function to handle retry of a deferred block.
930 * If it is a blocking lock, call grant_blocked.
931 * For a non-blocking lock or test lock, revisit the request.
932 */
933 static void
934 retry_deferred_block(struct nlm_block *block)
935 {
936 if (!(block->b_flags & B_GOT_CALLBACK))
937 block->b_flags |= B_TIMED_OUT;
938 nlmsvc_insert_block(block, NLM_TIMEOUT);
939 dprintk("revisit block %p flags %d\n", block, block->b_flags);
940 if (block->b_deferred_req) {
941 block->b_deferred_req->revisit(block->b_deferred_req, 0);
942 block->b_deferred_req = NULL;
943 }
944 }
945
946 /*
947 * Retry all blocked locks that have been notified. This is where lockd
948 * picks up locks that can be granted, or grant notifications that must
949 * be retransmitted.
950 */
951 unsigned long
952 nlmsvc_retry_blocked(void)
953 {
954 unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
955 struct nlm_block *block;
956
957 spin_lock(&nlm_blocked_lock);
958 while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
959 block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
960
961 if (block->b_when == NLM_NEVER)
962 break;
963 if (time_after(block->b_when, jiffies)) {
964 timeout = block->b_when - jiffies;
965 break;
966 }
967 spin_unlock(&nlm_blocked_lock);
968
969 dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
970 block, block->b_when);
971 if (block->b_flags & B_QUEUED) {
972 dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
973 block, block->b_granted, block->b_flags);
974 retry_deferred_block(block);
975 } else
976 nlmsvc_grant_blocked(block);
977 spin_lock(&nlm_blocked_lock);
978 }
979 spin_unlock(&nlm_blocked_lock);
980
981 return timeout;
982 }