/*
 * linux/fs/lockd/svclock.c
 *
 * Handling of server-side locks, mostly of the blocked variety.
 * This is the ugliest part of lockd because we tread on very thin ice.
 * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
 * IMNSHO introducing the grant callback into the NLM protocol was one
 * of the worst ideas Sun ever had. Except maybe for the idea of doing
 * NFS file locking at all.
 *
 * I'm trying hard to avoid race conditions by protecting most accesses
 * to a file's list of blocked locks through a semaphore. The global
 * list of blocked locks is not protected in this fashion however.
 * Therefore, some functions (such as the RPC callback for the async grant
 * call) move blocked locks towards the head of the list *while some other
 * process might be traversing it*. This should not be a problem in
 * practice, because this will only cause functions traversing the list
 * to visit some blocks twice.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>
#include <linux/kthread.h>

#define NLMDBG_FACILITY		NLMDBG_SVCLOCK

#ifdef CONFIG_LOCKD_V4
#define nlm_deadlock	nlm4_deadlock
#else
#define nlm_deadlock	nlm_lck_denied
#endif

static void nlmsvc_release_block(struct nlm_block *block);
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
static void nlmsvc_remove_block(struct nlm_block *block);

static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
static void nlmsvc_freegrantargs(struct nlm_rqst *call);
static const struct rpc_call_ops nlmsvc_grant_ops;

/*
 * The list of blocked locks to retry
 */
static LIST_HEAD(nlm_blocked);
static DEFINE_SPINLOCK(nlm_blocked_lock);

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
        /*
         * We can get away with a static buffer because this is only called
         * from lockd, which is single-threaded.
         */
        static char buf[2*NLM_MAXCOOKIELEN+1];
        unsigned int i, len = sizeof(buf);
        char *p = buf;

        len--;  /* allow for trailing \0 */
        if (len < 3)
                return "???";
        for (i = 0 ; i < cookie->len ; i++) {
                if (len < 2) {
                        strcpy(p-3, "...");
                        break;
                }
                sprintf(p, "%02x", cookie->data[i]);
                p += 2;
                len -= 2;
        }
        *p = '\0';

        return buf;
}
#endif

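/*
 * Worked example (editorial, not in the original source): a four-byte
 * cookie { 0xde, 0xad, 0xbe, 0xef } is rendered as "deadbeef".  Should a
 * cookie ever carry more bytes than buf can hold, the loop stops early
 * and overwrites the last three characters already written with "..."
 * to mark the truncation.
 */
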
/*
 * Insert a blocked lock into the global list
 */
static void
nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
{
        struct nlm_block *b;
        struct list_head *pos;

        dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
        if (list_empty(&block->b_list)) {
                kref_get(&block->b_count);
        } else {
                list_del_init(&block->b_list);
        }

        pos = &nlm_blocked;
        if (when != NLM_NEVER) {
                if ((when += jiffies) == NLM_NEVER)
                        when++;
                list_for_each(pos, &nlm_blocked) {
                        b = list_entry(pos, struct nlm_block, b_list);
                        if (time_after(b->b_when, when) || b->b_when == NLM_NEVER)
                                break;
                }
                /* On normal exit from the loop, pos == &nlm_blocked,
                 * so we will be adding to the end of the list - good
                 */
        }

        list_add_tail(&block->b_list, pos);
        block->b_when = when;
}

static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
        spin_lock(&nlm_blocked_lock);
        nlmsvc_insert_block_locked(block, when);
        spin_unlock(&nlm_blocked_lock);
}

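/*
 * Ordering sketch (editorial): nlm_blocked is kept sorted by ascending
 * b_when, with NLM_NEVER entries parked at the tail.  A hypothetical
 * sequence:
 *
 *      nlmsvc_insert_block(a, 5 * HZ);         retry a in 5 seconds
 *      nlmsvc_insert_block(b, NLM_NEVER);      b waits for an event
 *      nlmsvc_insert_block(a, 0);              requeue a at the head for
 *                                              immediate retry by lockd
 *
 * leaves the list as a (b_when == jiffies), then b (NLM_NEVER).
 */
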
/*
 * Remove a block from the global list
 */
static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
        if (!list_empty(&block->b_list)) {
                spin_lock(&nlm_blocked_lock);
                list_del_init(&block->b_list);
                spin_unlock(&nlm_blocked_lock);
                nlmsvc_release_block(block);
        }
}

/*
 * Find a block for a given lock
 */
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
{
        struct nlm_block *block;
        struct file_lock *fl;

        dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
                        file, lock->fl.fl_pid,
                        (long long)lock->fl.fl_start,
                        (long long)lock->fl.fl_end, lock->fl.fl_type);
        list_for_each_entry(block, &nlm_blocked, b_list) {
                fl = &block->b_call->a_args.lock.fl;
                dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
                                block->b_file, fl->fl_pid,
                                (long long)fl->fl_start,
                                (long long)fl->fl_end, fl->fl_type,
                                nlmdbg_cookie2a(&block->b_call->a_args.cookie));
                if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
                        kref_get(&block->b_count);
                        return block;
                }
        }

        return NULL;
}

static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
{
        if (a->len != b->len)
                return 0;
        if (memcmp(a->data, b->data, a->len))
                return 0;
        return 1;
}

/*
 * Find a block with a given NLM cookie.
 */
static inline struct nlm_block *
nlmsvc_find_block(struct nlm_cookie *cookie)
{
        struct nlm_block *block;

        list_for_each_entry(block, &nlm_blocked, b_list) {
                if (nlm_cookie_match(&block->b_call->a_args.cookie, cookie))
                        goto found;
        }

        return NULL;

found:
        dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
        kref_get(&block->b_count);
        return block;
}

/*
 * Create a block and initialize it.
 *
 * Note: we explicitly set the cookie of the grant reply to that of
 * the blocked lock request. The spec explicitly mentions that the client
 * should _not_ rely on the callback containing the same cookie as the
 * request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards committees, they support our
 * logging industries.
 *
 * 10 years later: I hope we can safely ignore these old and broken
 * clients by now. Let's fix this so we can uniquely identify an incoming
 * GRANTED_RES message by cookie, without having to rely on the client's IP
 * address. --okir
 */
static struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
                    struct nlm_file *file, struct nlm_lock *lock,
                    struct nlm_cookie *cookie)
{
        struct nlm_block *block;
        struct nlm_rqst *call = NULL;

        call = nlm_alloc_call(host);
        if (call == NULL)
                return NULL;

        /* Allocate memory for block, and initialize arguments */
        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (block == NULL)
                goto failed;
        kref_init(&block->b_count);
        INIT_LIST_HEAD(&block->b_list);
        INIT_LIST_HEAD(&block->b_flist);

        if (!nlmsvc_setgrantargs(call, lock))
                goto failed_free;

        /* Set notifier function for VFS, and init args */
        call->a_args.lock.fl.fl_flags |= FL_SLEEP;
        call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
        nlmclnt_next_cookie(&call->a_args.cookie);

        dprintk("lockd: created block %p...\n", block);

        /* Create and initialize the block */
        block->b_daemon = rqstp->rq_server;
        block->b_host   = host;
        block->b_file   = file;
        file->f_count++;

        /* Add to file's list of blocks */
        list_add(&block->b_flist, &file->f_blocks);

        /* Set up RPC arguments for callback */
        block->b_call = call;
        call->a_flags = RPC_TASK_ASYNC;
        call->a_block = block;

        return block;

failed_free:
        kfree(block);
failed:
        nlmsvc_release_call(call);
        return NULL;
}

/*
 * Delete a block.
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 */
static int nlmsvc_unlink_block(struct nlm_block *block)
{
        int status;

        dprintk("lockd: unlinking block %p...\n", block);

        /* Remove block from list */
        status = posix_unblock_lock(&block->b_call->a_args.lock.fl);
        nlmsvc_remove_block(block);
        return status;
}

static void nlmsvc_free_block(struct kref *kref)
{
        struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
        struct nlm_file *file = block->b_file;

        dprintk("lockd: freeing block %p...\n", block);

        /* Remove block from file's list of blocks */
        list_del_init(&block->b_flist);
        mutex_unlock(&file->f_mutex);

        nlmsvc_freegrantargs(block->b_call);
        nlmsvc_release_call(block->b_call);
        nlm_release_file(block->b_file);
        kfree(block);
}

static void nlmsvc_release_block(struct nlm_block *block)
{
        if (block != NULL)
                kref_put_mutex(&block->b_count, nlmsvc_free_block,
                               &block->b_file->f_mutex);
}

/*
 * Loop over all blocks and delete blocks held by
 * a matching host.
 */
void nlmsvc_traverse_blocks(struct nlm_host *host,
                            struct nlm_file *file,
                            nlm_host_match_fn_t match)
{
        struct nlm_block *block, *next;

restart:
        mutex_lock(&file->f_mutex);
        list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
                if (!match(block->b_host, host))
                        continue;
                /* Do not destroy blocks that are not on
                 * the global retry list - why? */
                if (list_empty(&block->b_list))
                        continue;
                kref_get(&block->b_count);
                mutex_unlock(&file->f_mutex);
                nlmsvc_unlink_block(block);
                nlmsvc_release_block(block);
                goto restart;
        }
        mutex_unlock(&file->f_mutex);
}

/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 */
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
        locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
        memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
        call->a_args.lock.caller = utsname()->nodename;
        call->a_args.lock.oh.len = lock->oh.len;

        /* set default data area */
        call->a_args.lock.oh.data = call->a_owner;
        call->a_args.lock.svid = lock->fl.fl_pid;

        if (lock->oh.len > NLMCLNT_OHSIZE) {
                void *data = kmalloc(lock->oh.len, GFP_KERNEL);
                if (!data)
                        return 0;
                call->a_args.lock.oh.data = (u8 *) data;
        }

        memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
        return 1;
}

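/*
 * Editorial note: owner handles of up to NLMCLNT_OHSIZE bytes reuse the
 * preallocated call->a_owner scratch area; longer handles get their own
 * kmalloc'd buffer.  nlmsvc_freegrantargs() below distinguishes the two
 * cases by checking whether oh.data still points at a_owner.
 */
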
static void nlmsvc_freegrantargs(struct nlm_rqst *call)
{
        if (call->a_args.lock.oh.data != call->a_owner)
                kfree(call->a_args.lock.oh.data);

        locks_release_private(&call->a_args.lock.fl);
}

/*
 * Deferred lock request handling for non-blocking lock
 */
static __be32
nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
{
        __be32 status = nlm_lck_denied_nolocks;

        block->b_flags |= B_QUEUED;

        nlmsvc_insert_block(block, NLM_TIMEOUT);

        block->b_cache_req = &rqstp->rq_chandle;
        if (rqstp->rq_chandle.defer) {
                block->b_deferred_req =
                        rqstp->rq_chandle.defer(block->b_cache_req);
                if (block->b_deferred_req != NULL)
                        status = nlm_drop_reply;
        }
        dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
                block, block->b_flags, ntohl(status));

        return status;
}

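/*
 * Deferral sketch (editorial): for a non-blocking request the exchange
 * with the transport's cache handle is roughly
 *
 *      req = rqstp->rq_chandle.defer(&rqstp->rq_chandle);
 *              -> the reply is dropped (nlm_drop_reply)
 *      ... lm_grant callback fires, block moves to head of nlm_blocked ...
 *      req->revisit(req, 0);
 *              -> the request is re-executed and now finds b_granted or
 *                 B_TIMED_OUT set on the B_QUEUED block
 */
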
/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 */
__be32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
            struct nlm_host *host, struct nlm_lock *lock, int wait,
            struct nlm_cookie *cookie, int reclaim)
{
        struct nlm_block *block = NULL;
        int error;
        __be32 ret;

        dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
                file_inode(file->f_file)->i_sb->s_id,
                file_inode(file->f_file)->i_ino,
                lock->fl.fl_type, lock->fl.fl_pid,
                (long long)lock->fl.fl_start,
                (long long)lock->fl.fl_end,
                wait);

        /* Lock file against concurrent access */
        mutex_lock(&file->f_mutex);
        /* Get existing block (in case client is busy-waiting)
         * or create new block
         */
        block = nlmsvc_lookup_block(file, lock);
        if (block == NULL) {
                block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
                ret = nlm_lck_denied_nolocks;
                if (block == NULL)
                        goto out;
                lock = &block->b_call->a_args.lock;
        } else
                lock->fl.fl_flags &= ~FL_SLEEP;

        if (block->b_flags & B_QUEUED) {
                dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
                        block, block->b_flags);
                if (block->b_granted) {
                        nlmsvc_unlink_block(block);
                        ret = nlm_granted;
                        goto out;
                }
                if (block->b_flags & B_TIMED_OUT) {
                        nlmsvc_unlink_block(block);
                        ret = nlm_lck_denied;
                        goto out;
                }
                ret = nlm_drop_reply;
                goto out;
        }

        if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) {
                ret = nlm_lck_denied_grace_period;
                goto out;
        }
        if (reclaim && !locks_in_grace(SVC_NET(rqstp))) {
                ret = nlm_lck_denied_grace_period;
                goto out;
        }

        if (!wait)
                lock->fl.fl_flags &= ~FL_SLEEP;
        error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
        lock->fl.fl_flags &= ~FL_SLEEP;

        dprintk("lockd: vfs_lock_file returned %d\n", error);
        switch (error) {
        case 0:
                ret = nlm_granted;
                goto out;
        case -EAGAIN:
                /*
                 * If this is a blocking request for an
                 * already pending lock request then we need
                 * to put it back on lockd's block list
                 */
                if (wait)
                        break;
                ret = nlm_lck_denied;
                goto out;
        case FILE_LOCK_DEFERRED:
                if (wait)
                        break;
                /* Filesystem lock operation is in progress;
                 * add it to the queue waiting for callback */
                ret = nlmsvc_defer_lock_rqst(rqstp, block);
                goto out;
        case -EDEADLK:
                ret = nlm_deadlock;
                goto out;
        default:        /* includes ENOLCK */
                ret = nlm_lck_denied_nolocks;
                goto out;
        }

        ret = nlm_lck_blocked;

        /* Append to list of blocked */
        nlmsvc_insert_block(block, NLM_NEVER);
out:
        mutex_unlock(&file->f_mutex);
        nlmsvc_release_block(block);
        dprintk("lockd: nlmsvc_lock returned %u\n", ret);
        return ret;
}

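/*
 * Summary of the switch above (editorial):
 *
 *      vfs_lock_file() result          NLM status returned
 *      0                               nlm_granted
 *      -EAGAIN, !wait                  nlm_lck_denied
 *      -EAGAIN, wait                   nlm_lck_blocked (block queued)
 *      FILE_LOCK_DEFERRED, !wait       nlm_drop_reply via deferral
 *      FILE_LOCK_DEFERRED, wait        nlm_lck_blocked (block queued)
 *      -EDEADLK                        nlm_deadlock
 *      anything else                   nlm_lck_denied_nolocks
 */
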
/*
 * Test for presence of a conflicting lock.
 */
__be32
nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
                struct nlm_host *host, struct nlm_lock *lock,
                struct nlm_lock *conflock, struct nlm_cookie *cookie)
{
        int error;
        __be32 ret;

        dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
                file_inode(file->f_file)->i_sb->s_id,
                file_inode(file->f_file)->i_ino,
                lock->fl.fl_type,
                (long long)lock->fl.fl_start,
                (long long)lock->fl.fl_end);

        if (locks_in_grace(SVC_NET(rqstp))) {
                ret = nlm_lck_denied_grace_period;
                goto out;
        }

        error = vfs_test_lock(file->f_file, &lock->fl);
        if (error) {
                /* We can't currently deal with deferred test requests */
                if (error == FILE_LOCK_DEFERRED)
                        WARN_ON_ONCE(1);

                ret = nlm_lck_denied_nolocks;
                goto out;
        }

        if (lock->fl.fl_type == F_UNLCK) {
                ret = nlm_granted;
                goto out;
        }

        dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
                lock->fl.fl_type, (long long)lock->fl.fl_start,
                (long long)lock->fl.fl_end);
        conflock->caller = "somehost";  /* FIXME */
        conflock->len = strlen(conflock->caller);
        conflock->oh.len = 0;           /* don't return OH info */
        conflock->svid = lock->fl.fl_pid;
        conflock->fl.fl_type = lock->fl.fl_type;
        conflock->fl.fl_start = lock->fl.fl_start;
        conflock->fl.fl_end = lock->fl.fl_end;
        locks_release_private(&lock->fl);
        ret = nlm_lck_denied;
out:
        return ret;
}

/*
 * Remove a lock.
 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
 * afterwards. In this case the block will still be there, and hence
 * must be removed.
 */
__be32
nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
        int error;

        dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
                file_inode(file->f_file)->i_sb->s_id,
                file_inode(file->f_file)->i_ino,
                lock->fl.fl_pid,
                (long long)lock->fl.fl_start,
                (long long)lock->fl.fl_end);

        /* First, cancel any lock that might be there */
        nlmsvc_cancel_blocked(net, file, lock);

        lock->fl.fl_type = F_UNLCK;
        error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);

        return (error < 0) ? nlm_lck_denied_nolocks : nlm_granted;
}

/*
 * Cancel a previously blocked request.
 *
 * A cancel request always overrides any grant that may currently
 * be in progress.
 * The calling procedure must check whether the file can be closed.
 */
__be32
nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
        struct nlm_block *block;
        int status = 0;

        dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
                file_inode(file->f_file)->i_sb->s_id,
                file_inode(file->f_file)->i_ino,
                lock->fl.fl_pid,
                (long long)lock->fl.fl_start,
                (long long)lock->fl.fl_end);

        if (locks_in_grace(net))
                return nlm_lck_denied_grace_period;

        mutex_lock(&file->f_mutex);
        block = nlmsvc_lookup_block(file, lock);
        mutex_unlock(&file->f_mutex);
        if (block != NULL) {
                vfs_cancel_lock(block->b_file->f_file,
                                &block->b_call->a_args.lock.fl);
                status = nlmsvc_unlink_block(block);
                nlmsvc_release_block(block);
        }
        return status ? nlm_lck_denied : nlm_granted;
}

/*
 * This is a callback from the filesystem for VFS file lock requests.
 * It will be used if lm_grant is defined and the filesystem cannot
 * respond to the request immediately.
 * For SETLK or SETLKW request it will get the local posix lock.
 * In all cases it will move the block to the head of the nlm_blocked
 * queue, where nlmsvc_retry_blocked() can send back a reply for SETLKW
 * or revisit the deferred RPC for GETLK and SETLK.
 */
static void
nlmsvc_update_deferred_block(struct nlm_block *block, int result)
{
        block->b_flags |= B_GOT_CALLBACK;
        if (result == 0)
                block->b_granted = 1;
        else
                block->b_flags |= B_TIMED_OUT;
}

static int nlmsvc_grant_deferred(struct file_lock *fl, int result)
{
        struct nlm_block *block;
        int rc = -ENOENT;

        spin_lock(&nlm_blocked_lock);
        list_for_each_entry(block, &nlm_blocked, b_list) {
                if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
                        dprintk("lockd: nlmsvc_grant_deferred block %p flags %d\n",
                                block, block->b_flags);
                        if (block->b_flags & B_QUEUED) {
                                if (block->b_flags & B_TIMED_OUT) {
                                        rc = -ENOLCK;
                                        break;
                                }
                                nlmsvc_update_deferred_block(block, result);
                        } else if (result == 0)
                                block->b_granted = 1;

                        nlmsvc_insert_block_locked(block, 0);
                        svc_wake_up(block->b_daemon);
                        rc = 0;
                        break;
                }
        }
        spin_unlock(&nlm_blocked_lock);
        if (rc == -ENOENT)
                printk(KERN_WARNING "lockd: grant for unknown block\n");
        return rc;
}

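/*
 * lm_grant() semantics as handled above (editorial summary): result == 0
 * means the filesystem granted the lock, so b_granted is set; a non-zero
 * result on a B_QUEUED block marks it B_TIMED_OUT.  Either way the block
 * moves to the head of nlm_blocked and lockd is woken, so the outcome is
 * processed from process context rather than from this callback.
 */
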
/*
 * Unblock a blocked lock request. This is a callback invoked from the
 * VFS layer when a lock on which we blocked is removed.
 *
 * This function doesn't grant the blocked lock instantly, but rather moves
 * the block to the head of nlm_blocked where it can be picked up by lockd.
 */
static void
nlmsvc_notify_blocked(struct file_lock *fl)
{
        struct nlm_block *block;

        dprintk("lockd: VFS unblock notification for block %p\n", fl);
        spin_lock(&nlm_blocked_lock);
        list_for_each_entry(block, &nlm_blocked, b_list) {
                if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
                        nlmsvc_insert_block_locked(block, 0);
                        spin_unlock(&nlm_blocked_lock);
                        svc_wake_up(block->b_daemon);
                        return;
                }
        }
        spin_unlock(&nlm_blocked_lock);
        printk(KERN_WARNING "lockd: notification for unknown block!\n");
}

static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
        return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid;
}

/*
 * Since NLM uses two "keys" for tracking locks, we need to hash them down
 * to one for the blocked_hash. Here, we're just xor'ing the host address
 * with the pid in order to create a key value for picking a hash bucket.
 */
static unsigned long
nlmsvc_owner_key(struct file_lock *fl)
{
        return (unsigned long)fl->fl_owner ^ (unsigned long)fl->fl_pid;
}

const struct lock_manager_operations nlmsvc_lock_operations = {
        .lm_compare_owner = nlmsvc_same_owner,
        .lm_owner_key = nlmsvc_owner_key,
        .lm_notify = nlmsvc_notify_blocked,
        .lm_grant = nlmsvc_grant_deferred,
};

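/*
 * Key derivation example (editorial, hypothetical values): for
 * fl_owner == (fl_owner_t)0xffff880012345678 and fl_pid == 1234 (0x4d2),
 * nlmsvc_owner_key() returns 0xffff880012345678 ^ 0x4d2 ==
 * 0xffff8800123452aa, which the VFS then reduces to a blocked_hash
 * bucket index.
 */
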
/*
 * Try to claim a lock that was previously blocked.
 *
 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
 * RPC thread when notifying the client. This seems like overkill...
 * Here's why:
 *  -  we don't want to use a synchronous RPC thread, otherwise
 *     we might find ourselves hanging on a dead portmapper.
 *  -  Some lockd implementations (e.g. HP) don't react to
 *     RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
 */
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
        struct nlm_file *file = block->b_file;
        struct nlm_lock *lock = &block->b_call->a_args.lock;
        int error;
        loff_t fl_start, fl_end;

        dprintk("lockd: grant blocked lock %p\n", block);

        kref_get(&block->b_count);

        /* Unlink block request from list */
        nlmsvc_unlink_block(block);

        /* If b_granted is true this means we've been here before.
         * Just retry the grant callback, possibly refreshing the RPC
         * binding */
        if (block->b_granted) {
                nlm_rebind_host(block->b_host);
                goto callback;
        }

        /* Try the lock operation again */
        /* vfs_lock_file() can mangle fl_start and fl_end, but we need
         * them unchanged for the GRANT_MSG
         */
        lock->fl.fl_flags |= FL_SLEEP;
        fl_start = lock->fl.fl_start;
        fl_end = lock->fl.fl_end;
        error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
        lock->fl.fl_flags &= ~FL_SLEEP;
        lock->fl.fl_start = fl_start;
        lock->fl.fl_end = fl_end;

        switch (error) {
        case 0:
                break;
        case FILE_LOCK_DEFERRED:
                dprintk("lockd: lock still blocked error %d\n", error);
                nlmsvc_insert_block(block, NLM_NEVER);
                nlmsvc_release_block(block);
                return;
        default:
                printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
                        -error, __func__);
                nlmsvc_insert_block(block, 10 * HZ);
                nlmsvc_release_block(block);
                return;
        }

callback:
        /* Lock was granted by VFS. */
        dprintk("lockd: GRANTing blocked lock.\n");
        block->b_granted = 1;

        /* keep block on the list, but don't reattempt until the RPC
         * completes or the submission fails
         */
        nlmsvc_insert_block(block, NLM_NEVER);

        /* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
         * will queue up a new one if this one times out
         */
        error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
                               &nlmsvc_grant_ops);

        /* RPC submission failed, wait a bit and retry */
        if (error < 0)
                nlmsvc_insert_block(block, 10 * HZ);
}

/*
 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
 * RPC call has succeeded or timed out.
 * Like all RPC callbacks, it is invoked by the rpciod process, so it
 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
 * chain once more in order to have it removed by lockd itself (which can
 * then sleep on the file semaphore without disrupting e.g. the nfs client).
 */
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
        struct nlm_rqst *call = data;
        struct nlm_block *block = call->a_block;
        unsigned long timeout;

        dprintk("lockd: GRANT_MSG RPC callback\n");

        spin_lock(&nlm_blocked_lock);
        /* if the block is not on a list at this point then it has
         * been invalidated. Don't try to requeue it.
         *
         * FIXME: it's possible that the block is removed from the list
         * after this check but before the nlmsvc_insert_block. In that
         * case it will be added back. Perhaps we need better locking
         * for nlm_blocked?
         */
        if (list_empty(&block->b_list))
                goto out;

        /* Technically, we should down the file semaphore here. Since we
         * move the block towards the head of the queue only, no harm
         * can be done, though. */
        if (task->tk_status < 0) {
                /* RPC error: Re-insert for retransmission */
                timeout = 10 * HZ;
        } else {
                /* Call was successful, now wait for client callback */
                timeout = 60 * HZ;
        }
        nlmsvc_insert_block_locked(block, timeout);
        svc_wake_up(block->b_daemon);
out:
        spin_unlock(&nlm_blocked_lock);
}

/*
 * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an
 * .rpc_release rpc_call_op
 */
static void nlmsvc_grant_release(void *data)
{
        struct nlm_rqst *call = data;

        nlmsvc_release_block(call->a_block);
}

static const struct rpc_call_ops nlmsvc_grant_ops = {
        .rpc_call_done = nlmsvc_grant_callback,
        .rpc_release = nlmsvc_grant_release,
};

/*
 * We received a GRANT_RES callback. Try to find the corresponding
 * block.
 */
void
nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
{
        struct nlm_block *block;

        dprintk("grant_reply: looking for cookie %x, s=%d\n",
                *(unsigned int *)(cookie->data), status);
        block = nlmsvc_find_block(cookie);
        if (block == NULL)
                return;

        if (status == nlm_lck_denied_grace_period) {
                /* Try again in a couple of seconds */
                nlmsvc_insert_block(block, 10 * HZ);
        } else {
                /*
                 * Lock is now held by client, or has been rejected.
                 * In both cases, the block should be removed.
                 */
                nlmsvc_unlink_block(block);
        }
        nlmsvc_release_block(block);
}

/* Helper function to handle retry of a deferred block.
 * If it is a blocking lock, call grant_blocked.
 * For a non-blocking lock or test lock, revisit the request.
 */
static void
retry_deferred_block(struct nlm_block *block)
{
        if (!(block->b_flags & B_GOT_CALLBACK))
                block->b_flags |= B_TIMED_OUT;
        nlmsvc_insert_block(block, NLM_TIMEOUT);
        dprintk("revisit block %p flags %d\n", block, block->b_flags);
        if (block->b_deferred_req) {
                block->b_deferred_req->revisit(block->b_deferred_req, 0);
                block->b_deferred_req = NULL;
        }
}

/*
 * Retry all blocked locks that have been notified. This is where lockd
 * picks up locks that can be granted, or grant notifications that must
 * be retransmitted.
 */
unsigned long
nlmsvc_retry_blocked(void)
{
        unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
        struct nlm_block *block;

        spin_lock(&nlm_blocked_lock);
        while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
                block = list_entry(nlm_blocked.next, struct nlm_block, b_list);

                if (block->b_when == NLM_NEVER)
                        break;
                if (time_after(block->b_when, jiffies)) {
                        timeout = block->b_when - jiffies;
                        break;
                }
                spin_unlock(&nlm_blocked_lock);

                dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
                        block, block->b_when);
                if (block->b_flags & B_QUEUED) {
                        dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
                                block, block->b_granted, block->b_flags);
                        retry_deferred_block(block);
                } else
                        nlmsvc_grant_blocked(block);
                spin_lock(&nlm_blocked_lock);
        }
        spin_unlock(&nlm_blocked_lock);

        return timeout;
}
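
/*
 * Usage note (editorial): the value returned here is the number of
 * jiffies until the earliest pending retry, or MAX_SCHEDULE_TIMEOUT if
 * nothing is pending.  The lockd main loop (lockd() in fs/lockd/svc.c)
 * uses it to bound how long it sleeps in svc_recv() before calling
 * nlmsvc_retry_blocked() again.
 */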