// SPDX-License-Identifier: GPL-2.0-only
/*
 * We implement four types of file locks: BSD locks, posix locks, open
 * file description locks, and leases. For details about BSD locks,
 * see the flock(2) man page; for details about the other three, see
 * fcntl(2).
 *
 * Locking conflicts and dependencies:
 * If multiple threads attempt to lock the same byte (or flock the same file)
 * only one can be granted the lock, and the others must wait their turn.
 * The first lock has been "applied" or "granted", the others are "waiting"
 * and are "blocked" by the "applied" lock.
 *
 * Waiting and applied locks are all kept in trees whose properties are:
 *
 *	- the root of a tree may be an applied or waiting lock.
 *	- every other node in the tree is a waiting lock that
 *	  conflicts with every ancestor of that node.
 *
 * Every such tree begins life as a waiting singleton which obviously
 * satisfies the above properties.
 *
 * The only ways we modify trees preserve these properties:
 *
 *	1. We may add a new leaf node, but only after first verifying that it
 *	   conflicts with all of its ancestors.
 *	2. We may remove the root of a tree, creating a new singleton
 *	   tree from the root and N new trees rooted in the immediate
 *	   children.
 *	3. If the root of a tree is not currently an applied lock, we may
 *	   apply it (if possible).
 *	4. We may upgrade the root of the tree (either extend its range,
 *	   or upgrade its entire range from read to write).
 *
 * When an applied lock is modified in a way that reduces or downgrades any
 * part of its range, we remove all its children (2 above). This particularly
 * happens when a lock is unlocked.
 *
 * For each of those child trees we "wake up" the thread which is
 * waiting for the lock so it can continue handling as follows: if the
 * root of the tree applies, we do so (3). If it doesn't, it must
 * conflict with some applied lock. We remove (wake up) all of its children
 * (2), and add it as a new leaf to the tree rooted in the applied
 * lock (1). We then repeat the process recursively with those
 * children.
 */
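/*
 * Illustrative walkthrough (an editorial example, not part of the original
 * comment): suppose applied lock W is blocking waiter B1, which in turn
 * blocks waiter B2, giving the tree W <- B1 <- B2, where each waiter
 * conflicts with all of its ancestors. When W is unlocked, B1's subtree is
 * detached and B1's thread is woken: if B1 can now be applied, it is
 * (rule 3) and B2 simply keeps waiting beneath it; if B1 still conflicts
 * with some other applied lock, B2 is woken as well (rule 2) and B1 is
 * re-added as a leaf under that applied lock (rule 1).
 */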
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>
#include <linux/sysctl.h>

#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>

#include <linux/uaccess.h>
#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
#define IS_REMOTELCK(fl)	(fl->fl_pid <= 0)
static bool lease_breaking(struct file_lock *fl)
{
	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}

static int leases_enable = 1;
static int lease_break_time = 45;
#ifdef CONFIG_SYSCTL
static struct ctl_table locks_sysctls[] = {
	{
		.procname	= "leases-enable",
		.data		= &leases_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#ifdef CONFIG_MMU
	{
		.procname	= "lease-break-time",
		.data		= &lease_break_time,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif /* CONFIG_MMU */
	{}
};

static int __init init_fs_locks_sysctls(void)
{
	register_sysctl_init("fs", locks_sysctls);
	return 0;
}
early_initcall(init_fs_locks_sysctls);
#endif /* CONFIG_SYSCTL */
/*
 * The global file_lock_list is only used for displaying /proc/locks, so we
 * keep a list on each CPU, with each list protected by its own spinlock.
 * Global serialization is done using file_rwsem.
 *
 * Note that alterations to the list also require that the relevant flc_lock is
 * held.
 */
struct file_lock_list_struct {
	spinlock_t		lock;
	struct hlist_head	hlist;
};
static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
/*
 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 * It is protected by blocked_lock_lock.
 *
 * We hash locks by lockowner in order to optimize searching for the lock a
 * particular lockowner is waiting on.
 *
 * FIXME: make this value scale via some heuristic? We generally will want more
 * buckets when we have more lockowners holding locks, but that's a little
 * difficult to determine without knowing what the workload will look like.
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
/*
 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 * want to be holding this lock.
 *
 * In addition, it also protects the fl->fl_blocked_requests list, and the
 * fl->fl_blocker pointer for file_lock structures that are acting as lock
 * requests (in contrast to those that are acting as records of acquired locks).
 *
 * Note that when we acquire this lock in order to change the above fields,
 * we often hold the flc_lock as well. In certain cases, when reading the fields
 * protected by this lock, we can skip acquiring it iff we already hold the
 * flc_lock.
 */
static DEFINE_SPINLOCK(blocked_lock_lock);
static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;
static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
	struct file_lock_context *ctx;

	/* paired with cmpxchg() below */
	ctx = locks_inode_context(inode);
	if (likely(ctx) || type == F_UNLCK)
		goto out;

	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
	if (!ctx)
		goto out;

	spin_lock_init(&ctx->flc_lock);
	INIT_LIST_HEAD(&ctx->flc_flock);
	INIT_LIST_HEAD(&ctx->flc_posix);
	INIT_LIST_HEAD(&ctx->flc_lease);

	/*
	 * Assign the pointer if it's not already assigned. If it is, then
	 * free the context we just allocated.
	 */
	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
		kmem_cache_free(flctx_cache, ctx);
		ctx = locks_inode_context(inode);
	}
out:
	trace_locks_get_lock_context(inode, type, ctx);
	return ctx;
}
static void
locks_dump_ctx_list(struct list_head *list, char *list_type)
{
	struct file_lock *fl;

	list_for_each_entry(fl, list, fl_list) {
		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
			list_type, fl->fl_owner, fl->fl_flags, fl->fl_type,
			fl->fl_pid);
	}
}
static void
locks_check_ctx_lists(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(!list_empty(&ctx->flc_flock) ||
		     !list_empty(&ctx->flc_posix) ||
		     !list_empty(&ctx->flc_lease))) {
		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
			inode->i_ino);
		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
	}
}
static void
locks_check_ctx_file_list(struct file *filp, struct list_head *list,
			  char *list_type)
{
	struct file_lock *fl;
	struct inode *inode = locks_inode(filp);

	list_for_each_entry(fl, list, fl_list)
		if (fl->fl_file == filp)
			pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
				" fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
				list_type, MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino,
				fl->fl_owner, fl->fl_flags, fl->fl_type,
				fl->fl_pid);
}
void
locks_free_lock_context(struct inode *inode)
{
	struct file_lock_context *ctx = locks_inode_context(inode);

	if (unlikely(ctx)) {
		locks_check_ctx_lists(inode);
		kmem_cache_free(flctx_cache, ctx);
	}
}
static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_HLIST_NODE(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_list);
	INIT_LIST_HEAD(&fl->fl_blocked_requests);
	INIT_LIST_HEAD(&fl->fl_blocked_member);
	init_waitqueue_head(&fl->fl_wait);
}
/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

	if (fl)
		locks_init_lock_heads(fl);

	return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);
void locks_release_private(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_list));
	BUG_ON(!list_empty(&fl->fl_blocked_requests));
	BUG_ON(!list_empty(&fl->fl_blocked_member));
	BUG_ON(!hlist_unhashed(&fl->fl_link));

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_put_owner) {
			fl->fl_lmops->lm_put_owner(fl->fl_owner);
			fl->fl_owner = NULL;
		}
		fl->fl_lmops = NULL;
	}
}
EXPORT_SYMBOL_GPL(locks_release_private);
/**
 * locks_owner_has_blockers - Check for blocking lock requests
 * @flctx: file lock context
 * @owner: lock owner
 *
 * Return values:
 *   %true: @owner has at least one blocker
 *   %false: @owner has no blockers
 */
bool locks_owner_has_blockers(struct file_lock_context *flctx,
			      fl_owner_t owner)
{
	struct file_lock *fl;

	spin_lock(&flctx->flc_lock);
	list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
		if (fl->fl_owner != owner)
			continue;
		if (!list_empty(&fl->fl_blocked_requests)) {
			spin_unlock(&flctx->flc_lock);
			return true;
		}
	}
	spin_unlock(&flctx->flc_lock);
	return false;
}
EXPORT_SYMBOL_GPL(locks_owner_has_blockers);
/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);
static void
locks_dispose_list(struct list_head *dispose)
{
	struct file_lock *fl;

	while (!list_empty(dispose)) {
		fl = list_first_entry(dispose, struct file_lock, fl_list);
		list_del_init(&fl->fl_list);
		locks_free_lock(fl);
	}
}
void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}
EXPORT_SYMBOL(locks_init_lock);
/*
 * Initialize a new lock from an existing file_lock structure.
 */
void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_lmops = fl->fl_lmops;
	new->fl_ops = NULL;

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_get_owner)
			fl->fl_lmops->lm_get_owner(fl->fl_owner);
	}
}
EXPORT_SYMBOL(locks_copy_conflock);
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	/* "new" must be a freshly-initialized lock */
	WARN_ON_ONCE(new->fl_ops);

	locks_copy_conflock(new, fl);

	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
	}
}
EXPORT_SYMBOL(locks_copy_lock);
static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
{
	struct file_lock *f;

	/*
	 * As ctx->flc_lock is held, new requests cannot be added to
	 * ->fl_blocked_requests, so we don't need a lock to check if it
	 * is empty.
	 */
	if (list_empty(&fl->fl_blocked_requests))
		return;
	spin_lock(&blocked_lock_lock);
	list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
	list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
		f->fl_blocker = new;
	spin_unlock(&blocked_lock_lock);
}
static inline int flock_translate_cmd(int cmd) {
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}
/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static void flock_make_lock(struct file *filp, struct file_lock *fl, int type)
{
	locks_init_lock(fl);

	fl->fl_file = filp;
	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;
}
static int assign_type(struct file_lock *fl, long type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	switch (l->l_whence) {
	case SEEK_SET:
		fl->fl_start = 0;
		break;
	case SEEK_CUR:
		fl->fl_start = filp->f_pos;
		break;
	case SEEK_END:
		fl->fl_start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}
	if (l->l_start > OFFSET_MAX - fl->fl_start)
		return -EOVERFLOW;
	fl->fl_start += l->l_start;
	if (fl->fl_start < 0)
		return -EINVAL;

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	if (l->l_len > 0) {
		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
		fl->fl_end = fl->fl_start + (l->l_len - 1);

	} else if (l->l_len < 0) {
		if (fl->fl_start + l->l_len < 0)
			return -EINVAL;
		fl->fl_end = fl->fl_start - 1;
		fl->fl_start += l->l_len;
	} else
		fl->fl_end = OFFSET_MAX;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
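/*
 * Worked example (editorial, for clarity): with l_whence = SEEK_SET,
 * l_start = 100 and l_len = 10, the lock covers bytes 100..109
 * (fl_start = 100, fl_end = 109). With the POSIX-2001 negative length,
 * l_start = 100 and l_len = -10 cover bytes 90..99. l_len = 0 means
 * "lock to end of file", represented internally as fl_end = OFFSET_MAX.
 */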
/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	struct flock64 ll = {
		.l_type = l->l_type,
		.l_whence = l->l_whence,
		.l_start = l->l_start,
		.l_len = l->l_len,
	};

	return flock64_to_posix_lock(filp, fl, &ll);
}
/* default lease lock manager operations */
static bool
lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
	return false;
}

static void
lease_setup(struct file_lock *fl, void **priv)
{
	struct file *filp = fl->fl_file;
	struct fasync_struct *fa = *priv;

	/*
	 * fasync_insert_entry() returns the old entry if any. If there was no
	 * old entry, then it used "priv" and inserted it into the fasync list.
	 * Clear the pointer to indicate that it shouldn't be freed.
	 */
	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
		*priv = NULL;

	__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
}
static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
	.lm_setup = lease_setup,
};
/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}
/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, long type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		return ERR_PTR(error);

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}
/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}
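/*
 * For example (editorial): ranges 0..9 and 5..15 overlap (9 >= 5 and
 * 15 >= 0), while ranges 0..4 and 5..9 do not (4 >= 5 is false).
 */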
/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	return fl1->fl_owner == fl2->fl_owner;
}
/* Must be called with the flc_lock held! */
static void locks_insert_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);

	percpu_rwsem_assert_held(&file_rwsem);

	spin_lock(&fll->lock);
	fl->fl_link_cpu = smp_processor_id();
	hlist_add_head(&fl->fl_link, &fll->hlist);
	spin_unlock(&fll->lock);
}
/* Must be called with the flc_lock held! */
static void locks_delete_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll;

	percpu_rwsem_assert_held(&file_rwsem);

	/*
	 * Avoid taking lock if already unhashed. This is safe since this check
	 * is done while holding the flc_lock, and new insertions into the list
	 * also require that it be held.
	 */
	if (hlist_unhashed(&fl->fl_link))
		return;

	fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
	spin_lock(&fll->lock);
	hlist_del_init(&fl->fl_link);
	spin_unlock(&fll->lock);
}
static unsigned long
posix_owner_key(struct file_lock *fl)
{
	return (unsigned long)fl->fl_owner;
}
static void locks_insert_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}

static void locks_delete_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_del(&waiter->fl_link);
}
/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 *
 * Must be called with blocked_lock_lock held.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	locks_delete_global_blocked(waiter);
	list_del_init(&waiter->fl_blocked_member);
}
static void __locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_blocked_requests)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_blocked_requests,
					  struct file_lock, fl_blocked_member);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);

		/*
		 * The setting of fl_blocker to NULL marks the "done"
		 * point in deleting a block. Paired with acquire at the top
		 * of locks_delete_block().
		 */
		smp_store_release(&waiter->fl_blocker, NULL);
	}
}
/**
 * locks_delete_block - stop waiting for a file lock
 * @waiter: the lock which was waiting
 *
 * lockd/nfsd need to disconnect the lock while working on it.
 */
int locks_delete_block(struct file_lock *waiter)
{
	int status = -ENOENT;

	/*
	 * If fl_blocker is NULL, it won't be set again as this thread "owns"
	 * the lock and is the only one that might try to claim the lock.
	 *
	 * We use acquire/release to manage fl_blocker so that we can
	 * optimize away taking the blocked_lock_lock in many cases.
	 *
	 * The smp_load_acquire guarantees two things:
	 *
	 * 1/ that fl_blocked_requests can be tested locklessly. If something
	 * was recently added to that list it must have been in a locked region
	 * *before* the locked region when fl_blocker was set to NULL.
	 *
	 * 2/ that no other thread is accessing 'waiter', so it is safe to free
	 * it. __locks_wake_up_blocks is careful not to touch waiter after
	 * fl_blocker is released.
	 *
	 * If a lockless check of fl_blocker shows it to be NULL, we know that
	 * no new locks can be inserted into its fl_blocked_requests list, and
	 * can avoid doing anything further if the list is empty.
	 */
	if (!smp_load_acquire(&waiter->fl_blocker) &&
	    list_empty(&waiter->fl_blocked_requests))
		return status;

	spin_lock(&blocked_lock_lock);
	if (waiter->fl_blocker)
		status = 0;
	__locks_wake_up_blocks(waiter);
	__locks_delete_block(waiter);

	/*
	 * The setting of fl_blocker to NULL marks the "done" point in deleting
	 * a block. Paired with acquire at the top of this function.
	 */
	smp_store_release(&waiter->fl_blocker, NULL);
	spin_unlock(&blocked_lock_lock);
	return status;
}
EXPORT_SYMBOL(locks_delete_block);
/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 *
 * Must be called with both the flc_lock and blocked_lock_lock held. The
 * fl_blocked_requests list itself is protected by the blocked_lock_lock,
 * but by ensuring that the flc_lock is also held on insertions we can avoid
 * taking the blocked_lock_lock in some cases when we see that the
 * fl_blocked_requests list is empty.
 *
 * Rather than just adding to the list, we check for conflicts with any existing
 * waiters, and add beneath any waiter that blocks the new waiter.
 * Thus wakeups don't happen until needed.
 */
static void __locks_insert_block(struct file_lock *blocker,
				 struct file_lock *waiter,
				 bool conflict(struct file_lock *,
					       struct file_lock *))
{
	struct file_lock *fl;
	BUG_ON(!list_empty(&waiter->fl_blocked_member));

new_blocker:
	list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
		if (conflict(fl, waiter)) {
			blocker = fl;
			goto new_blocker;
		}
	waiter->fl_blocker = blocker;
	list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
		locks_insert_global_blocked(waiter);

	/* The requests in waiter->fl_blocked are known to conflict with
	 * waiter, but might not conflict with blocker, or the requests
	 * and lock which block it. So they all need to be woken.
	 */
	__locks_wake_up_blocks(waiter);
}
/* Must be called with flc_lock held. */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter,
			       bool conflict(struct file_lock *,
					     struct file_lock *))
{
	spin_lock(&blocked_lock_lock);
	__locks_insert_block(blocker, waiter, conflict);
	spin_unlock(&blocked_lock_lock);
}
/*
 * Wake up processes blocked waiting for blocker.
 *
 * Must be called with the inode->flc_lock held!
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	/*
	 * Avoid taking global lock if list is empty. This is safe since new
	 * blocked requests are only added to the list under the flc_lock, and
	 * the flc_lock is always held here. Note that removal from the
	 * fl_blocked_requests list does not require the flc_lock, so we must
	 * recheck list_empty() after acquiring the blocked_lock_lock.
	 */
	if (list_empty(&blocker->fl_blocked_requests))
		return;

	spin_lock(&blocked_lock_lock);
	__locks_wake_up_blocks(blocker);
	spin_unlock(&blocked_lock_lock);
}
static void
locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
{
	list_add_tail(&fl->fl_list, before);
	locks_insert_global_locks(fl);
}

static void
locks_unlink_lock_ctx(struct file_lock *fl)
{
	locks_delete_global_locks(fl);
	list_del_init(&fl->fl_list);
	locks_wake_up_blocks(fl);
}

static void
locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
{
	locks_unlink_lock_ctx(fl);
	if (dispose)
		list_add(&fl->fl_list, dispose);
	else
		locks_free_lock(fl);
}
/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static bool locks_conflict(struct file_lock *caller_fl,
			   struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return true;
	if (caller_fl->fl_type == F_WRLCK)
		return true;
	return false;
}
/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static bool posix_locks_conflict(struct file_lock *caller_fl,
				 struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (posix_same_owner(caller_fl, sys_fl))
		return false;

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return false;

	return locks_conflict(caller_fl, sys_fl);
}
/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static bool flock_locks_conflict(struct file_lock *caller_fl,
				 struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (caller_fl->fl_file == sys_fl->fl_file)
		return false;

	return locks_conflict(caller_fl, sys_fl);
}
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;
	struct file_lock_context *ctx;
	struct inode *inode = locks_inode(filp);
	void *owner;
	void (*func)(void);

retry:
	ctx = locks_inode_context(inode);
	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
		fl->fl_type = F_UNLCK;
		return;
	}

	spin_lock(&ctx->flc_lock);
	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
		if (!posix_locks_conflict(fl, cfl))
			continue;
		if (cfl->fl_lmops && cfl->fl_lmops->lm_lock_expirable
			&& (*cfl->fl_lmops->lm_lock_expirable)(cfl)) {
			owner = cfl->fl_lmops->lm_mod_owner;
			func = cfl->fl_lmops->lm_expire_lock;
			__module_get(owner);
			spin_unlock(&ctx->flc_lock);
			(*func)();
			module_put(owner);
			goto retry;
		}
		locks_copy_conflock(fl, cfl);
		goto out;
	}
	fl->fl_type = F_UNLCK;
out:
	spin_unlock(&ctx->flc_lock);
	return;
}
EXPORT_SYMBOL(posix_test_lock);
/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock. That lock in turn may be held by
 * someone waiting for at most one other lock. Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 * To handle those cases, we just bail out after a few iterations.
 *
 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
 * Because the owner is not even nominally tied to a thread of
 * execution, the deadlock detection below can't reasonably work well. Just
 * skip it for those.
 *
 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
 * locks that just checks for the case where two tasks are attempting to
 * upgrade from read to write locks on the same inode.
 */

#define MAX_DEADLK_ITERATIONS 10
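/*
 * Illustrative scenario (editorial example): task A holds a lock on range
 * R1 and is blocked waiting for range R2; task B holds R2 and now requests
 * R1. B's request conflicts with A's applied lock, and walking the chain
 * of waiters shows that A is itself waiting on a lock owned by B, so
 * letting B sleep would close a cycle; the request fails with -EDEADLK
 * instead.
 */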
/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
		if (posix_same_owner(fl, block_fl)) {
			while (fl->fl_blocker)
				fl = fl->fl_blocker;
			return fl;
		}
	}
	return NULL;
}
/* Must be called with the blocked_lock_lock held! */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	lockdep_assert_held(&blocked_lock_lock);

	/*
	 * This deadlock detector can't reasonably detect deadlocks with
	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
	 */
	if (IS_OFDLCK(caller_fl))
		return 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_inode(struct inode *inode, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock *fl;
	struct file_lock_context *ctx;
	int error = 0;
	bool found = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx) {
		if (request->fl_type != F_UNLCK)
			return -ENOMEM;
		return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
	}

	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (request->fl_file != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = true;
		locks_delete_lock_ctx(fl, &dispose);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

find_conflict:
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request, flock_locks_conflict);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_move_blocks(new_fl, request);
	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
	new_fl = NULL;
	error = 0;

out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	if (new_fl)
		locks_free_lock(new_fl);
	locks_dispose_list(&dispose);
	trace_flock_lock_inode(inode, request, error);
	return error;
}
static int posix_lock_inode(struct inode *inode, struct file_lock *request,
			    struct file_lock *conflock)
{
	struct file_lock *fl, *tmp;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock_context *ctx;
	int error;
	bool added = false;
	LIST_HEAD(dispose);
	void *owner;
	void (*func)(void);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx)
		return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure, that no new locks will be needed
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

retry:
	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/*
	 * New lock request. Walk all POSIX locks and look for conflicts. If
	 * there are any, either return error or put the request on the
	 * blocker's list of waiters and the global blocked_hash.
	 */
	if (request->fl_type != F_UNLCK) {
		list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
			if (!posix_locks_conflict(request, fl))
				continue;
			if (fl->fl_lmops && fl->fl_lmops->lm_lock_expirable
				&& (*fl->fl_lmops->lm_lock_expirable)(fl)) {
				owner = fl->fl_lmops->lm_mod_owner;
				func = fl->fl_lmops->lm_expire_lock;
				__module_get(owner);
				spin_unlock(&ctx->flc_lock);
				percpu_up_read(&file_rwsem);
				(*func)();
				module_put(owner);
				goto retry;
			}
			if (conflock)
				locks_copy_conflock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			/*
			 * Deadlock detection and insertion into the blocked
			 * locks list must be done while holding the same lock!
			 */
			error = -EDEADLK;
			spin_lock(&blocked_lock_lock);
			/*
			 * Ensure that we don't find any locks blocked on this
			 * request during deadlock detection.
			 */
			__locks_wake_up_blocks(request);
			if (likely(!posix_locks_deadlock(request, fl))) {
				error = FILE_LOCK_DEFERRED;
				__locks_insert_block(fl, request,
						     posix_locks_conflict);
			}
			spin_unlock(&blocked_lock_lock);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/* Find the first old lock with the same owner as the new lock */
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (posix_same_owner(request, fl))
			break;
	}

	/* Process locks with this owner. */
	list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
		if (!posix_same_owner(request, fl))
			break;

		/* Detect adjacent or overlapping regions (if same lock type) */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				continue;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock_ctx(fl, &dispose);
				continue;
			}
			request = fl;
			added = true;
		} else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				continue;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = true;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock_ctx(fl, &dispose);
					continue;
				}
				/*
				 * Replace the old lock with new_fl, and
				 * remove the old one. It's safe to do the
				 * insert here since we know that we won't be
				 * using new_fl later, and that the lock is
				 * just replacing an existing lock.
				 */
				error = -ENOLCK;
				if (!new_fl)
					goto out;
				locks_copy_lock(new_fl, request);
				locks_move_blocks(new_fl, request);
				request = new_fl;
				new_fl = NULL;
				locks_insert_lock_ctx(request, &fl->fl_list);
				locks_delete_lock_ctx(fl, &dispose);
				added = true;
			}
		}
	}

	/*
	 * The above code only modifies existing locks in case of merging or
	 * replacing. If new lock(s) need to be inserted all modifications are
	 * done below this, so it's safe yet to bail out.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_move_blocks(new_fl, request);
		locks_insert_lock_ctx(new_fl, &fl->fl_list);
		fl = new_fl;
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock_ctx(left, &fl->fl_list);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	locks_dispose_list(&dispose);
	trace_posix_lock_inode(inode, request, error);

	return error;
}
/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
		    struct file_lock *conflock)
{
	return posix_lock_inode(locks_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
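/*
 * A minimal userspace sketch of the fcntl(2) interface served by this code
 * (editorial example; error handling omitted). l_len = 0 locks through
 * end-of-file, and F_SETLKW sleeps until the lock can be applied:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,
 *	};
 *	fcntl(fd, F_SETLKW, &fl);
 *	fl.l_type = F_UNLCK;
 *	fcntl(fd, F_SETLK, &fl);
 */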
/**
 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
 * @inode: inode of file to which lock request should be applied
 * @fl: The lock to be applied
 *
 * Apply a POSIX style lock request to an inode.
 */
static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = posix_lock_inode(inode, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait,
					list_empty(&fl->fl_blocked_member));
		if (error)
			break;
	}
	locks_delete_block(fl);
	return error;
}
static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->fl_flags &= ~FL_UNLOCK_PENDING;
		fallthrough;
	case F_RDLCK:
		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
	}
}
/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
{
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK) {
		struct file *filp = fl->fl_file;

		f_delown(filp);
		filp->f_owner.signum = 0;
		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
		if (fl->fl_fasync != NULL) {
			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
			fl->fl_fasync = NULL;
		}
		locks_delete_lock_ctx(fl, dispose);
	}
	return 0;
}
EXPORT_SYMBOL(lease_modify);
static bool past_time(unsigned long then)
{
	if (!then)
		/* 0 is a special value meaning "this never expires": */
		return false;
	return time_after(jiffies, then);
}
static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl, *tmp;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		trace_time_out_leases(inode, fl);
		if (past_time(fl->fl_downgrade_time))
			lease_modify(fl, F_RDLCK, dispose);
		if (past_time(fl->fl_break_time))
			lease_modify(fl, F_UNLCK, dispose);
	}
}
static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
	bool rc;

	if (lease->fl_lmops->lm_breaker_owns_lease
			&& lease->fl_lmops->lm_breaker_owns_lease(lease))
		return false;
	if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
		rc = false;
		goto trace;
	}
	if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) {
		rc = false;
		goto trace;
	}

	rc = locks_conflict(breaker, lease);
trace:
	trace_leases_conflict(rc, lease, breaker);
	return rc;
}
static bool
any_leases_conflict(struct inode *inode, struct file_lock *breaker)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (leases_conflict(fl, breaker))
			return true;
	}
	return false;
}
/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
 *	    break all leases
 * @type: FL_LEASE: break leases and delegations; FL_DELEG: break
 *	    only delegations
 *
 * break_lease (inlined for speed) has checked there already is at least
 * some kind of lock (maybe a lease) on this file. Leases are broken on
 * a call to open() or truncate(). This function can sleep unless you
 * specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
	int error = 0;
	struct file_lock_context *ctx;
	struct file_lock *new_fl, *fl, *tmp;
	unsigned long break_time;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;
	LIST_HEAD(dispose);

	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
	if (IS_ERR(new_fl))
		return PTR_ERR(new_fl);
	new_fl->fl_flags = type;

	/* typically we will check that ctx is non-NULL before calling */
	ctx = locks_inode_context(inode);
	if (!ctx) {
		WARN_ON_ONCE(1);
		goto free_lock;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);

	time_out_leases(inode, &dispose);

	if (!any_leases_conflict(inode, new_fl))
		goto out;

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		if (!leases_conflict(fl, new_fl))
			continue;
		if (want_write) {
			if (fl->fl_flags & FL_UNLOCK_PENDING)
				continue;
			fl->fl_flags |= FL_UNLOCK_PENDING;
			fl->fl_break_time = break_time;
		} else {
			if (lease_breaking(fl))
				continue;
			fl->fl_flags |= FL_DOWNGRADE_PENDING;
			fl->fl_downgrade_time = break_time;
		}
		if (fl->fl_lmops->lm_break(fl))
			locks_delete_lock_ctx(fl, &dispose);
	}

	if (list_empty(&ctx->flc_lease))
		goto out;

	if (mode & O_NONBLOCK) {
		trace_break_lease_noblock(inode, new_fl);
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
	break_time = fl->fl_break_time;
	if (break_time != 0)
		break_time -= jiffies;
	if (break_time == 0)
		break_time++;
	locks_insert_block(fl, new_fl, leases_conflict);
	trace_break_lease_block(inode, new_fl);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);

	locks_dispose_list(&dispose);
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
					list_empty(&new_fl->fl_blocked_member),
					break_time);

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	trace_break_lease_unblock(inode, new_fl);
	locks_delete_block(new_fl);
	if (error >= 0) {
		/*
		 * Wait for the next conflicting lease that has not been
		 * broken yet
		 */
		if (error == 0)
			time_out_leases(inode, &dispose);
		if (any_leases_conflict(inode, new_fl))
			goto restart;
		error = 0;
	}
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
free_lock:
	locks_free_lock(new_fl);
	return error;
}
EXPORT_SYMBOL(__break_lease);
/**
 * lease_get_mtime - update modified time of an inode with exclusive lease
 * @inode: the inode
 * @time:  pointer to a timespec which contains the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases. The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec64 *time)
{
	bool has_lease = false;
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = locks_inode_context(inode);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		spin_lock(&ctx->flc_lock);
		fl = list_first_entry_or_null(&ctx->flc_lease,
					      struct file_lock, fl_list);
		if (fl && (fl->fl_type == F_WRLCK))
			has_lease = true;
		spin_unlock(&ctx->flc_lock);
	}

	if (has_lease)
		*time = current_time(inode);
}
EXPORT_SYMBOL(lease_get_mtime);
/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * The value returned by this function will be one of
 * (if no lease break is pending):
 *
 * %F_RDLCK to indicate a shared lease is held.
 *
 * %F_WRLCK to indicate an exclusive lease is held.
 *
 * %F_UNLCK to indicate no lease is held.
 *
 * (if a lease break is pending):
 *
 * %F_RDLCK to indicate an exclusive lease needs to be
 *		changed to a shared lease (or removed).
 *
 * %F_UNLCK to indicate the lease needs to be removed.
 *
 * XXX: sfr & willy disagree over whether F_INPROGRESS
 * should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	int type = F_UNLCK;
	LIST_HEAD(dispose);

	ctx = locks_inode_context(inode);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		percpu_down_read(&file_rwsem);
		spin_lock(&ctx->flc_lock);
		time_out_leases(inode, &dispose);
		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
			if (fl->fl_file != filp)
				continue;
			type = target_leasetype(fl);
			break;
		}
		spin_unlock(&ctx->flc_lock);
		percpu_up_read(&file_rwsem);

		locks_dispose_list(&dispose);
	}
	return type;
}
/**
 * check_conflicting_open - see if the given file points to an inode that has
 *			    an existing open that would conflict with the
 *			    desired lease.
 * @filp:	file to check
 * @arg:	type of lease that we're trying to acquire
 * @flags:	current lock flags
 *
 * Check to see if there's an existing open fd on this file that would
 * conflict with the lease we're trying to set.
 */
static int
check_conflicting_open(struct file *filp, const long arg, int flags)
{
	struct inode *inode = locks_inode(filp);
	int self_wcount = 0, self_rcount = 0;

	if (flags & FL_LAYOUT)
		return 0;
	if (flags & FL_DELEG)
		/* We leave these checks to the caller */
		return 0;

	if (arg == F_RDLCK)
		return inode_is_open_for_write(inode) ? -EAGAIN : 0;
	else if (arg != F_WRLCK)
		return 0;

	/*
	 * Make sure that only read/write count is from lease requestor.
	 * Note that this will result in denying write leases when i_writecount
	 * is negative, which is what we want. (We shouldn't grant write leases
	 * on files open for execution.)
	 */
	if (filp->f_mode & FMODE_WRITE)
		self_wcount = 1;
	else if (filp->f_mode & FMODE_READ)
		self_rcount = 1;

	if (atomic_read(&inode->i_writecount) != self_wcount ||
	    atomic_read(&inode->i_readcount) != self_rcount)
		return -EAGAIN;

	return 0;
}
static int
generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
{
	struct file_lock *fl, *my_fl = NULL, *lease;
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
	int error;
	LIST_HEAD(dispose);

	lease = *flp;
	trace_generic_add_lease(inode, lease);

	/* Note that arg is never F_UNLCK here */
	ctx = locks_get_lock_context(inode, arg);
	if (!ctx)
		return -ENOMEM;

	/*
	 * In the delegation case we need mutual exclusion with
	 * a number of operations that take the i_mutex. We trylock
	 * because delegations are an optional optimization, and if
	 * there's some chance of a conflict--we'd rather not
	 * bother, maybe that's a sign this just isn't a good file to
	 * hand out a delegation on.
	 */
	if (is_deleg && !inode_trylock(inode))
		return -EAGAIN;

	if (is_deleg && arg == F_WRLCK) {
		/* Write delegations are not currently supported: */
		inode_unlock(inode);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	time_out_leases(inode, &dispose);
	error = check_conflicting_open(filp, arg, lease->fl_flags);
	if (error)
		goto out;

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	error = -EAGAIN;
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (fl->fl_file == filp &&
		    fl->fl_owner == lease->fl_owner) {
			my_fl = fl;
			continue;
		}

		/*
		 * No exclusive leases if someone else has a lease on
		 * this file:
		 */
		if (arg == F_WRLCK)
			goto out;
		/*
		 * Modifying our existing lease is OK, but no getting a
		 * new lease if someone else is opening for write:
		 */
		if (fl->fl_flags & FL_UNLOCK_PENDING)
			goto out;
	}

	if (my_fl != NULL) {
		lease = my_fl;
		error = lease->fl_lmops->lm_change(lease, arg, &dispose);
		if (error)
			goto out;
		goto out_setup;
	}

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock_ctx(lease, &ctx->flc_lease);
	/*
	 * The check in break_lease() is lockless. It's possible for another
	 * open to race in after we did the earlier check for a conflicting
	 * open but before the lease was inserted. Check again for a
	 * conflicting open and cancel the lease if there is one.
	 *
	 * We also add a barrier here to ensure that the insertion of the lock
	 * precedes these checks.
	 */
	smp_mb();
	error = check_conflicting_open(filp, arg, lease->fl_flags);
	if (error) {
		locks_unlink_lock_ctx(lease);
		goto out;
	}

out_setup:
	if (lease->fl_lmops->lm_setup)
		lease->fl_lmops->lm_setup(lease, priv);
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
	if (is_deleg)
		inode_unlock(inode);
	if (!error && !my_fl)
		*flp = NULL;
	return error;
}
static int generic_delete_lease(struct file *filp, void *owner)
{
	int error = -EAGAIN;
	struct file_lock *fl, *victim = NULL;
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	LIST_HEAD(dispose);

	ctx = locks_inode_context(inode);
	if (!ctx) {
		trace_generic_delete_lease(inode, NULL);
		return error;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (fl->fl_file == filp &&
		    fl->fl_owner == owner) {
			victim = fl;
			break;
		}
	}
	trace_generic_delete_lease(inode, victim);
	if (victim)
		error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
	return error;
}
/**
 * generic_setlease - sets a lease on an open file
 * @filp:	file pointer
 * @arg:	type of lease to obtain
 * @flp:	input - file_lock to use, output - file_lock inserted
 * @priv:	private data for lm_setup (may be NULL if lm_setup
 *		doesn't require it)
 *
 * The (input) flp->fl_lmops->lm_break function is required
 * by break_lease().
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
			void **priv)
{
	struct inode *inode = locks_inode(filp);
	int error;

	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	switch (arg) {
	case F_UNLCK:
		return generic_delete_lease(filp, *priv);
	case F_RDLCK:
	case F_WRLCK:
		if (!(*flp)->fl_lmops->lm_break) {
			WARN_ON_ONCE(1);
			return -ENOLCK;
		}

		return generic_add_lease(filp, arg, flp, priv);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(generic_setlease);
#if IS_ENABLED(CONFIG_SRCU)
/*
 * Kernel subsystems can register to be notified on any attempt to set
 * a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
 * to close files that it may have cached when there is an attempt to set a
 * conflicting lease.
 */
static struct srcu_notifier_head lease_notifier_chain;

static inline void
lease_notifier_chain_init(void)
{
	srcu_init_notifier_head(&lease_notifier_chain);
}

static inline void
setlease_notifier(long arg, struct file_lock *lease)
{
	if (arg != F_UNLCK)
		srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
}

int lease_register_notifier(struct notifier_block *nb)
{
	return srcu_notifier_chain_register(&lease_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(lease_register_notifier);

void lease_unregister_notifier(struct notifier_block *nb)
{
	srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(lease_unregister_notifier);

#else /* !IS_ENABLED(CONFIG_SRCU) */
static inline void
lease_notifier_chain_init(void)
{
}

static inline void
setlease_notifier(long arg, struct file_lock *lease)
{
}

int lease_register_notifier(struct notifier_block *nb)
{
	return 0;
}
EXPORT_SYMBOL_GPL(lease_register_notifier);

void lease_unregister_notifier(struct notifier_block *nb)
{
}
EXPORT_SYMBOL_GPL(lease_unregister_notifier);

#endif /* IS_ENABLED(CONFIG_SRCU) */
/**
 * vfs_setlease - sets a lease on an open file
 * @filp:	file pointer
 * @arg:	type of lease to obtain
 * @lease:	file_lock to use when adding a lease
 * @priv:	private info for lm_setup when adding a lease (may be
 *		NULL if lm_setup doesn't require it)
 *
 * Call this to establish a lease on the file. The "lease" argument is not
 * used for F_UNLCK requests and may be NULL. For commands that set or alter
 * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be
 * set; if not, this function will return -ENOLCK (and generate a scary-looking
 * stack trace).
 *
 * The "priv" pointer is passed directly to the lm_setup function as-is. It
 * may be NULL if the lm_setup operation doesn't require it.
 */
int
vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
{
	if (lease)
		setlease_notifier(arg, *lease);
	if (filp->f_op->setlease)
		return filp->f_op->setlease(filp, arg, lease, priv);
	else
		return generic_setlease(filp, arg, lease, priv);
}
EXPORT_SYMBOL_GPL(vfs_setlease);
static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl;
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	new->fa_fd = fd;

	error = vfs_setlease(filp, arg, &fl, (void **)&new);
	if (fl)
		locks_free_lock(fl);
	if (new)
		fasync_free(new);
	return error;
}
/**
 * fcntl_setlease - sets a lease on an open file
 * @fd: open file descriptor
 * @filp: file pointer
 * @arg: type of lease to obtain
 *
 * Call this fcntl to establish a lease on the file.
 * Note that you also need to call %F_SETSIG to
 * receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	if (arg == F_UNLCK)
		return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
	return do_fcntl_add_lease(fd, filp, arg);
}
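/*
 * Minimal userspace sketch (editorial example): take a read lease and
 * arrange to be signalled when another open breaks it, then release it:
 *
 *	fcntl(fd, F_SETSIG, SIGRTMIN);
 *	fcntl(fd, F_SETLEASE, F_RDLCK);
 *	...
 *	fcntl(fd, F_SETLEASE, F_UNLCK);
 */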
/**
 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
 * @inode: inode of the file to apply to
 * @fl: The lock to be applied
 *
 * Apply a FLOCK style lock request to an inode.
 */
static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_inode(inode, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait,
				list_empty(&fl->fl_blocked_member));
		if (error)
			break;
	}
	locks_delete_block(fl);
	return error;
}
/**
 * locks_lock_inode_wait - Apply a lock to an inode
 * @inode: inode of the file to apply to
 * @fl: The lock to be applied
 *
 * Apply a POSIX or FLOCK style lock request to an inode.
 */
int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int res = 0;
	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
	case FL_POSIX:
		res = posix_lock_inode_wait(inode, fl);
		break;
	case FL_FLOCK:
		res = flock_lock_inode_wait(inode, fl);
		break;
	default:
		BUG();
	}
	return res;
}
EXPORT_SYMBOL(locks_lock_inode_wait);
/**
 * sys_flock: - flock() system call.
 * @fd: the file descriptor to lock.
 * @cmd: the type of lock to apply.
 *
 * Apply a %FL_FLOCK style lock to an open file descriptor.
 * The @cmd can be one of:
 *
 * - %LOCK_SH -- a shared lock.
 * - %LOCK_EX -- an exclusive lock.
 * - %LOCK_UN -- remove an existing lock.
 * - %LOCK_MAND -- a 'mandatory' flock. (DEPRECATED)
 *
 * %LOCK_MAND support has been removed from the kernel.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	int can_sleep, error, type;
	struct file_lock fl;
	struct fd f;

	/*
	 * LOCK_MAND locks were broken for a long time in that they never
	 * conflicted with one another and didn't prevent any sort of open,
	 * read or write activity.
	 *
	 * Just ignore these requests now, to preserve legacy behavior, but
	 * throw a warning to let people know that they don't actually work.
	 */
	if (cmd & LOCK_MAND) {
		pr_warn_once("%s(%d): Attempt to set a LOCK_MAND lock via flock(2). This support has been removed and the request ignored.\n", current->comm, current->pid);
		return 0;
	}

	type = flock_translate_cmd(cmd & ~LOCK_NB);
	if (type < 0)
		return type;

	error = -EBADF;
	f = fdget(fd);
	if (!f.file)
		return error;

	if (type != F_UNLCK && !(f.file->f_mode & (FMODE_READ | FMODE_WRITE)))
		goto out_putf;

	flock_make_lock(f.file, &fl, type);

	error = security_file_lock(f.file, fl.fl_type);
	if (error)
		goto out_putf;

	can_sleep = !(cmd & LOCK_NB);
	if (can_sleep)
		fl.fl_flags |= FL_SLEEP;

	if (f.file->f_op->flock)
		error = f.file->f_op->flock(f.file,
					    (can_sleep) ? F_SETLKW : F_SETLK,
					    &fl);
	else
		error = locks_lock_file_wait(f.file, &fl);

	locks_release_private(&fl);
 out_putf:
	fdput(f);

	return error;
}
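/*
 * Minimal userspace sketch (editorial example): LOCK_NB makes the request
 * non-blocking, so a held conflicting lock fails with EWOULDBLOCK instead
 * of sleeping:
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK)
 *		...
 *	flock(fd, LOCK_UN);
 */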
/**
 * vfs_test_lock - test file byte range lock
 * @filp: The file to test lock for
 * @fl: The lock to test; also used to hold result
 *
 * Returns -ERRNO on failure. Indicates presence of conflicting lock by
 * setting conf->fl_type to something other than F_UNLCK.
 */
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
	WARN_ON_ONCE(filp != fl->fl_file);
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, F_GETLK, fl);
	posix_test_lock(filp, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);
/**
 * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
 * @fl: The file_lock whose fl_pid should be translated
 * @ns: The namespace into which the pid should be translated
 *
 * Used to translate a fl_pid into a namespace virtual pid number
 */
static pid_t
locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
{
	pid_t vnr;
	struct pid *pid;

	if (IS_OFDLCK(fl))
		return -1;
	if (IS_REMOTELCK(fl))
		return fl->fl_pid;
	/*
	 * If the flock owner process is dead and its pid has been already
	 * freed, the translation below won't work, but we still want to show
	 * flock owner pid number in init pidns.
	 */
	if (ns == &init_pid_ns)
		return (pid_t)fl->fl_pid;

	rcu_read_lock();
	pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
	vnr = pid_nr_ns(pid, ns);
	rcu_read_unlock();
	return vnr;
}
static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}
#if BITS_PER_LONG == 32
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
{
	struct file_lock *fl;
	int error;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;
	error = -EINVAL;
	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
		goto out;

	error = flock_to_posix_lock(filp, fl, flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		fl->fl_flags |= FL_OFDLCK;
		fl->fl_owner = filp;
	}

	error = vfs_test_lock(filp, fl);
	if (error)
		goto out;

	flock->l_type = fl->fl_type;
	if (fl->fl_type != F_UNLCK) {
		error = posix_lock_to_flock(flock, fl);
		if (error)
			goto out;
	}
out:
	locks_free_lock(fl);
	return error;
}
/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found.
 *
 * A caller that doesn't care about the conflicting lock may pass NULL
 * as the final argument.
 *
 * If the filesystem defines a private ->lock() method, then @conf will
 * be left unchanged; so a caller that cares should initialize it to
 * some acceptable default.
 *
 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
 * locks, the ->lock() interface may return asynchronously, before the lock has
 * been granted or denied by the underlying filesystem, if (and only if)
 * lm_grant is set. Callers expecting ->lock() to return asynchronously
 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
 * the request is for a blocking lock. When ->lock() does return asynchronously,
 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
 * request completes.
 * If the request is for a non-blocking lock, the filesystem should return
 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
 * with the result. If the request timed out, the callback routine will return
 * a nonzero return code and the filesystem should release the lock. The
 * filesystem is also responsible for keeping a corresponding posix lock when
 * it grants a lock, so the VFS can find out which locks are locally held and
 * do the correct lock cleanup when required.
 * The underlying filesystem must not drop the kernel lock or call
 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
 * return code.
 */
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl,
		  struct file_lock *conf)
{
	WARN_ON_ONCE(filp != fl->fl_file);
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, cmd, fl);
	else
		return posix_lock_file(filp, fl, conf);
}
EXPORT_SYMBOL_GPL(vfs_lock_file);
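/*
 * Illustrative sketch of the asynchronous contract described above, for a
 * hypothetical filesystem (all myfs_* names are invented; FILE_LOCK_DEFERRED
 * and ->lm_grant() are the real interface):
 *
 *	static int myfs_lock(struct file *filp, int cmd, struct file_lock *fl)
 *	{
 *		if (myfs_must_ask_server(filp)) {	// hypothetical helper
 *			myfs_queue_lock_request(filp, fl);
 *			return FILE_LOCK_DEFERRED;
 *		}
 *		return posix_lock_file(filp, fl, NULL);
 *	}
 *
 *	// completion path, called when the server answers:
 *	static void myfs_lock_done(struct file_lock *fl, int result)
 *	{
 *		// a nonzero return from ->lm_grant() means the waiter gave
 *		// up (e.g. the request timed out); the filesystem must then
 *		// release the lock it acquired.
 *		if (fl->fl_lmops->lm_grant(fl, result))
 *			myfs_release_lock(fl);		// hypothetical helper
 *	}
 */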
static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			     struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait,
					list_empty(&fl->fl_blocked_member));
		if (error)
			break;
	}
	locks_delete_block(fl);

	return error;
}
/* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
static int
check_fmode_for_setlk(struct file_lock *fl)
{
	switch (fl->fl_type) {
	case F_RDLCK:
		if (!(fl->fl_file->f_mode & FMODE_READ))
			return -EBADF;
		break;
	case F_WRLCK:
		if (!(fl->fl_file->f_mode & FMODE_WRITE))
			return -EBADF;
	}
	return 0;
}
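/*
 * Example (illustrative only): the check above is why a read lock cannot be
 * set through a descriptor that was not opened for reading:
 *
 *	int fd = open("file", O_WRONLY);
 *	struct flock fl = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
 *	fcntl(fd, F_SETLK, &fl);	// fails with EBADF
 */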
/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock *flock)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct inode *inode = locks_inode(filp);
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	error = flock_to_posix_lock(filp, file_lock, flock);
	if (error)
		goto out;

	error = check_fmode_for_setlk(file_lock);
	if (error)
		goto out;

	/*
	 * If the cmd is requesting file-private locks, then set the
	 * FL_OFDLCK flag and override the owner.
	 */
	switch (cmd) {
	case F_OFD_SETLK:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLK;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		break;
	case F_OFD_SETLKW:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLKW;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		fallthrough;
	case F_SETLKW:
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by releasing the
	 * lock that was just acquired. There is no need to do that when we're
	 * unlocking though, or for OFD locks.
	 */
	if (!error && file_lock->fl_type != F_UNLCK &&
	    !(file_lock->fl_flags & FL_OFDLCK)) {
		struct files_struct *files = current->files;
		/*
		 * We need that spin_lock here - it prevents reordering between
		 * update of i_flctx->flc_posix and check for it done in
		 * close(). rcu_read_lock() wouldn't do.
		 */
		spin_lock(&files->file_lock);
		f = files_lookup_fd_locked(files, fd);
		spin_unlock(&files->file_lock);
		if (f != filp) {
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);
			error = -EBADF;
		}
	}
out:
	trace_fcntl_setlk(inode, file_lock, error);
	locks_free_lock(file_lock);
	return error;
}
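/*
 * Example (illustrative only): setting a write lock over the whole file via
 * an open file description (OFD) lock, which the F_OFD_SETLK case above
 * handles; l_pid must be zero on input:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,
 *		.l_pid    = 0,
 *	};
 *	if (fcntl(fd, F_OFD_SETLK, &fl) == -1)
 *		perror("F_OFD_SETLK");
 */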
#if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
{
	struct file_lock *fl;
	int error;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	error = -EINVAL;
	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
		goto out;

	error = flock64_to_posix_lock(filp, fl, flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		fl->fl_flags |= FL_OFDLCK;
		fl->fl_owner = filp;
	}

	error = vfs_test_lock(filp, fl);
	if (error)
		goto out;

	flock->l_type = fl->fl_type;
	if (fl->fl_type != F_UNLCK)
		posix_lock_to_flock64(flock, fl);

out:
	locks_free_lock(fl);
	return error;
}
/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock64 *flock)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	error = flock64_to_posix_lock(filp, file_lock, flock);
	if (error)
		goto out;

	error = check_fmode_for_setlk(file_lock);
	if (error)
		goto out;

	/*
	 * If the cmd is requesting file-private locks, then set the
	 * FL_OFDLCK flag and override the owner.
	 */
	switch (cmd) {
	case F_OFD_SETLK:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLK64;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		break;
	case F_OFD_SETLKW:
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_SETLKW64;
		file_lock->fl_flags |= FL_OFDLCK;
		file_lock->fl_owner = filp;
		fallthrough;
	case F_SETLKW64:
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by releasing the
	 * lock that was just acquired. There is no need to do that when we're
	 * unlocking though, or for OFD locks.
	 */
	if (!error && file_lock->fl_type != F_UNLCK &&
	    !(file_lock->fl_flags & FL_OFDLCK)) {
		struct files_struct *files = current->files;
		/*
		 * We need that spin_lock here - it prevents reordering between
		 * update of i_flctx->flc_posix and check for it done in
		 * close(). rcu_read_lock() wouldn't do.
		 */
		spin_lock(&files->file_lock);
		f = files_lookup_fd_locked(files, fd);
		spin_unlock(&files->file_lock);
		if (f != filp) {
			file_lock->fl_type = F_UNLCK;
			error = do_lock_file_wait(filp, cmd, file_lock);
			WARN_ON_ONCE(error);
			error = -EBADF;
		}
	}
out:
	locks_free_lock(file_lock);
	return error;
}
#endif /* BITS_PER_LONG == 32 */
/*
 * This function is called when the file is being removed
 * from the task's fd array. POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	int error;
	struct inode *inode = locks_inode(filp);
	struct file_lock lock;
	struct file_lock_context *ctx;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file().  Another process could be setting a lock on this
	 * file at the same time, but we wouldn't remove that lock anyway.
	 */
	ctx = locks_inode_context(inode);
	if (!ctx || list_empty(&ctx->flc_posix))
		return;

	locks_init_lock(&lock);
	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	error = vfs_lock_file(filp, F_SETLK, &lock, NULL);

	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
	trace_locks_remove_posix(inode, &lock, error);
}
EXPORT_SYMBOL(locks_remove_posix);
/* The i_flctx must be valid when calling into here */
static void
locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
{
	struct file_lock fl;
	struct inode *inode = locks_inode(filp);

	if (list_empty(&flctx->flc_flock))
		return;

	flock_make_lock(filp, &fl, F_UNLCK);
	fl.fl_flags |= FL_CLOSE;

	if (filp->f_op->flock)
		filp->f_op->flock(filp, F_SETLKW, &fl);
	else
		flock_lock_inode(inode, &fl);

	if (fl.fl_ops && fl.fl_ops->fl_release_private)
		fl.fl_ops->fl_release_private(&fl);
}
/* The i_flctx must be valid when calling into here */
static void
locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
{
	struct file_lock *fl, *tmp;
	LIST_HEAD(dispose);

	if (list_empty(&ctx->flc_lease))
		return;

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
		if (filp == fl->fl_file)
			lease_modify(fl, F_UNLCK, &dispose);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);

	locks_dispose_list(&dispose);
}
/*
 * This function is called on the last close of an open file.
 */
void locks_remove_file(struct file *filp)
{
	struct file_lock_context *ctx;

	ctx = locks_inode_context(locks_inode(filp));
	if (!ctx)
		return;

	/* remove any OFD locks */
	locks_remove_posix(filp, filp);

	/* remove flock locks */
	locks_remove_flock(filp, ctx);

	/* remove any leases */
	locks_remove_lease(filp, ctx);

	spin_lock(&ctx->flc_lock);
	locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
	locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
	locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
	spin_unlock(&ctx->flc_lock);
}
/**
 * vfs_cancel_lock - file byte range unblock lock
 * @filp: The file to apply the unblock to
 * @fl: The lock to be unblocked
 *
 * Used by lock managers to cancel blocked requests
 */
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
	WARN_ON_ONCE(filp != fl->fl_file);
	if (filp->f_op->lock)
		return filp->f_op->lock(filp, F_CANCELLK, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_cancel_lock);
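/*
 * Illustrative sketch (hypothetical lock manager, invented names): a manager
 * that queued a blocking request through vfs_lock_file() and received
 * FILE_LOCK_DEFERRED can withdraw the still-pending request like this:
 *
 *	if (mylm_client_went_away(block))		// hypothetical helper
 *		vfs_cancel_lock(block->b_filp, &block->b_fl);
 */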
/**
 * vfs_inode_has_locks - are any file locks held on @inode?
 * @inode: inode to check for locks
 *
 * Return true if there are any FL_POSIX or FL_FLOCK locks currently
 * set on @inode.
 */
bool vfs_inode_has_locks(struct inode *inode)
{
	struct file_lock_context *ctx;
	bool ret;

	ctx = locks_inode_context(inode);
	if (!ctx)
		return false;

	spin_lock(&ctx->flc_lock);
	ret = !list_empty(&ctx->flc_posix) || !list_empty(&ctx->flc_flock);
	spin_unlock(&ctx->flc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(vfs_inode_has_locks);
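/*
 * Example (illustrative only): a filesystem might use this helper to bail
 * out of an operation that cannot proceed while byte-range locks are held:
 *
 *	if (vfs_inode_has_locks(inode))
 *		return -EBUSY;
 */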
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct locks_iterator {
	int	li_cpu;
	loff_t	li_pos;
};
static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			loff_t id, char *pfx, int repeat)
{
	struct inode *inode = NULL;
	unsigned int fl_pid;
	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
	int type;

	fl_pid = locks_translate_pid(fl, proc_pidns);
	/*
	 * If lock owner is dead (and pid is freed) or not visible in current
	 * pidns, zero is shown as a pid value. Check lock info from
	 * init_pid_ns to get saved lock pid value.
	 */

	if (fl->fl_file != NULL)
		inode = locks_inode(fl->fl_file);

	seq_printf(f, "%lld: ", id);

	if (repeat)
		seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx);

	if (IS_POSIX(fl)) {
		if (fl->fl_flags & FL_ACCESS)
			seq_puts(f, "ACCESS");
		else if (IS_OFDLCK(fl))
			seq_puts(f, "OFDLCK");
		else
			seq_puts(f, "POSIX ");

		seq_printf(f, " %s ",
			     (inode == NULL) ? "*NOINODE*" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		seq_puts(f, "FLOCK  ADVISORY  ");
	} else if (IS_LEASE(fl)) {
		if (fl->fl_flags & FL_DELEG)
			seq_puts(f, "DELEG  ");
		else
			seq_puts(f, "LEASE  ");

		if (lease_breaking(fl))
			seq_puts(f, "BREAKING  ");
		else if (fl->fl_file)
			seq_puts(f, "ACTIVE    ");
		else
			seq_puts(f, "BREAKER   ");
	} else {
		seq_puts(f, "UNKNOWN UNKNOWN  ");
	}
	type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;

	seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
			     (type == F_RDLCK) ? "READ" : "UNLCK");
	if (inode) {
		/* userspace relies on this representation of dev_t */
		seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_puts(f, "0 EOF\n");
	}
}
static struct file_lock *get_next_blocked_member(struct file_lock *node)
{
	struct file_lock *tmp;

	/* NULL node or root node */
	if (node == NULL || node->fl_blocker == NULL)
		return NULL;

	/* Next member in the linked list could be itself */
	tmp = list_next_entry(node, fl_blocked_member);
	if (list_entry_is_head(tmp, &node->fl_blocker->fl_blocked_requests,
			       fl_blocked_member)
		|| tmp == node) {
		return NULL;
	}

	return tmp;
}
static int locks_show(struct seq_file *f, void *v)
{
	struct locks_iterator *iter = f->private;
	struct file_lock *cur, *tmp;
	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
	int level = 0;

	cur = hlist_entry(v, struct file_lock, fl_link);

	if (locks_translate_pid(cur, proc_pidns) == 0)
		return 0;

	/* View this cross-linked list as a binary tree: the first member of
	 * fl_blocked_requests is the left child of the current node, the next
	 * sibling in fl_blocked_member is the right child, and fl_blocker
	 * gives the parent, so the problem becomes a binary tree traversal.
	 */
	while (cur != NULL) {
		if (level)
			lock_get_status(f, cur, iter->li_pos, "-> ", level);
		else
			lock_get_status(f, cur, iter->li_pos, "", level);

		if (!list_empty(&cur->fl_blocked_requests)) {
			/* Turn left */
			cur = list_first_entry_or_null(&cur->fl_blocked_requests,
					struct file_lock, fl_blocked_member);
			level++;
		} else {
			/* Turn right */
			tmp = get_next_blocked_member(cur);
			/* Fall back to parent node */
			while (tmp == NULL && cur->fl_blocker != NULL) {
				cur = cur->fl_blocker;
				level--;
				tmp = get_next_blocked_member(cur);
			}
			cur = tmp;
		}
	}

	return 0;
}
static void __show_fd_locks(struct seq_file *f,
			struct list_head *head, int *id,
			struct file *filp, struct files_struct *files)
{
	struct file_lock *fl;

	list_for_each_entry(fl, head, fl_list) {

		if (filp != fl->fl_file)
			continue;
		if (fl->fl_owner != files &&
		    fl->fl_owner != filp)
			continue;

		(*id)++;
		seq_puts(f, "lock:\t");
		lock_get_status(f, fl, *id, "", 0);
	}
}
void show_fd_locks(struct seq_file *f,
		  struct file *filp, struct files_struct *files)
{
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	int id = 0;

	ctx = locks_inode_context(inode);
	if (!ctx)
		return;

	spin_lock(&ctx->flc_lock);
	__show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
	spin_unlock(&ctx->flc_lock);
}
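/*
 * Sample output (illustrative values) as it appears in
 * /proc/<pid>/fdinfo/<fd>:
 *
 *	lock:	1: POSIX  ADVISORY  WRITE 1234 08:02:131090 0 EOF
 */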
static void *locks_start(struct seq_file *f, loff_t *pos)
	__acquires(&blocked_lock_lock)
{
	struct locks_iterator *iter = f->private;

	iter->li_pos = *pos + 1;
	percpu_down_write(&file_rwsem);
	spin_lock(&blocked_lock_lock);
	return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
}

static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
	struct locks_iterator *iter = f->private;

	++iter->li_pos;
	return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
}

static void locks_stop(struct seq_file *f, void *v)
	__releases(&blocked_lock_lock)
{
	spin_unlock(&blocked_lock_lock);
	percpu_up_write(&file_rwsem);
}
static const struct seq_operations locks_seq_operations = {
	.start	= locks_start,
	.next	= locks_next,
	.stop	= locks_stop,
	.show	= locks_show,
};

static int __init proc_locks_init(void)
{
	proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
			sizeof(struct locks_iterator), NULL);
	return 0;
}
fs_initcall(proc_locks_init);
#endif
static int __init filelock_init(void)
{
	int i;

	flctx_cache = kmem_cache_create("file_lock_ctx",
			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);

	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);

	for_each_possible_cpu(i) {
		struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);

		spin_lock_init(&fll->lock);
		INIT_HLIST_HEAD(&fll->hlist);
	}

	lease_notifier_chain_init();
	return 0;
}
core_initcall(filelock_init);