// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 */
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
#include <linux/refcount.h>

#include <asm/futex.h>

#include "locking/rtmutex_common.h"
/*
 * READ this before attempting to hack on futexes!
 *
 * Basic futex operation and ordering guarantees
 * =============================================
 *
 * The waiter reads the futex value in user space and calls
 * futex_wait(). This function computes the hash bucket and acquires
 * the hash bucket lock. After that it reads the futex user space value
 * again and verifies that the data has not changed. If it has not changed
 * it enqueues itself into the hash bucket, releases the hash bucket lock
 * and schedules.
 *
 * The waker side modifies the user space value of the futex and calls
 * futex_wake(). This function computes the hash bucket and acquires the
 * hash bucket lock. Then it looks for waiters on that futex in the hash
 * bucket and wakes them.
 *
 * In futex wake up scenarios where no tasks are blocked on a futex, taking
 * the hb spinlock can be avoided and the waker can simply return. In order
 * for this optimization to work, ordering guarantees must exist so that the
 * waiter being added to the list is acknowledged when the list is
 * concurrently being checked by the waker, avoiding scenarios like the
 * following:
 *
 * CPU 0                               CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *   uval = *futex;
 *                                     *futex = newval;
 *                                     sys_futex(WAKE, futex);
 *                                       futex_wake(futex);
 *                                       if (queue_empty())
 *                                         return;
 *   if (uval == val)
 *      lock(hash_bucket(futex));
 *      queue();
 *     unlock(hash_bucket(futex));
 *     schedule();
 *
 * This would cause the waiter on CPU 0 to wait forever because it
 * missed the transition of the user space value from val to newval
 * and the waker did not find the waiter in the hash bucket queue.
 *
 * The correct serialization ensures that a waiter either observes
 * the changed user space value before blocking or is woken by a
 * concurrent waker:
 *
 * CPU 0                                 CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *
 *   waiters++; (a)
 *   smp_mb(); (A) <-- paired with -.
 *                                  |
 *   lock(hash_bucket(futex));      |
 *                                  |
 *   uval = *futex;                 |
 *                                  |        *futex = newval;
 *                                  |        sys_futex(WAKE, futex);
 *                                  |          futex_wake(futex);
 *                                  |
 *                                  `--------> smp_mb(); (B)
 *   if (uval == val)
 *     queue();
 *     unlock(hash_bucket(futex));
 *     schedule();                         if (waiters)
 *                                           lock(hash_bucket(futex));
 *   else                                    wake_waiters(futex);
 *     waiters--; (b)                        unlock(hash_bucket(futex));
 *
 * Where (A) orders the waiters increment and the futex value read through
 * atomic operations (see hb_waiters_inc) and where (B) orders the write
 * to futex and the waiters read -- this is done by the barriers for both
 * shared and private futexes in get_futex_key_refs().
 *
 * This yields the following case (where X:=waiters, Y:=futex):
 *
 *	X = Y = 0
 *
 *	w[X]=1		w[Y]=1
 *	MB		MB
 *	r[Y]=y		r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 * the guarantee that we cannot both miss the futex variable change and the
 * enqueue.
 *
 * Note that a new waiter is accounted for in (a) even when it is possible that
 * the wait call can return error, in which case we backtrack from it in (b).
 * Refer to the comment in queue_lock().
 *
 * Similarly, in order to account for waiters being requeued on another
 * address we always increment the waiters for the destination bucket before
 * acquiring the lock. It then decrements them again after releasing it -
 * the code that actually moves the futex(es) between hash buckets (requeue_futex)
 * will do the additional required waiter count housekeeping. This is done for
 * double_lock_hb() and double_unlock_hb(), respectively.
 */
#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
#define futex_cmpxchg_enabled 1
#else
static int  __read_mostly futex_cmpxchg_enabled;
#endif
/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#ifdef CONFIG_MMU
# define FLAGS_SHARED		0x01
#else
/*
 * NOMMU does not have per process address space. Let the compiler optimize
 * code away.
 */
# define FLAGS_SHARED		0x00
#endif
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04
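
/*
 * Illustrative mapping (userspace view; the actual decoding happens in
 * do_futex(), outside this file): a call such as
 *
 *	syscall(SYS_futex, uaddr, FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME,
 *		val, &timeout, NULL, FUTEX_BITSET_MATCH_ANY);
 *
 * without FUTEX_PRIVATE_FLAG reaches the functions below with
 * FLAGS_SHARED | FLAGS_CLOCKRT set, plus FLAGS_HAS_TIMEOUT once a
 * timeout is armed.
 */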
/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	refcount_t refcount;

	union futex_key key;
} __randomize_layout;
/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
} __randomize_layout;
static const struct futex_q futex_q_init = {
	/* list gets initialized in queue_me()*/
	.key = FUTEX_KEY_INIT,
	.bitset = FUTEX_BITSET_MATCH_ANY
};
/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location. Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	atomic_t waiters;
	spinlock_t lock;
	struct plist_head chain;
} ____cacheline_aligned_in_smp;
/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in hash_futex()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	struct futex_hash_bucket *queues;
	unsigned long            hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues   (__futex_data.queues)
#define futex_hashsize (__futex_data.hashsize)
/*
 * Fault injections for futexes.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
	struct fault_attr attr;

	bool ignore_private;
} fail_futex = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_private = false,
};

static int __init setup_fail_futex(char *str)
{
	return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);

static bool should_fail_futex(bool fshared)
{
	if (fail_futex.ignore_private && !fshared)
		return false;

	return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_futex", NULL,
					&fail_futex.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	debugfs_create_bool("ignore-private", mode, dir,
			    &fail_futex.ignore_private);
	return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else
static inline bool should_fail_futex(bool fshared)
{
	return false;
}
#endif /* CONFIG_FAIL_FUTEX */
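
/*
 * Example (CONFIG_FAIL_FUTEX kernels): booting with "fail_futex=1,100,0,-1"
 * - the common fault_attr format <interval>,<probability>,<space>,<times> -
 * makes every injection site report a fault, which is handy for exercising
 * the -EFAULT retry paths below.
 */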
#ifdef CONFIG_COMPAT
static void compat_exit_robust_list(struct task_struct *curr);
#else
static inline void compat_exit_robust_list(struct task_struct *curr) { }
#endif
static inline void futex_get_mm(union futex_key *key)
{
	mmgrab(key->private.mm);
	/*
	 * Ensure futex_get_mm() implies a full barrier such that
	 * get_futex_key() implies a full barrier. This is relied upon
	 * as smp_mb(); (B), see the ordering comment above.
	 */
	smp_mb__after_atomic();
}
/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_inc(&hb->waiters);
	/*
	 * Full barrier (A), see the ordering comment above.
	 */
	smp_mb__after_atomic();
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_dec(&hb->waiters);
#endif
}

static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	return atomic_read(&hb->waiters);
#else
	return 1;
#endif
}
/**
 * hash_futex - Return the hash bucket in the global hash
 * @key:	Pointer to the futex key for which the hash is calculated
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket in the global hash.
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
			  key->both.offset);

	return &futex_queues[hash & (futex_hashsize - 1)];
}
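
/*
 * Note on the masking above: futex_hashsize is rounded up to a power of
 * two when the table is allocated (see futex_init()), so
 * "hash & (futex_hashsize - 1)" is just the cheaper spelling of
 * "hash % futex_hashsize".
 */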
/**
 * match_futex - Check whether two futex keys are equal
 * @key1:	Pointer to key1
 * @key2:	Pointer to key2
 *
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}
/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 *
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	/*
	 * On MMU less systems futexes are always "private" as there is no per
	 * process address space. We need the smp wmb nevertheless - yes,
	 * arch/blackfin has MMU less SMP ...
	 */
	if (!IS_ENABLED(CONFIG_MMU)) {
		smp_mb(); /* explicit smp_mb(); (B) */
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		smp_mb();		/* explicit smp_mb(); (B) */
		break;
	case FUT_OFF_MMSHARED:
		futex_get_mm(key); /* implies smp_mb(); (B) */
		break;
	default:
		/*
		 * Private futexes do not hold reference on an inode or
		 * mm, therefore the only purpose of calling get_futex_key_refs
		 * is because we need the barrier for the lockless waiter check.
		 */
		smp_mb(); /* explicit smp_mb(); (B) */
	}
}
/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held. This is
 * a no-op for private futexes, see comment in the get
 * counterpart.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	if (!IS_ENABLED(CONFIG_MMU))
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}
/**
 * futex_setup_timer - set up the sleeping hrtimer.
 * @time:	ptr to the given timeout value
 * @timeout:	the hrtimer_sleeper structure to be set up
 * @flags:	futex flags
 * @range_ns:	optional range in ns
 *
 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
 *	   value given
 */
static inline struct hrtimer_sleeper *
futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
		  int flags, u64 range_ns)
{
	if (!time)
		return NULL;

	hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
	/*
	 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
	 * effectively the same as calling hrtimer_set_expires().
	 */
	hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);

	return timeout;
}
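
/*
 * Typical usage sketch (futex_wait() and futex_lock_pi() later in this
 * file follow this pattern):
 *
 *	to = futex_setup_timer(abs_time, &timeout, flags,
 *			       current->timer_slack_ns);
 *	...
 *	if (to) {
 *		hrtimer_cancel(&to->timer);
 *		destroy_hrtimer_on_stack(&to->timer);
 *	}
 */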
/*
 * Generate a machine wide unique identifier for this inode.
 *
 * This relies on u64 not wrapping in the life-time of the machine; which with
 * 1ns resolution means almost 585 years.
 *
 * This further relies on the fact that a well formed program will not unmap
 * the file while it has a (shared) futex waiting on it. This mapping will have
 * a file reference which pins the mount and inode.
 *
 * If for some reason an inode gets evicted and read back in again, it will get
 * a new sequence number and will _NOT_ match, even though it is the exact same
 * file.
 *
 * It is important that match_futex() will never have a false-positive, esp.
 * for PI futexes that can mess up the state. The above argues that false-negatives
 * are only possible for malformed programs.
 */
static u64 get_inode_sequence_number(struct inode *inode)
{
	static atomic64_t i_seq;
	u64 old;

	/* Does the inode already have a sequence number? */
	old = atomic64_read(&inode->i_sequence);
	if (likely(old))
		return old;

	for (;;) {
		u64 new = atomic64_add_return(1, &i_seq);
		if (WARN_ON_ONCE(!new))
			continue;

		old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
		if (old)
			return old;
		return new;
	}
}
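
/*
 * Worked example of the loop above: two CPUs race on an inode with
 * i_sequence == 0. Both draw a fresh number from i_seq, but only one
 * atomic64_cmpxchg_relaxed() installs its value; the loser sees a
 * non-zero 'old' and returns the winner's number, so all users agree
 * on a single sequence number for the inode.
 */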
/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: FUTEX_READ,
 *		FUTEX_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in @key on success.
 *
 * For shared mappings (when @fshared), the key is:
 *   ( inode->i_sequence, page->index, offset_within_page )
 * [ also see get_inode_sequence_number() ]
 *
 * For private mappings (or when !@fshared), the key is:
 *   ( current->mm, address, 0 )
 *
 * This allows (cross process, where applicable) identification of the futex
 * without keeping the page pinned for the duration of the FUTEX_WAIT.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_access rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page, *tail;
	struct address_space *mapping;
	int err, ro = 0;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(uaddr, sizeof(u32))))
		return -EFAULT;

	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we don't even have to find the underlying vma.
	 * Note : We do have to check 'uaddr' is a valid user address,
	 *        but access_ok() should be faster than find_vma()
	 */
	if (!fshared) {
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);  /* implies smp_mb(); (B) */
		return 0;
	}
again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == FUTEX_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

	/*
	 * The treatment of mapping from this point on is critical. The page
	 * lock protects many things but in this context the page lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the page lock is not needed in all cases being
	 * considered here and page lock forces unnecessary serialization.
	 * From this point on, mapping will be re-verified if necessary and
	 * page lock will be acquired only if it is unavoidable.
	 *
	 * Mapping checks require the head page for any compound page so the
	 * head page and mapping is looked up now. For anonymous pages, it
	 * does not matter if the page splits in the future as the key is
	 * based on the address. For filesystem-backed pages, the tail is
	 * required as the index of the page determines the key. For
	 * base pages, there is no tail page and tail == page.
	 */
	tail = page;
	page = compound_head(page);
	mapping = READ_ONCE(page->mapping);

	/*
	 * If page->mapping is NULL, then it cannot be a PageAnon
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail). And we hold a reference,
	 * so refcount care in invalidate_complete_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for page->mapping.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Page lock is required to identify which special case above
		 * applies. If this is really a shmem page then the page lock
		 * will prevent unexpected transitions.
		 */
		lock_page(page);
		shmem_swizzled = PageSwapCache(page) || page->mapping;
		unlock_page(page);
		put_page(page);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}
	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored on an anonymous page, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(fshared)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;

	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the page->mapping must be traversed. Ordinarily this should
		 * be stabilised under page lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update the radix tree or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(page->mapping) != mapping) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.i_seq = get_inode_sequence_number(inode);
		key->shared.pgoff = basepage_index(tail);
		rcu_read_unlock();
	}

	get_futex_key_refs(key); /* implies smp_mb(); (B) */

out:
	put_page(page);
	return err;
}
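
/*
 * Worked example, private case: with 4K pages, a PROCESS_PRIVATE futex at
 * user address 0x7f1200001004 yields key->both.offset = 0x004,
 * key->private.address = 0x7f1200001000 and key->private.mm = current->mm -
 * no page table walk, no pinning. Because futex addresses are u32 aligned,
 * the two low bits of both.offset are always clear, which is what frees
 * them up to carry FUT_OFF_INODE / FUT_OFF_MMSHARED in the shared cases.
 */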
static inline void put_futex_key(union futex_key *key)
{
	drop_futex_key_refs(key);
}
/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}
/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}
static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*dest, from);
	pagefault_enable();

	return ret ? -EFAULT : 0;
}
/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	refcount_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}
static struct futex_pi_state *alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void get_pi_state(struct futex_pi_state *pi_state)
{
	WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
}
/*
 * Drops a reference to the pi_state object and frees or caches it
 * when the last reference is gone.
 */
static void put_pi_state(struct futex_pi_state *pi_state)
{
	if (!pi_state)
		return;

	if (!refcount_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		struct task_struct *owner;

		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		owner = pi_state->owner;
		if (owner) {
			raw_spin_lock(&owner->pi_lock);
			list_del_init(&pi_state->list);
			raw_spin_unlock(&owner->pi_lock);
		}
		rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	}

	if (current->pi_state_cache) {
		kfree(pi_state);
	} else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		refcount_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}
#ifdef CONFIG_FUTEX_PI

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
static void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {
		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);

		/*
		 * We can race against put_pi_state() removing itself from the
		 * list (a waiter going away). put_pi_state() will first
		 * decrement the reference count and then modify the list, so
		 * its possible to see the list entry but fail this reference
		 * acquire.
		 *
		 * In that case; drop the locks to let put_pi_state() make
		 * progress and retry the loop.
		 */
		if (!refcount_inc_not_zero(&pi_state->refcount)) {
			raw_spin_unlock_irq(&curr->pi_lock);
			cpu_relax();
			raw_spin_lock_irq(&curr->pi_lock);
			continue;
		}
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		raw_spin_lock(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			/* retain curr->pi_lock for the loop invariant */
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			spin_unlock(&hb->lock);
			put_pi_state(pi_state);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;

		raw_spin_unlock(&curr->pi_lock);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
		spin_unlock(&hb->lock);

		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}
#else
static inline void exit_pi_state_list(struct task_struct *curr) { }
#endif
/*
 * We need to check the following states:
 *
 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 *
 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 *
 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 *
 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 *
 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 *
 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 *
 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 *
 * [1]	Indicates that the kernel can acquire the futex atomically. We
 *	came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 *
 * [2]	Valid, if TID does not belong to a kernel thread. If no matching
 *      thread is found then it indicates that the owner TID has died.
 *
 * [3]	Invalid. The waiter is queued on a non PI futex
 *
 * [4]	Valid state after exit_robust_list(), which sets the user space
 *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 *
 * [5]	The user space value got manipulated between exit_robust_list()
 *	and exit_pi_state_list()
 *
 * [6]	Valid state after exit_pi_state_list() which sets the new owner in
 *	the pi_state but cannot access the user space value.
 *
 * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 *
 * [8]	Owner and user space value match
 *
 * [9]	There is no transient state which sets the user space TID to 0
 *	except exit_robust_list(), but this is indicated by the
 *	FUTEX_OWNER_DIED bit. See [4]
 *
 * [10] There is no transient state which leaves owner and user space
 *	TID out of sync. Except one error case where the kernel is denied
 *	write access to the user address, see fixup_pi_state_owner().
 *
 *
 * Serialization and lifetime rules:
 *
 * hb->lock:
 *
 *	hb -> futex_q, relation
 *	futex_q -> pi_state, relation
 *
 *	(cannot be raw because hb can contain arbitrary amount
 *	 of futex_q's)
 *
 * pi_mutex->wait_lock:
 *
 *	{uval, pi_state}
 *
 *	(and pi_mutex 'obviously')
 *
 * p->pi_lock:
 *
 *	p->pi_state_list -> pi_state->list, relation
 *
 * pi_state->refcount:
 *
 *	pi_state lifetime
 *
 *
 * Lock order:
 *
 *   hb->lock
 *     pi_mutex->wait_lock
 *       p->pi_lock
 *
 */
/*
 * Validate that the existing waiter has a pi_state and sanity check
 * the pi_state against the user space value. If correct, attach to
 * it.
 */
static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
			      struct futex_pi_state *pi_state,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;
	u32 uval2;
	int ret;

	/*
	 * Userspace might have messed up non-PI and PI futexes [3]
	 */
	if (unlikely(!pi_state))
		return -EINVAL;

	/*
	 * We get here with hb->lock held, and having found a
	 * futex_top_waiter(). This means that futex_lock_pi() of said futex_q
	 * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
	 * which in turn means that futex_lock_pi() still has a reference on
	 * our pi_state.
	 *
	 * The waiter holding a reference on @pi_state also protects against
	 * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
	 * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
	 * free pi_state before we can take a reference ourselves.
	 */
	WARN_ON(!refcount_read(&pi_state->refcount));

	/*
	 * Now that we have a pi_state, we can acquire wait_lock
	 * and do the state validation.
	 */
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	/*
	 * Since {uval, pi_state} is serialized by wait_lock, and our current
	 * uval was read without holding it, it can have changed. Verify it
	 * still is what we expect it to be, otherwise retry the entire
	 * operation.
	 */
	if (get_futex_value_locked(&uval2, uaddr))
		goto out_efault;

	if (uval != uval2)
		goto out_eagain;

	/*
	 * Handle the owner died case:
	 */
	if (uval & FUTEX_OWNER_DIED) {
		/*
		 * exit_pi_state_list sets owner to NULL and wakes the
		 * topmost waiter. The task which acquires the
		 * pi_state->rt_mutex will fixup owner.
		 */
		if (!pi_state->owner) {
			/*
			 * No pi state owner, but the user space TID
			 * is not 0. Inconsistent state. [5]
			 */
			if (pid)
				goto out_einval;
			/*
			 * Take a ref on the state and return success. [4]
			 */
			goto out_attach;
		}

		/*
		 * If TID is 0, then either the dying owner has not
		 * yet executed exit_pi_state_list() or some waiter
		 * acquired the rtmutex in the pi state, but did not
		 * yet fixup the TID in user space.
		 *
		 * Take a ref on the state and return success. [6]
		 */
		if (!pid)
			goto out_attach;
	} else {
		/*
		 * If the owner died bit is not set, then the pi_state
		 * must have an owner. [7]
		 */
		if (!pi_state->owner)
			goto out_einval;
	}

	/*
	 * Bail out if user space manipulated the futex value. If pi
	 * state exists then the owner TID must be the same as the
	 * user space TID. [9/10]
	 */
	if (pid != task_pid_vnr(pi_state->owner))
		goto out_einval;

out_attach:
	get_pi_state(pi_state);
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	*ps = pi_state;
	return 0;

out_einval:
	ret = -EINVAL;
	goto out_error;

out_eagain:
	ret = -EAGAIN;
	goto out_error;

out_efault:
	ret = -EFAULT;
	goto out_error;

out_error:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	return ret;
}
/**
 * wait_for_owner_exiting - Block until the owner has exited
 * @exiting:	Pointer to the exiting task
 *
 * Caller must hold a refcount on @exiting.
 */
static void wait_for_owner_exiting(int ret, struct task_struct *exiting)
{
	if (ret != -EBUSY) {
		WARN_ON_ONCE(exiting);
		return;
	}

	if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
		return;

	mutex_lock(&exiting->futex_exit_mutex);
	/*
	 * No point in doing state checking here. If the waiter got here
	 * while the task was in exec()->exec_futex_release() then it can
	 * have any FUTEX_STATE_* value when the waiter has acquired the
	 * mutex. OK, if running, EXITING or DEAD if it reached exit()
	 * already. Highly unlikely and not a problem. Just one more round
	 * through the futex maze.
	 */
	mutex_unlock(&exiting->futex_exit_mutex);

	put_task_struct(exiting);
}
static int handle_exit_race(u32 __user *uaddr, u32 uval,
			    struct task_struct *tsk)
{
	u32 uval2;

	/*
	 * If the futex exit state is not yet FUTEX_STATE_DEAD, tell the
	 * caller that the alleged owner is busy.
	 */
	if (tsk && tsk->futex_state != FUTEX_STATE_DEAD)
		return -EBUSY;

	/*
	 * Reread the user space value to handle the following situation:
	 *
	 * CPU0				CPU1
	 *
	 * sys_exit()			sys_futex()
	 *  do_exit()			 futex_lock_pi()
	 *                                futex_lock_pi_atomic()
	 *   exit_signals(tsk)		    No waiters:
	 *   tsk->flags |= PF_EXITING;	    *uaddr == 0x00000PID
	 *  mm_release(tsk)		    Set waiter bit
	 *   exit_robust_list(tsk) {	    *uaddr = 0x80000PID;
	 *      Set owner died		    attach_to_pi_owner() {
	 *    *uaddr = 0xC0000000;	     tsk = get_task(PID);
	 *   }				     if (!tsk->flags & PF_EXITING) {
	 *				   ...
	 *				       attach();
	 *   tsk->futex_state =               } else {
	 *	FUTEX_STATE_DEAD;              if (tsk->futex_state !=
	 *					  FUTEX_STATE_DEAD)
	 *				         return -EAGAIN;
	 *				       return -ESRCH; <--- FAIL
	 *				     }
	 *
	 * Returning ESRCH unconditionally is wrong here because the
	 * user space value has been changed by the exiting task.
	 *
	 * The same logic applies to the case where the exiting task is
	 * already gone.
	 */
	if (get_futex_value_locked(&uval2, uaddr))
		return -EFAULT;

	/* If the user space value has changed, try again. */
	if (uval2 != uval)
		return -EAGAIN;

	/*
	 * The exiting task did not have a robust list, the robust list was
	 * corrupted or the user space value in *uaddr is simply bogus.
	 * Give up and tell user space.
	 */
	return -ESRCH;
}
/*
 * Lookup the task for the TID provided from user space and attach to
 * it after doing proper sanity checks.
 */
static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
			      struct futex_pi_state **ps,
			      struct task_struct **exiting)
{
	pid_t pid = uval & FUTEX_TID_MASK;
	struct futex_pi_state *pi_state;
	struct task_struct *p;

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0 [1]
	 *
	 * The !pid check is paranoid. None of the call sites should end up
	 * with pid == 0, but better safe than sorry. Let the caller retry
	 */
	if (!pid)
		return -EAGAIN;
	p = find_get_task_by_vpid(pid);
	if (!p)
		return handle_exit_race(uaddr, uval, NULL);

	if (unlikely(p->flags & PF_KTHREAD)) {
		put_task_struct(p);
		return -EPERM;
	}

	/*
	 * We need to look at the task state to figure out whether the
	 * task is exiting. To protect against the change of the task state
	 * in futex_exit_release(), we do this protected by p->pi_lock:
	 */
	raw_spin_lock_irq(&p->pi_lock);
	if (unlikely(p->futex_state != FUTEX_STATE_OK)) {
		/*
		 * The task is on the way out. When the futex state is
		 * FUTEX_STATE_DEAD, we know that the task has finished
		 * the cleanup:
		 */
		int ret = handle_exit_race(uaddr, uval, p);

		raw_spin_unlock_irq(&p->pi_lock);
		/*
		 * If the owner task is between FUTEX_STATE_EXITING and
		 * FUTEX_STATE_DEAD then store the task pointer and keep
		 * the reference on the task struct. The calling code will
		 * drop all locks, wait for the task to reach
		 * FUTEX_STATE_DEAD and then drop the refcount. This is
		 * required to prevent a live lock when the current task
		 * preempted the exiting task between the two states.
		 */
		if (ret == -EBUSY)
			*exiting = p;
		else
			put_task_struct(p);
		return ret;
	}

	/*
	 * No existing pi state. First waiter. [2]
	 *
	 * This creates pi_state, we have hb->lock held, this means nothing can
	 * observe this state, wait_lock is irrelevant.
	 */
	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make @p
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	/*
	 * Assignment without holding pi_state->pi_mutex.wait_lock is safe
	 * because there is no concurrency as the object is not published yet.
	 */
	pi_state->owner = p;
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}
static int lookup_pi_state(u32 __user *uaddr, u32 uval,
			   struct futex_hash_bucket *hb,
			   union futex_key *key, struct futex_pi_state **ps,
			   struct task_struct **exiting)
{
	struct futex_q *top_waiter = futex_top_waiter(hb, key);

	/*
	 * If there is a waiter on that futex, validate it and
	 * attach to the pi_state when the validation succeeds.
	 */
	if (top_waiter)
		return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);

	/*
	 * We are the first waiter - try to look up the owner based on
	 * @uval and attach to it.
	 */
	return attach_to_pi_owner(uaddr, uval, key, ps, exiting);
}
static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
{
	int err;
	u32 uninitialized_var(curval);

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
	if (unlikely(err))
		return err;

	/* If user space value changed, let the caller retry */
	return curval != uval ? -EAGAIN : 0;
}
/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for.  This will
 *			be "current" except in the case of requeue pi.
 * @exiting:		Pointer to store the task pointer of the owner task
 *			which is in the middle of exiting
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Return:
 *  -  0 - ready to wait;
 *  -  1 - acquired the lock;
 *  - <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 *
 * @exiting is only set when the return value is -EBUSY. If so, this holds
 * a refcount on the exiting task on return and the caller needs to drop it
 * after waiting for the exit to complete.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task,
				struct task_struct **exiting,
				int set_waiters)
{
	u32 uval, newval, vpid = task_pid_vnr(task);
	struct futex_q *top_waiter;
	int ret;

	/*
	 * Read the user space value first so we can validate a few
	 * things before proceeding further.
	 */
	if (get_futex_value_locked(&uval, uaddr))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
		return -EDEADLK;

	if ((unlikely(should_fail_futex(true))))
		return -EDEADLK;

	/*
	 * Lookup existing state first. If it exists, try to attach to
	 * its pi_state.
	 */
	top_waiter = futex_top_waiter(hb, key);
	if (top_waiter)
		return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);

	/*
	 * No waiter and user TID is 0. We are here because the
	 * waiters or the owner died bit is set or called from
	 * requeue_cmp_pi or for whatever reason something took the
	 * syscall.
	 */
	if (!(uval & FUTEX_TID_MASK)) {
		/*
		 * We take over the futex. No other waiters and the user space
		 * TID is 0. We preserve the owner died bit.
		 */
		newval = uval & FUTEX_OWNER_DIED;
		newval |= vpid;

		/* The futex requeue_pi code can enforce the waiters bit */
		if (set_waiters)
			newval |= FUTEX_WAITERS;

		ret = lock_pi_update_atomic(uaddr, uval, newval);
		/* If the take over worked, return 1 */
		return ret < 0 ? ret : 1;
	}

	/*
	 * First waiter. Set the waiters bit before attaching ourself to
	 * the owner. If owner tries to unlock, it will be forced into
	 * the kernel and blocked on hb->lock.
	 */
	newval = uval | FUTEX_WAITERS;
	ret = lock_pi_update_atomic(uaddr, uval, newval);
	if (ret)
		return ret;
	/*
	 * If the update of the user space value succeeded, we try to
	 * attach to the owner. If that fails, no harm done, we only
	 * set the FUTEX_WAITERS bit in the user space variable.
	 */
	return attach_to_pi_owner(uaddr, newval, key, ps, exiting);
}
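
/*
 * Worked example of the takeover path above, assuming the caller's vpid
 * is 1234 (0x4d2): an orphaned futex word of 0x40000000 (FUTEX_OWNER_DIED,
 * TID 0) becomes 0x400004d2 in a single cmpxchg and the function returns 1.
 * If userspace changed the word in the meantime, the cmpxchg misses and
 * the caller retries on -EAGAIN.
 */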
/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
		return;
	lockdep_assert_held(q->lock_ptr);

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	hb_waiters_dec(hb);
}
/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed. Callers
 * must ensure to later call wake_up_q() for the actual
 * wakeups to occur.
 */
static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
{
	struct task_struct *p = q->task;

	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
		return;

	get_task_struct(p);
	__unqueue_futex(q);
	/*
	 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
	 * is written, without taking any locks. This is possible in the event
	 * of a spurious wakeup, for example. A memory barrier is required here
	 * to prevent the following store to lock_ptr from getting ahead of the
	 * plist_del in __unqueue_futex().
	 */
	smp_store_release(&q->lock_ptr, NULL);

	/*
	 * Queue the task for later wakeup for after we've released
	 * the hb->lock. wake_q_add() grabs reference to p.
	 */
	wake_q_add_safe(wake_q, p);
}
/*
 * Caller must hold a reference on @pi_state.
 */
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
{
	u32 uninitialized_var(curval), newval;
	struct task_struct *new_owner;
	bool postunlock = false;
	DEFINE_WAKE_Q(wake_q);
	int ret = 0;

	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
	if (WARN_ON_ONCE(!new_owner)) {
		/*
		 * As per the comment in futex_unlock_pi() this should not happen.
		 *
		 * When this happens, give up our locks and try again, giving
		 * the futex_lock_pi() instance time to complete, either by
		 * waiting on the rtmutex or removing itself from the futex
		 * queue.
		 */
		ret = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * We pass it to the next owner. The WAITERS bit is always kept
	 * enabled while there is PI state around. We cleanup the owner
	 * died bit, because we are the owner.
	 */
	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

	if (unlikely(should_fail_futex(true))) {
		ret = -EFAULT;
		goto out_unlock;
	}

	ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
	if (!ret && (curval != uval)) {
		/*
		 * If an unconditional UNLOCK_PI operation (user space did not
		 * try the TID->0 transition) raced with a waiter setting the
		 * FUTEX_WAITERS flag between get_user() and locking the hash
		 * bucket lock, retry the operation.
		 */
		if ((FUTEX_TID_MASK & curval) == uval)
			ret = -EAGAIN;
		else
			ret = -EINVAL;
	}

	if (ret)
		goto out_unlock;

	/*
	 * This is a point of no return; once we modify the uval there is no
	 * going back and subsequent operations must not fail.
	 */

	raw_spin_lock(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock(&pi_state->owner->pi_lock);

	raw_spin_lock(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock(&new_owner->pi_lock);

	postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);

out_unlock:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

	if (postunlock)
		rt_mutex_postunlock(&wake_q);

	return ret;
}
/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}
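
/*
 * Example: requeueing between buckets A and B with A < B always takes
 * A->lock first, no matter which of the two is the source. This
 * consistent ordering (plus SINGLE_DEPTH_NESTING for the inner lock) is
 * what avoids ABBA deadlocks and satisfies lockdep when two hb locks
 * must be held at once.
 */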
/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;
	DEFINE_WAKE_Q(wake_q);

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);

	/* Make sure we really have tasks to wakeup */
	if (!hb_waiters_pending(hb))
		goto out_put_key;

	spin_lock(&hb->lock);

	plist_for_each_entry_safe(this, next, &hb->chain, list) {
		if (match_futex (&this->key, &key)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	wake_up_q(&wake_q);
out_put_key:
	put_futex_key(&key);
out:
	return ret;
}
static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
{
	unsigned int op =	  (encoded_op & 0x70000000) >> 28;
	unsigned int cmp =	  (encoded_op & 0x0f000000) >> 24;
	int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
	int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
		if (oparg < 0 || oparg > 31) {
			char comm[sizeof(current->comm)];
			/*
			 * kill this print and return -EINVAL when userspace
			 * is sane again
			 */
			pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
					get_task_comm(comm, current), oparg);
			oparg &= 31;
		}
		oparg = 1 << oparg;
	}

	if (!access_ok(uaddr, sizeof(u32)))
		return -EFAULT;

	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	switch (cmp) {
	case FUTEX_OP_CMP_EQ:
		return oldval == cmparg;
	case FUTEX_OP_CMP_NE:
		return oldval != cmparg;
	case FUTEX_OP_CMP_LT:
		return oldval < cmparg;
	case FUTEX_OP_CMP_GE:
		return oldval >= cmparg;
	case FUTEX_OP_CMP_LE:
		return oldval <= cmparg;
	case FUTEX_OP_CMP_GT:
		return oldval > cmparg;
	default:
		return -ENOSYS;
	}
}
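
/*
 * Decoding example: encoded_op == 0x14001000, i.e.
 * FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0), atomically adds 1 to
 * *uaddr and reports whether the old value was greater than 0.
 */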
/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	int ret, op_ret;
	DEFINE_WAKE_Q(wake_q);

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {
		double_unlock_hb(hb1, hb2);

		if (!IS_ENABLED(CONFIG_MMU) ||
		    unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) {
			/*
			 * we don't get EFAULT from MMU faults if we don't have
			 * an MMU, but we might get them from range checking
			 */
			ret = op_ret;
			goto out_put_keys;
		}

		if (op_ret == -EFAULT) {
			ret = fault_in_user_writeable(uaddr2);
			if (ret)
				goto out_put_keys;
		}

		if (!(flags & FLAGS_SHARED)) {
			cond_resched();
			goto retry_private;
		}

		put_futex_key(&key2);
		put_futex_key(&key1);
		cond_resched();
		goto retry;
	}

	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (match_futex (&this->key, &key1)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				goto out_unlock;
			}
			mark_wake_futex(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		op_ret = 0;
		plist_for_each_entry_safe(this, next, &hb2->chain, list) {
			if (match_futex (&this->key, &key2)) {
				if (this->pi_state || this->rt_waiter) {
					ret = -EINVAL;
					goto out_unlock;
				}
				mark_wake_futex(&wake_q, this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

out_unlock:
	double_unlock_hb(hb1, hb2);
	wake_up_q(&wake_q);
out_put_keys:
	put_futex_key(&key2);
out_put_key1:
	put_futex_key(&key1);
out:
	return ret;
}
/**
 * requeue_futex() - Requeue a futex_q from one hb to another
 * @q:		the futex_q to requeue
 * @hb1:	the source hash_bucket
 * @hb2:	the target hash_bucket
 * @key2:	the new key for the requeued futex_q
 */
static inline
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
		   struct futex_hash_bucket *hb2, union futex_key *key2)
{

	/*
	 * If key1 and key2 hash to the same bucket, no need to
	 * requeue.
	 */
	if (likely(&hb1->chain != &hb2->chain)) {
		plist_del(&q->list, &hb1->chain);
		hb_waiters_dec(hb1);
		hb_waiters_inc(hb2);
		plist_add(&q->list, &hb2->chain);
		q->lock_ptr = &hb2->lock;
	}
	get_futex_key_refs(key2);
	q->key = *key2;
}
/**
 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
 * @q:		the futex_q
 * @key:	the key of the requeue target futex
 * @hb:		the hash_bucket of the requeue target futex
 *
 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
 * target futex if it is uncontended or via a lock steal.  Set the futex_q key
 * to the requeue target futex so the waiter can detect the wakeup on the right
 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
 * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
 * to protect access to the pi_state to fixup the owner later.  Must be called
 * with both q->lock_ptr and hb->lock held.
 */
static inline
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
			   struct futex_hash_bucket *hb)
{
	get_futex_key_refs(key);
	q->key = *key;

	__unqueue_futex(q);

	WARN_ON(!q->rt_waiter);
	q->rt_waiter = NULL;

	q->lock_ptr = &hb->lock;

	wake_up_state(q->task, TASK_NORMAL);
}
/**
 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
 * @pifutex:		the user address of the to futex
 * @hb1:		the from futex hash bucket, must be locked by the caller
 * @hb2:		the to futex hash bucket, must be locked by the caller
 * @key1:		the from futex key
 * @key2:		the to futex key
 * @ps:			address to store the pi_state pointer
 * @exiting:		Pointer to store the task pointer of the owner task
 *			which is in the middle of exiting
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Try and get the lock on behalf of the top waiter if we can do it atomically.
 * Wake the top waiter if we succeed.  If the caller specified set_waiters,
 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
 * hb1 and hb2 must be held by the caller.
 *
 * @exiting is only set when the return value is -EBUSY. If so, this holds
 * a refcount on the exiting task on return and the caller needs to drop it
 * after waiting for the exit to complete.
 *
 * Return:
 *  -  0 - failed to acquire the lock atomically;
 *  - >0 - acquired the lock, return value is vpid of the top_waiter
 *  - <0 - error
 */
static int
futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
			   struct futex_hash_bucket *hb2, union futex_key *key1,
			   union futex_key *key2, struct futex_pi_state **ps,
			   struct task_struct **exiting, int set_waiters)
{
	struct futex_q *top_waiter = NULL;
	u32 curval;
	int ret, vpid;

	if (get_futex_value_locked(&curval, pifutex))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Find the top_waiter and determine if there are additional waiters.
	 * If the caller intends to requeue more than 1 waiter to pifutex,
	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
	 * as we have means to handle the possible fault.  If not, don't set
	 * the bit unnecessarily as it will force the subsequent unlock to enter
	 * the kernel.
	 */
	top_waiter = futex_top_waiter(hb1, key1);

	/* There are no waiters, nothing for us to do. */
	if (!top_waiter)
		return 0;

	/* Ensure we requeue to the expected futex. */
	if (!match_futex(top_waiter->requeue_pi_key, key2))
		return -EINVAL;

	/*
	 * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
	 * the contended case or if set_waiters is 1.  The pi_state is returned
	 * in ps in contended cases.
	 */
	vpid = task_pid_vnr(top_waiter->task);
	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
				   exiting, set_waiters);
	if (ret == 1) {
		requeue_pi_wake_futex(top_waiter, key2, hb2);
		return vpid;
	}
	return ret;
}
/**
 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 * @uaddr1:	source futex user address
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @uaddr2:	target futex user address
 * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
 * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
 * @cmpval:	@uaddr1 expected value (or %NULL)
 * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
 *		pi futex (pi to pi requeue is not supported)
 *
 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
 * uaddr2 atomically on behalf of the top waiter.
 *
 * Return:
 *  - >=0 - on success, the number of tasks requeued or woken;
 *  -  <0 - on error
 */
static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
			 u32 __user *uaddr2, int nr_wake, int nr_requeue,
			 u32 *cmpval, int requeue_pi)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	int drop_count = 0, task_count = 0, ret;
	struct futex_pi_state *pi_state = NULL;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	DEFINE_WAKE_Q(wake_q);

	if (nr_wake < 0 || nr_requeue < 0)
		return -EINVAL;

	/*
	 * When PI not supported: return -ENOSYS if requeue_pi is true,
	 * consequently the compiler knows requeue_pi is always false past
	 * this point which will optimize away all the conditional code
	 * further down.
	 */
	if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
		return -ENOSYS;

	if (requeue_pi) {
		/*
		 * Requeue PI only works on two distinct uaddrs. This
		 * check is only valid for private futexes. See below.
		 */
		if (uaddr1 == uaddr2)
			return -EINVAL;

		/*
		 * requeue_pi requires a pi_state, try to allocate it now
		 * without any locks in case it fails.
		 */
		if (refill_pi_state_cache())
			return -ENOMEM;
		/*
		 * requeue_pi must wake as many tasks as it can, up to nr_wake
		 * + nr_requeue, since it acquires the rt_mutex prior to
		 * returning to userspace, so as to not leave the rt_mutex with
		 * waiters and no owner.  However, second and third wake-ups
		 * cannot be predicted as they involve race conditions with the
		 * first wake and a fault while looking up the pi_state.  Both
		 * pthread_cond_signal() and pthread_cond_broadcast() should
		 * use nr_wake=1.
		 */
		if (nr_wake != 1)
			return -EINVAL;
	}

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
			    requeue_pi ? FUTEX_WRITE : FUTEX_READ);
	if (unlikely(ret != 0))
		goto out_put_key1;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (requeue_pi && match_futex(&key1, &key2)) {
		ret = -EINVAL;
		goto out_put_keys;
	}

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	hb_waiters_inc(hb2);
	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = get_futex_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);

			ret = get_user(curval, uaddr1);
			if (ret)
				goto out_put_keys;

			if (!(flags & FLAGS_SHARED))
				goto retry_private;

			put_futex_key(&key2);
			put_futex_key(&key1);
			goto retry;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}
	if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
		struct task_struct *exiting = NULL;

		/*
		 * Attempt to acquire uaddr2 and wake the top waiter. If we
		 * intend to requeue waiters, force setting the FUTEX_WAITERS
		 * bit.  We force this here where we are able to easily handle
		 * faults rather in the requeue loop below.
		 */
		ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
						 &key2, &pi_state,
						 &exiting, nr_requeue);

		/*
		 * At this point the top_waiter has either taken uaddr2 or is
		 * waiting on it.  If the former, then the pi_state will not
		 * exist yet, look it up one more time to ensure we have a
		 * reference to it. If the lock was taken, ret contains the
		 * vpid of the top waiter task.
		 * If the lock was not taken, we have pi_state and an initial
		 * refcount on it. In case of an error we have nothing.
		 */
		if (ret > 0) {
			WARN_ON(pi_state);
			drop_count++;
			task_count++;
			/*
			 * If we acquired the lock, then the user space value
			 * of uaddr2 should be vpid. It cannot be changed by
			 * the top waiter as it is blocked on hb2 lock if it
			 * tries to do so. If something fiddled with it behind
			 * our back the pi state lookup might unearth it. So
			 * we rather use the known value than rereading and
			 * handing potential crap to lookup_pi_state.
			 *
			 * If that call succeeds then we have pi_state and an
			 * initial refcount on it.
			 */
			ret = lookup_pi_state(uaddr2, ret, hb2, &key2,
					      &pi_state, &exiting);
		}

		switch (ret) {
		case 0:
			/* We hold a reference on the pi state. */
			break;

			/* If the above failed, then pi_state is NULL */
		case -EFAULT:
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			ret = fault_in_user_writeable(uaddr2);
			if (!ret)
				goto retry;
			goto out;
		case -EBUSY:
		case -EAGAIN:
			/*
			 * Two reasons for this:
			 * - EBUSY: Owner is exiting and we just wait for the
			 *   exit to complete.
			 * - EAGAIN: The user space value changed.
			 */
			double_unlock_hb(hb1, hb2);
			hb_waiters_dec(hb2);
			put_futex_key(&key2);
			put_futex_key(&key1);
			/*
			 * Handle the case where the owner is in the middle of
			 * exiting. Wait for the exit to complete otherwise
			 * this task might loop forever, aka. live lock.
			 */
			wait_for_owner_exiting(ret, exiting);
			cond_resched();
			goto retry;
		default:
			goto out_unlock;
		}
	}
2199 plist_for_each_entry_safe(this, next
, &hb1
->chain
, list
) {
2200 if (task_count
- nr_wake
>= nr_requeue
)
2203 if (!match_futex(&this->key
, &key1
))
2207 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
2208 * be paired with each other and no other futex ops.
2210 * We should never be requeueing a futex_q with a pi_state,
2211 * which is awaiting a futex_unlock_pi().
2213 if ((requeue_pi
&& !this->rt_waiter
) ||
2214 (!requeue_pi
&& this->rt_waiter
) ||
2221 * Wake nr_wake waiters. For requeue_pi, if we acquired the
2222 * lock, we already woke the top_waiter. If not, it will be
2223 * woken by futex_unlock_pi().
2225 if (++task_count
<= nr_wake
&& !requeue_pi
) {
2226 mark_wake_futex(&wake_q
, this);
2230 /* Ensure we requeue to the expected futex for requeue_pi. */
2231 if (requeue_pi
&& !match_futex(this->requeue_pi_key
, &key2
)) {
2237 * Requeue nr_requeue waiters and possibly one more in the case
2238 * of requeue_pi if we couldn't acquire the lock atomically.
2242 * Prepare the waiter to take the rt_mutex. Take a
2243 * refcount on the pi_state and store the pointer in
2244 * the futex_q object of the waiter.
2246 get_pi_state(pi_state
);
2247 this->pi_state
= pi_state
;
2248 ret
= rt_mutex_start_proxy_lock(&pi_state
->pi_mutex
,
2253 * We got the lock. We do neither drop the
2254 * refcount on pi_state nor clear
2255 * this->pi_state because the waiter needs the
2256 * pi_state for cleaning up the user space
2257 * value. It will drop the refcount after
2260 requeue_pi_wake_futex(this, &key2
, hb2
);
2265 * rt_mutex_start_proxy_lock() detected a
2266 * potential deadlock when we tried to queue
2267 * that waiter. Drop the pi_state reference
2268 * which we took above and remove the pointer
2269 * to the state from the waiters futex_q
2272 this->pi_state
= NULL
;
2273 put_pi_state(pi_state
);
2275 * We stop queueing more waiters and let user
2276 * space deal with the mess.
2281 requeue_futex(this, hb1
, hb2
, &key2
);
2286 * We took an extra initial reference to the pi_state either
2287 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
2288 * need to drop it here again.
2290 put_pi_state(pi_state
);
2293 double_unlock_hb(hb1
, hb2
);
2295 hb_waiters_dec(hb2
);
2298 * drop_futex_key_refs() must be called outside the spinlocks. During
2299 * the requeue we moved futex_q's from the hash bucket at key1 to the
2300 * one at key2 and updated their key pointer. We no longer need to
2301 * hold the references to key1.
2303 while (--drop_count
>= 0)
2304 drop_futex_key_refs(&key1
);
2307 put_futex_key(&key2
);
2309 put_futex_key(&key1
);
2311 return ret
? ret
: task_count
;
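/*
 * Illustrative userspace sketch (an assumption for the example, not
 * kernel code, not compiled): a condvar-style broadcast can use
 * FUTEX_CMP_REQUEUE to wake one waiter and requeue the rest onto the
 * mutex word, avoiding a thundering herd. The futex() helper and the
 * variable names are invented for this sketch.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <limits.h>

static long futex(unsigned int *uaddr, int op, unsigned int val,
		  unsigned long val2, unsigned int *uaddr2, unsigned int val3)
{
	return syscall(SYS_futex, uaddr, op, val, val2, uaddr2, val3);
}

static unsigned int cond_seq;		/* waited on with FUTEX_WAIT */
static unsigned int mutex_word;		/* requeue target */

static void cond_broadcast(void)
{
	unsigned int seq = __atomic_load_n(&cond_seq, __ATOMIC_RELAXED);

	/*
	 * Wake one waiter, requeue up to INT_MAX others onto &mutex_word.
	 * Fails with EAGAIN if cond_seq no longer equals seq, which is
	 * exactly the *cmpval check in futex_requeue() above.
	 */
	futex(&cond_seq, FUTEX_CMP_REQUEUE, 1, INT_MAX, &mutex_word, seq);
}
#endif
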
/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
	__acquires(&hb->lock)
{
	struct futex_hash_bucket *hb;

	hb = hash_futex(&q->key);

	/*
	 * Increment the counter before taking the lock so that
	 * a potential waker won't miss a to-be-slept task that is
	 * waiting for the spinlock. This is safe as all queue_lock()
	 * users end up calling queue_me(). Similarly, for housekeeping,
	 * decrement the counter at queue_unlock() when some error has
	 * occurred and we don't end up adding the task to the list.
	 */
	hb_waiters_inc(hb); /* implies smp_mb(); (A) */

	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}

static inline void
queue_unlock(struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	spin_unlock(&hb->lock);
	hb_waiters_dec(hb);
}

static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 *   (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
	plist_add(&q->list, &hb->chain);
	q->task = current;
}

/**
 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
 * @q:	The futex_q to enqueue
 * @hb:	The destination hash bucket
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * queue_me() is typically paired with exactly one call to unqueue_me(). The
 * exceptions involve the PI related operations, which may use unqueue_me_pi()
 * or nothing if the unqueue is done as part of the wake process and the unqueue
 * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
 * an example).
 */
static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	__queue_me(q, hb);
	spin_unlock(&hb->lock);
}

/**
 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
 * be paired with exactly one earlier call to queue_me().
 *
 * Return:
 *  - 1 - if the futex_q was still queued (and we unqueued it);
 *  - 0 - if the futex_q was already removed by the waking thread
 */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	/*
	 * q->lock_ptr can change between this read and the following spin_lock.
	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
	 * optimizing lock_ptr out of the logic below.
	 */
	lock_ptr = READ_ONCE(q->lock_ptr);
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock. This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock(). It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock(). It cannot,
		 * however, change back to the original value. Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		__unqueue_futex(q);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}

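/*
 * The retry pattern above in miniature (an illustrative sketch, not
 * kernel code, not compiled): when the lock protecting an object can
 * itself be switched while we block, snapshot the lock pointer, acquire
 * it, and revalidate the snapshot under the lock. All names here are
 * invented for the example.
 */
#if 0
#include <pthread.h>
#include <stdatomic.h>

struct node {
	pthread_mutex_t *_Atomic lock_ptr;	/* may be switched by a waker */
};

static void lock_node(struct node *n)
{
	for (;;) {
		pthread_mutex_t *lock = atomic_load(&n->lock_ptr);

		pthread_mutex_lock(lock);
		/* Revalidate: did the lock pointer move while we blocked? */
		if (lock == atomic_load(&n->lock_ptr))
			return;			/* correct lock held */
		pthread_mutex_unlock(lock);	/* wrong lock, retry */
	}
}
#endif
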
/*
 * PI futexes can not be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
	__releases(q->lock_ptr)
{
	__unqueue_futex(q);

	BUG_ON(!q->pi_state);
	put_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);
}

static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *argowner)
{
	struct futex_pi_state *pi_state = q->pi_state;
	u32 uval, uninitialized_var(curval), newval;
	struct task_struct *oldowner, *newowner;
	u32 newtid;
	int ret, err = 0;

	lockdep_assert_held(q->lock_ptr);

	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	oldowner = pi_state->owner;

	/*
	 * We are here because either:
	 *
	 *  - we stole the lock and pi_state->owner needs updating to reflect
	 *    that (@argowner == current),
	 *
	 * or:
	 *
	 *  - someone stole our lock and we need to fix things to point to the
	 *    new owner (@argowner == NULL).
	 *
	 * Either way, we have to replace the TID in the user space variable.
	 * This must be atomic as we have to preserve the owner died bit here.
	 *
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory read-only for COW purposes.
	 *
	 * Modifying pi_state _before_ the user space value would leave the
	 * pi_state in an inconsistent state when we fault here, because we
	 * need to drop the locks to handle the fault. This might be observed
	 * in the PID check in lookup_pi_state.
	 */
retry:
	if (!argowner) {
		if (oldowner != current) {
			/*
			 * We raced against a concurrent self; things are
			 * already fixed up. Nothing to do.
			 */
			ret = 0;
			goto out_unlock;
		}

		if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
			/* We got the lock after all, nothing to fix. */
			ret = 0;
			goto out_unlock;
		}

		/*
		 * Since we just failed the trylock; there must be an owner.
		 */
		newowner = rt_mutex_owner(&pi_state->pi_mutex);
		BUG_ON(!newowner);
	} else {
		WARN_ON_ONCE(argowner != current);
		if (oldowner == current) {
			/*
			 * We raced against a concurrent self; things are
			 * already fixed up. Nothing to do.
			 */
			ret = 0;
			goto out_unlock;
		}
		newowner = argowner;
	}

	newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
	/* Owner died? */
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	err = get_futex_value_locked(&uval, uaddr);
	if (err)
		goto handle_err;

	for (;;) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
		if (err)
			goto handle_err;

		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
	if (pi_state->owner != NULL) {
		raw_spin_lock(&pi_state->owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		raw_spin_unlock(&pi_state->owner->pi_lock);
	}

	pi_state->owner = newowner;

	raw_spin_lock(&newowner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &newowner->pi_state_list);
	raw_spin_unlock(&newowner->pi_lock);
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

	return 0;

	/*
	 * In order to reschedule or handle a page fault, we need to drop the
	 * locks here. In the case of a fault, this gives the other task
	 * (either the highest priority waiter itself or the task which stole
	 * the rtmutex) the chance to try the fixup of the pi_state. So once we
	 * are back from handling the fault we need to check the pi_state after
	 * reacquiring the locks and before trying to do another fixup. When
	 * the fixup has been done already we simply return.
	 *
	 * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
	 * drop hb->lock since the caller owns the hb -> futex_q relation.
	 * Dropping the pi_mutex->wait_lock requires the state revalidate.
	 */
handle_err:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	spin_unlock(q->lock_ptr);

	switch (err) {
	case -EFAULT:
		ret = fault_in_user_writeable(uaddr);
		break;

	case -EAGAIN:
		cond_resched();
		ret = 0;
		break;

	default:
		WARN_ON_ONCE(1);
		ret = err;
		break;
	}

	spin_lock(q->lock_ptr);
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner) {
		ret = 0;
		goto out_unlock;
	}

	if (ret)
		goto out_unlock;

	goto retry;

out_unlock:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	return ret;
}

static long futex_wait_restart(struct restart_block *restart);

/**
 * fixup_owner() - Post lock pi_state and corner case management
 * @uaddr:	user address of the futex
 * @q:		futex_q (contains pi_state and access to the rt_mutex)
 * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to cleanup
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
 * Return:
 *  - 1 - success, lock taken;
 *  - 0 - success, lock not taken;
 *  - <0 - on error (-EFAULT)
 */
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
	int ret = 0;

	if (locked) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case:
		 *
		 * Speculative pi_state->owner read (we don't hold wait_lock);
		 * since we own the lock pi_state->owner == current is the
		 * stable state, anything else needs more attention.
		 */
		if (q->pi_state->owner != current)
			ret = fixup_pi_state_owner(uaddr, q, current);
		goto out;
	}

	/*
	 * If we didn't get the lock; check if anybody stole it from us. In
	 * that case, we need to fix up the uval to point to them instead of
	 * us, otherwise bad things happen. [10]
	 *
	 * Another speculative read; pi_state->owner == current is unstable
	 * but needs our attention.
	 */
	if (q->pi_state->owner == current) {
		ret = fixup_pi_state_owner(uaddr, q, NULL);
		goto out;
	}

	/*
	 * Paranoia check. If we did not take the lock, then we should not be
	 * the owner of the rt_mutex.
	 */
	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
				"pi-state %p\n", ret,
				q->pi_state->pi_mutex.owner,
				q->pi_state->owner);
	}

out:
	return ret ? ret : locked;
}

/**
 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 * @hb:		the futex hash bucket, must be locked by the caller
 * @q:		the futex_q to queue up on
 * @timeout:	the prepared hrtimer_sleeper, or null for no timeout
 */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
				struct hrtimer_sleeper *timeout)
{
	/*
	 * The task state is guaranteed to be set before another task can
	 * wake it. set_current_state() is implemented using smp_store_mb() and
	 * queue_me() calls spin_unlock() upon completion, both serializing
	 * access to the hash list and forcing another memory barrier.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	queue_me(q, hb);

	/* Arm the timer */
	if (timeout)
		hrtimer_sleeper_start_expires(timeout, HRTIMER_MODE_ABS);

	/*
	 * If we have been removed from the hash list, then another task
	 * has tried to wake us, and we can skip the call to schedule().
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired, current will already be
		 * flagged for rescheduling. Only call schedule if there
		 * is no timeout, or if it has yet to expire.
		 */
		if (!timeout || timeout->task)
			freezable_schedule();
	}
	__set_current_state(TASK_RUNNING);
}

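/*
 * The sleep side above follows the canonical kernel wait pattern. An
 * illustrative sketch (not compiled, names invented for the example):
 * set the task state first, then test the wakeup condition, then
 * schedule. The matching waker updates the condition before issuing the
 * wakeup, so at least one of the two sides observes the other.
 */
#if 0
	/* sleeper */
	set_current_state(TASK_INTERRUPTIBLE);	/* implies a barrier */
	if (!condition)				/* re-check after the barrier */
		schedule();
	__set_current_state(TASK_RUNNING);

	/* waker */
	condition = true;			/* publish the condition ... */
	wake_up_process(task);			/* ... before waking the task */
#endif
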
/**
 * futex_wait_setup() - Prepare to wait on a futex
 * @uaddr:	the futex userspace address
 * @val:	the expected value
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @q:		the associated futex_q
 * @hb:		storage for hash_bucket pointer to be returned to caller
 *
 * Setup the futex_q and locate the hash_bucket. Get the futex value and
 * compare it with the expected value. Handle atomic faults internally.
 * Return with the hb lock held and a q.key reference on success, and unlocked
 * with no q.key reference on failure.
 *
 * Return:
 *  - 0 - uaddr contains val and hb has been locked;
 *  - <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
 */
static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
			   struct futex_q *q, struct futex_hash_bucket **hb)
{
	u32 uval;
	int ret;

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond. If we locked the hash-bucket after testing *uaddr, that
	 * would open a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * On the other hand, we insert q and release the hash-bucket only
	 * after testing *uaddr. This guarantees that futex_wait() will NOT
	 * absorb a wakeup if *uaddr does not match the desired values
	 * while the syscall executes.
	 */
retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, FUTEX_READ);
	if (unlikely(ret != 0))
		return ret;

retry_private:
	*hb = queue_lock(q);

	ret = get_futex_value_locked(&uval, uaddr);

	if (ret) {
		queue_unlock(*hb);

		ret = get_user(uval, uaddr);
		if (ret)
			goto out;

		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		put_futex_key(&q->key);
		goto retry;
	}

	if (uval != val) {
		queue_unlock(*hb);
		ret = -EWOULDBLOCK;
	}

out:
	if (ret)
		put_futex_key(&q->key);
	return ret;
}

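/*
 * Userspace side of the protocol documented above (an illustrative
 * sketch, not kernel code, not compiled): the waiter only blocks while
 * the futex word still holds the expected value; the kernel re-performs
 * the same check under the hash bucket lock. The variable names are
 * invented for this sketch.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>

static _Atomic unsigned int futex_word;

static void wait_for_change(unsigned int expected)
{
	while (atomic_load(&futex_word) == expected) {
		/* Fails with EAGAIN if the word no longer equals expected. */
		syscall(SYS_futex, &futex_word, FUTEX_WAIT, expected,
			NULL, NULL, 0);
	}
}

static void publish_and_wake(unsigned int newval)
{
	atomic_store(&futex_word, newval);		/* change first ... */
	syscall(SYS_futex, &futex_word, FUTEX_WAKE, 1,	/* ... then wake */
		NULL, NULL, 0);
}
#endif
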
static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
		      ktime_t *abs_time, u32 bitset)
{
	struct hrtimer_sleeper timeout, *to;
	struct restart_block *restart;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int ret;

	if (!bitset)
		return -EINVAL;
	q.bitset = bitset;

	to = futex_setup_timer(abs_time, &timeout, flags,
			       current->timer_slack_ns);
retry:
	/*
	 * Prepare to wait on uaddr. On success, holds hb lock and increments
	 * q.key refs.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out;

	/* queue_me and wait for wakeup, timeout, or a signal. */
	futex_wait_queue_me(hb, &q, to);

	/* If we were woken (and unqueued), we succeeded, whatever. */
	ret = 0;
	/* unqueue_me() drops q.key ref */
	if (!unqueue_me(&q))
		goto out;
	ret = -ETIMEDOUT;
	if (to && !to->task)
		goto out;

	/*
	 * We expect signal_pending(current), but we might be the
	 * victim of a spurious wakeup as well.
	 */
	if (!signal_pending(current))
		goto retry;

	ret = -ERESTARTSYS;
	if (!abs_time)
		goto out;

	restart = &current->restart_block;
	restart->fn = futex_wait_restart;
	restart->futex.uaddr = uaddr;
	restart->futex.val = val;
	restart->futex.time = *abs_time;
	restart->futex.bitset = bitset;
	restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;

	ret = -ERESTART_RESTARTBLOCK;

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}

static long futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = restart->futex.uaddr;
	ktime_t t, *tp = NULL;

	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
		t = restart->futex.time;
		tp = &t;
	}
	restart->fn = do_no_restart_syscall;

	return (long)futex_wait(uaddr, restart->futex.flags,
				restart->futex.val, tp, restart->futex.bitset);
}

/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block as a consequence of relying
 * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
 * a 0 value of the futex too.)
 *
 * Also serves as the futex trylock_pi() implementation, with the
 * corresponding trylock semantics.
 */
static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
			 ktime_t *time, int trylock)
{
	struct hrtimer_sleeper timeout, *to;
	struct futex_pi_state *pi_state = NULL;
	struct task_struct *exiting = NULL;
	struct rt_mutex_waiter rt_waiter;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	int res, ret;

	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

	if (refill_pi_state_cache())
		return -ENOMEM;

	to = futex_setup_timer(time, &timeout, FLAGS_CLOCKRT, 0);

retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	hb = queue_lock(&q);

	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,
				   &exiting, 0);
	if (unlikely(ret)) {
		/*
		 * Atomic work succeeded and we got the lock,
		 * or failed. Either way, we do _not_ block.
		 */
		switch (ret) {
		case 1:
			/* We got the lock. */
			ret = 0;
			goto out_unlock_put_key;
		case -EFAULT:
			goto uaddr_faulted;
		case -EBUSY:
		case -EAGAIN:
			/*
			 * Two reasons for this:
			 * - EBUSY: Task is exiting and we just wait for the
			 *   exit to complete.
			 * - EAGAIN: The user space value changed.
			 */
			queue_unlock(hb);
			put_futex_key(&q.key);
			/*
			 * Handle the case where the owner is in the middle of
			 * exiting. Wait for the exit to complete otherwise
			 * this task might loop forever, aka. live lock.
			 */
			wait_for_owner_exiting(ret, exiting);
			cond_resched();
			goto retry;
		default:
			goto out_unlock_put_key;
		}
	}

	WARN_ON(!q.pi_state);

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	__queue_me(&q, hb);

	if (trylock) {
		ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
		goto no_block;
	}

	rt_mutex_init_waiter(&rt_waiter);

	/*
	 * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
	 * hold it while doing rt_mutex_start_proxy(), because then it will
	 * include hb->lock in the blocking chain, even though we'll not in
	 * fact hold it while blocking. This will lead it to report -EDEADLK
	 * and BUG when futex_unlock_pi() interleaves with this.
	 *
	 * Therefore acquire wait_lock while holding hb->lock, but drop the
	 * latter before calling __rt_mutex_start_proxy_lock(). This
	 * interleaves with futex_unlock_pi() -- which does a similar lock
	 * handoff -- such that the latter can observe the futex_q::pi_state
	 * before __rt_mutex_start_proxy_lock() is done.
	 */
	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
	spin_unlock(q.lock_ptr);
	/*
	 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
	 * such that futex_unlock_pi() is guaranteed to observe the waiter when
	 * it sees the futex_q::pi_state.
	 */
	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);

	if (ret) {
		if (ret == 1)
			ret = 0;
		goto cleanup;
	}

	if (unlikely(to))
		hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);

	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);

cleanup:
	spin_lock(q.lock_ptr);
	/*
	 * If we failed to acquire the lock (deadlock/signal/timeout), we must
	 * first acquire the hb->lock before removing the lock from the
	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
	 * lists consistent.
	 *
	 * In particular; it is important that futex_unlock_pi() can not
	 * observe this inconsistency.
	 */
	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
		ret = 0;

no_block:
	/*
	 * Fixup the pi_state owner and possibly acquire the lock if we
	 * haven't already.
	 */
	res = fixup_owner(uaddr, &q, !ret);
	/*
	 * If fixup_owner() returned an error, propagate that. If it acquired
	 * the lock, clear our -ETIMEDOUT or -EINTR.
	 */
	if (res)
		ret = (res < 0) ? res : 0;

	/*
	 * If fixup_owner() faulted and was unable to handle the fault, unlock
	 * it and return the fault to userspace.
	 */
	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
		pi_state = q.pi_state;
		get_pi_state(pi_state);
	}

	/* Unqueue and drop the lock */
	unqueue_me_pi(&q);

	if (pi_state) {
		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);
	}

	goto out_put_key;

out_unlock_put_key:
	queue_unlock(hb);

out_put_key:
	put_futex_key(&q.key);
out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret != -EINTR ? ret : -ERESTARTNOINTR;

uaddr_faulted:
	queue_unlock(hb);

	ret = fault_in_user_writeable(uaddr);
	if (ret)
		goto out_put_key;

	if (!(flags & FLAGS_SHARED))
		goto retry_private;

	put_futex_key(&q.key);
	goto retry;
}

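/*
 * Userspace counterpart of the slowpath above (an illustrative sketch,
 * not compiled): a PI mutex lock is an atomic 0 -> TID transition in
 * user space; only on contention does the thread fall back to
 * FUTEX_LOCK_PI. The helper name is an assumption, and gettid() assumes
 * a glibc recent enough to provide it.
 */
#if 0
#define _GNU_SOURCE
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>

static void pi_lock(_Atomic unsigned int *futex_word)
{
	unsigned int zero = 0;
	unsigned int tid = gettid();

	/* Fastpath: uncontended 0 -> TID transition, no syscall. */
	if (atomic_compare_exchange_strong(futex_word, &zero, tid))
		return;

	/* Slowpath: the kernel queues us on the rt_mutex and does PI. */
	syscall(SYS_futex, futex_word, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
}
#endif
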
/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{
	u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
	union futex_key key = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb;
	struct futex_q *top_waiter;
	int ret;

	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
	if ((uval & FUTEX_TID_MASK) != vpid)
		return -EPERM;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_WRITE);
	if (ret)
		return ret;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);

	/*
	 * Check waiters first. We do not trust user space values at
	 * all and we at least want to know if user space fiddled
	 * with the futex value instead of blindly unlocking.
	 */
	top_waiter = futex_top_waiter(hb, &key);
	if (top_waiter) {
		struct futex_pi_state *pi_state = top_waiter->pi_state;

		ret = -EINVAL;
		if (!pi_state)
			goto out_unlock;

		/*
		 * If current does not own the pi_state then the futex is
		 * inconsistent and user space fiddled with the futex value.
		 */
		if (pi_state->owner != current)
			goto out_unlock;

		get_pi_state(pi_state);
		/*
		 * By taking wait_lock while still holding hb->lock, we ensure
		 * there is no point where we hold neither; and therefore
		 * wake_futex_pi() must observe a state consistent with what we
		 * observed.
		 *
		 * In particular; this forces __rt_mutex_start_proxy() to
		 * complete such that we're guaranteed to observe the
		 * rt_waiter. Also see the WARN in wake_futex_pi().
		 */
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		spin_unlock(&hb->lock);

		/* drops pi_state->pi_mutex.wait_lock */
		ret = wake_futex_pi(uaddr, uval, pi_state);

		put_pi_state(pi_state);

		/*
		 * Success, we're done! No tricky corner cases.
		 */
		if (!ret)
			goto out_putkey;
		/*
		 * The atomic access to the futex value generated a
		 * pagefault, so retry the user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		/*
		 * An unconditional UNLOCK_PI op raced against a waiter
		 * setting the FUTEX_WAITERS bit. Try again.
		 */
		if (ret == -EAGAIN)
			goto pi_retry;
		/*
		 * wake_futex_pi has detected invalid state. Tell user
		 * space.
		 */
		goto out_putkey;
	}

	/*
	 * We have no kernel internal state, i.e. no waiters in the
	 * kernel. Waiters which are about to queue themselves are stuck
	 * on hb->lock. So we can safely ignore them. We do neither
	 * preserve the WAITERS bit nor the OWNER_DIED one. We are the
	 * owner.
	 */
	if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
		spin_unlock(&hb->lock);
		switch (ret) {
		case -EFAULT:
			goto pi_faulted;

		case -EAGAIN:
			goto pi_retry;

		default:
			WARN_ON_ONCE(1);
			goto out_putkey;
		}
	}

	/*
	 * If uval has changed, let user space handle it.
	 */
	ret = (curval == uval) ? 0 : -EAGAIN;

out_unlock:
	spin_unlock(&hb->lock);
out_putkey:
	put_futex_key(&key);
	return ret;

pi_retry:
	put_futex_key(&key);
	cond_resched();
	goto retry;

pi_faulted:
	put_futex_key(&key);

	ret = fault_in_user_writeable(uaddr);
	if (!ret)
		goto retry;

	return ret;
}

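/*
 * The matching userspace unlock (an illustrative sketch, not compiled):
 * a TID -> 0 transition in user space; if that fails (e.g. FUTEX_WAITERS
 * is set), user space must not overwrite the word itself and instead
 * calls FUTEX_UNLOCK_PI so the slowpath above can hand the lock to the
 * top waiter. Helper names and gettid() availability are assumptions.
 */
#if 0
static void pi_unlock(_Atomic unsigned int *futex_word)
{
	unsigned int tid = gettid();

	/* Fastpath: we own it and nobody waits: TID -> 0, no syscall. */
	if (atomic_compare_exchange_strong(futex_word, &tid, 0))
		return;

	/* Waiters present (or state changed): let the kernel unlock. */
	syscall(SYS_futex, futex_word, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
}
#endif
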
/**
 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 * @hb:		the hash_bucket futex_q was originally enqueued on
 * @q:		the futex_q woken while waiting to be requeued
 * @key2:	the futex_key of the requeue target futex
 * @timeout:	the timeout associated with the wait (NULL if none)
 *
 * Detect if the task was woken on the initial futex as opposed to the requeue
 * target futex. If so, determine if it was a timeout or a signal that caused
 * the wakeup and return the appropriate error code to the caller. Must be
 * called with the hb lock held.
 *
 * Return:
 *  - 0 = no early wakeup detected;
 *  - <0 = -ETIMEDOUT or -ERESTARTNOINTR
 */
static inline
int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
				   struct futex_q *q, union futex_key *key2,
				   struct hrtimer_sleeper *timeout)
{
	int ret = 0;

	/*
	 * With the hb lock held, we avoid races while we process the wakeup.
	 * We only need to hold hb (and not hb2) to ensure atomicity as the
	 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
	 * It can't be requeued from uaddr2 to something else since we don't
	 * support a PI aware source futex for requeue.
	 */
	if (!match_futex(&q->key, key2)) {
		WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
		/*
		 * We were woken prior to requeue by a timeout or a signal.
		 * Unqueue the futex_q and determine which it was.
		 */
		plist_del(&q->list, &hb->chain);
		hb_waiters_dec(hb);

		/* Handle spurious wakeups gracefully */
		ret = -EWOULDBLOCK;
		if (timeout && !timeout->task)
			ret = -ETIMEDOUT;
		else if (signal_pending(current))
			ret = -ERESTARTNOINTR;
	}
	return ret;
}

/**
 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
 * @uaddr:	the futex we initially wait on (non-pi)
 * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
 *		the same type, no requeueing from private to shared, etc.
 * @val:	the expected value of uaddr
 * @abs_time:	absolute timeout
 * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
 * @uaddr2:	the pi futex we will take prior to returning to user-space
 *
 * The caller will wait on uaddr and will be requeued by futex_requeue() to
 * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
 * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
 * without one, the pi logic would not know which task to boost/deboost, if
 * there was a need to.
 *
 * We call schedule in futex_wait_queue_me() when we enqueue and return there
 * via the following:
 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
 * 2) wakeup on uaddr2 after a requeue
 * 3) signal
 * 4) timeout
 *
 * If 3, cleanup and return -ERESTARTNOINTR.
 *
 * If 2, we may then block on trying to take the rt_mutex and return via:
 * 5) successful lock
 * 6) signal
 * 7) timeout
 * 8) other lock acquisition failure
 *
 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
 *
 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
 *
 * Return:
 *  - 0 - On success;
 *  - <0 - On error
 */
static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
				 u32 val, ktime_t *abs_time, u32 bitset,
				 u32 __user *uaddr2)
{
	struct hrtimer_sleeper timeout, *to;
	struct futex_pi_state *pi_state = NULL;
	struct rt_mutex_waiter rt_waiter;
	struct futex_hash_bucket *hb;
	union futex_key key2 = FUTEX_KEY_INIT;
	struct futex_q q = futex_q_init;
	int res, ret;

	if (!IS_ENABLED(CONFIG_FUTEX_PI))
		return -ENOSYS;

	if (uaddr == uaddr2)
		return -EINVAL;

	if (!bitset)
		return -EINVAL;

	to = futex_setup_timer(abs_time, &timeout, flags,
			       current->timer_slack_ns);

	/*
	 * The waiter is allocated on our stack, manipulated by the requeue
	 * code while we sleep on uaddr.
	 */
	rt_mutex_init_waiter(&rt_waiter);

	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
	if (unlikely(ret != 0))
		goto out;

	q.bitset = bitset;
	q.rt_waiter = &rt_waiter;
	q.requeue_pi_key = &key2;

	/*
	 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
	 * count.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out_key2;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (match_futex(&q.key, &key2)) {
		queue_unlock(hb);
		ret = -EINVAL;
		goto out_put_keys;
	}

	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
	futex_wait_queue_me(hb, &q, to);

	spin_lock(&hb->lock);
	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
	spin_unlock(&hb->lock);
	if (ret)
		goto out_put_keys;

	/*
	 * In order for us to be here, we know our q.key == key2, and since
	 * we took the hb->lock above, we also know that futex_requeue() has
	 * completed and we no longer have to concern ourselves with a wakeup
	 * race with the atomic proxy lock acquisition by the requeue code. The
	 * futex_requeue dropped our key1 reference and incremented our key2
	 * reference count.
	 */

	/* Check if the requeue code acquired the second futex for us. */
	if (!q.rt_waiter) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case.
		 */
		if (q.pi_state && (q.pi_state->owner != current)) {
			spin_lock(q.lock_ptr);
			ret = fixup_pi_state_owner(uaddr2, &q, current);
			if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
				pi_state = q.pi_state;
				get_pi_state(pi_state);
			}
			/*
			 * Drop the reference to the pi state which
			 * the requeue_pi() code acquired for us.
			 */
			put_pi_state(q.pi_state);
			spin_unlock(q.lock_ptr);
		}
	} else {
		struct rt_mutex *pi_mutex;

		/*
		 * We have been woken up by futex_unlock_pi(), a timeout, or a
		 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
		 * the pi_state.
		 */
		WARN_ON(!q.pi_state);
		pi_mutex = &q.pi_state->pi_mutex;
		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);

		spin_lock(q.lock_ptr);
		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
			ret = 0;

		debug_rt_mutex_free_waiter(&rt_waiter);
		/*
		 * Fixup the pi_state owner and possibly acquire the lock if we
		 * haven't already.
		 */
		res = fixup_owner(uaddr2, &q, !ret);
		/*
		 * If fixup_owner() returned an error, propagate that. If it
		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
		 */
		if (res)
			ret = (res < 0) ? res : 0;

		/*
		 * If fixup_pi_state_owner() faulted and was unable to handle
		 * the fault, unlock the rt_mutex and return the fault to
		 * userspace.
		 */
		if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
			pi_state = q.pi_state;
			get_pi_state(pi_state);
		}

		/* Unqueue and drop the lock. */
		unqueue_me_pi(&q);
	}

	if (pi_state) {
		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);
	}

	if (ret == -EINTR) {
		/*
		 * We've already been requeued, but cannot restart by calling
		 * futex_lock_pi() directly. We could restart this syscall, but
		 * it would detect that the user space "val" changed and return
		 * -EWOULDBLOCK. Save the overhead of the restart and return
		 * -EWOULDBLOCK directly.
		 */
		ret = -EWOULDBLOCK;
	}

out_put_keys:
	put_futex_key(&q.key);
out_key2:
	put_futex_key(&key2);

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}

/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */

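/*
 * Registration side in userspace, as an illustrative sketch (not
 * compiled): each thread hands the kernel one robust_list_head; lock
 * words are linked into head.list while held, and list_op_pending
 * covers the window around the atomic acquisition. The structure
 * layout comes from <linux/futex.h>; the robust_mutex type and helper
 * are assumptions for the example.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stddef.h>

struct robust_mutex {
	unsigned int futex_word;	/* holds the owner TID */
	struct robust_list list;	/* linkage for the robust list */
};

static __thread struct robust_list_head head;

static void robust_init_thread(void)
{
	head.list.next = &head.list;	/* empty, circular */
	/* offset from a list entry to its futex word (may be negative) */
	head.futex_offset = offsetof(struct robust_mutex, futex_word) -
			    offsetof(struct robust_mutex, list);
	head.list_op_pending = NULL;
	syscall(SYS_set_robust_list, &head, sizeof(head));
}
#endif
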
/**
 * sys_set_robust_list() - Set the robust-futex list head of a task
 * @head:	pointer to the list-head
 * @len:	length of the list-head, as userspace expects
 */
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
		size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}

/**
 * sys_get_robust_list() - Get the robust-futex list head of a task
 * @pid:	pid of the process [zero for current task]
 * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
 * @len_ptr:	pointer to a length field, the kernel fills in the header size
 */
SYSCALL_DEFINE3(get_robust_list, int, pid,
		struct robust_list_head __user * __user *, head_ptr,
		size_t __user *, len_ptr)
{
	struct robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
		goto err_unlock;

	head = p->robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}

/* Constants for the pending_op argument of handle_futex_death */
#define HANDLE_DEATH_PENDING	true
#define HANDLE_DEATH_LIST	false

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
			      bool pi, bool pending_op)
{
	u32 uval, uninitialized_var(nval), mval;
	int err;

	/* Futex address must be 32bit aligned */
	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
		return -1;

retry:
	if (get_user(uval, uaddr))
		return -1;

	/*
	 * Special case for regular (non PI) futexes. The unlock path in
	 * user space has two race scenarios:
	 *
	 * 1. The unlock path releases the user space futex value and
	 *    before it can execute the futex() syscall to wake up
	 *    waiters it is killed.
	 *
	 * 2. A woken up waiter is killed before it can acquire the
	 *    futex in user space.
	 *
	 * In both cases the TID validation below prevents a wakeup of
	 * potential waiters which can cause these waiters to block
	 * forever.
	 *
	 * In both cases the following conditions are met:
	 *
	 *	1) task->robust_list->list_op_pending != NULL
	 *	   @pending_op == true
	 *	2) User space futex value == 0
	 *	3) Regular futex: @pi == false
	 *
	 * If these conditions are met, it is safe to attempt waking up a
	 * potential waiter without touching the user space futex value and
	 * trying to set the OWNER_DIED bit. The user space futex value is
	 * uncontended and the rest of the user space mutex state is
	 * consistent, so a woken waiter will just take over the
	 * uncontended futex. Setting the OWNER_DIED bit would create
	 * inconsistent state and malfunction of the user space owner died
	 * handling.
	 */
	if (pending_op && !pi && !uval) {
		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
		return 0;
	}

	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
		return 0;

	/*
	 * Ok, this dying thread is truly holding a futex
	 * of interest. Set the OWNER_DIED bit atomically
	 * via cmpxchg, and if the value had FUTEX_WAITERS
	 * set, wake up a waiter (if any). (We have to do a
	 * futex_wake() even if OWNER_DIED is already set -
	 * to handle the rare but possible case of recursive
	 * thread-death.) The rest of the cleanup is done in
	 * userspace.
	 */
	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;

	/*
	 * We are not holding a lock here, but we want to have
	 * the pagefault_disable/enable() protection because
	 * we want to handle the fault gracefully. If the
	 * access fails we try to fault in the futex with R/W
	 * verification via get_user_pages. get_user() above
	 * does not guarantee R/W access. If that fails we
	 * give up and leave the futex locked.
	 */
	if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) {
		switch (err) {
		case -EFAULT:
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;

		case -EAGAIN:
			cond_resched();
			goto retry;

		default:
			WARN_ON_ONCE(1);
			return err;
		}
	}

	if (nval != uval)
		goto retry;

	/*
	 * Wake robust non-PI futexes here. The wakeup of
	 * PI futexes happens in exit_pi_state():
	 */
	if (!pi && (uval & FUTEX_WAITERS))
		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);

	return 0;
}

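/*
 * What the woken waiter sees, as an illustrative sketch (not compiled):
 * after the walk above marked the word, the next acquirer finds
 * FUTEX_OWNER_DIED set and must treat the protected state as
 * inconsistent, analogous to pthread's EOWNERDEAD. The helper and its
 * return convention are assumptions; gettid() assumes a recent glibc.
 */
#if 0
static int robust_trylock(_Atomic unsigned int *futex_word)
{
	unsigned int tid = gettid();
	unsigned int val = 0;

	/* Uncontended: 0 -> TID. */
	if (atomic_compare_exchange_strong(futex_word, &val, tid))
		return 0;

	/* Dead owner: take over the lock, keeping FUTEX_OWNER_DIED set. */
	if ((val & FUTEX_OWNER_DIED) &&
	    atomic_compare_exchange_strong(futex_word, &val,
					   tid | FUTEX_OWNER_DIED))
		return -1;	/* caller must repair state, cf. EOWNERDEAD */

	return 1;		/* genuinely contended, fall back to futex() */
}
#endif
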
/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int uninitialized_var(next_pi);
	unsigned long futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			if (handle_futex_death((void __user *)entry + futex_offset,
						curr, pi, HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending) {
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip, HANDLE_DEATH_PENDING);
	}
}

static void futex_cleanup(struct task_struct *tsk)
{
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}

#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif

	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
}

/**
 * futex_exit_recursive - Set the task's futex state to FUTEX_STATE_DEAD
 * @tsk:	task to set the state on
 *
 * Set the futex exit state of the task locklessly. The futex waiter code
 * observes that state when a task is exiting and loops until the task has
 * actually finished the futex cleanup. The worst case for this is that the
 * waiter runs through the wait loop until the state becomes visible.
 *
 * This is called from the recursive fault handling path in do_exit().
 *
 * This is best effort. Either the futex exit code has run already or
 * not. If the OWNER_DIED bit has been set on the futex then the waiter can
 * take it over. If not, the problem is pushed back to user space. If the
 * futex exit code did not run yet, then an already queued waiter might
 * block forever, but there is nothing which can be done about that.
 */
void futex_exit_recursive(struct task_struct *tsk)
{
	/* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
	if (tsk->futex_state == FUTEX_STATE_EXITING)
		mutex_unlock(&tsk->futex_exit_mutex);
	tsk->futex_state = FUTEX_STATE_DEAD;
}

static void futex_cleanup_begin(struct task_struct *tsk)
{
	/*
	 * Prevent various race issues against a concurrent incoming waiter
	 * including live locks by forcing the waiter to block on
	 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
	 * attach_to_pi_owner().
	 */
	mutex_lock(&tsk->futex_exit_mutex);

	/*
	 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
	 *
	 * This ensures that all subsequent checks of tsk->futex_state in
	 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
	 * tsk->pi_lock held.
	 *
	 * It guarantees also that a pi_state which was queued right before
	 * the state change under tsk->pi_lock by a concurrent waiter must
	 * be observed in exit_pi_state_list().
	 */
	raw_spin_lock_irq(&tsk->pi_lock);
	tsk->futex_state = FUTEX_STATE_EXITING;
	raw_spin_unlock_irq(&tsk->pi_lock);
}

static void futex_cleanup_end(struct task_struct *tsk, int state)
{
	/*
	 * Lockless store. The only side effect is that an observer might
	 * take another loop until it becomes visible.
	 */
	tsk->futex_state = state;
	/*
	 * Drop the exit protection. This unblocks waiters which observed
	 * FUTEX_STATE_EXITING to reevaluate the state.
	 */
	mutex_unlock(&tsk->futex_exit_mutex);
}

void futex_exec_release(struct task_struct *tsk)
{
	/*
	 * The state handling is done for consistency, but in the case of
	 * exec() there is no way to prevent further damage as the PID stays
	 * the same. But for the unlikely and arguably buggy case that a
	 * futex is held on exec(), this provides at least as much state
	 * consistency protection as is possible.
	 */
	futex_cleanup_begin(tsk);
	futex_cleanup(tsk);
	/*
	 * Reset the state to FUTEX_STATE_OK. The task is alive and about
	 * to exec a new binary.
	 */
	futex_cleanup_end(tsk, FUTEX_STATE_OK);
}

void futex_exit_release(struct task_struct *tsk)
{
	futex_cleanup_begin(tsk);
	futex_cleanup(tsk);
	futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
}

long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int cmd = op & FUTEX_CMD_MASK;
	unsigned int flags = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		flags |= FLAGS_SHARED;

	if (op & FUTEX_CLOCK_REALTIME) {
		flags |= FLAGS_CLOCKRT;
		if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET &&
		    cmd != FUTEX_WAIT_REQUEUE_PI)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_LOCK_PI:
	case FUTEX_UNLOCK_PI:
	case FUTEX_TRYLOCK_PI:
	case FUTEX_WAIT_REQUEUE_PI:
	case FUTEX_CMP_REQUEUE_PI:
		if (!futex_cmpxchg_enabled)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through */
	case FUTEX_WAIT_BITSET:
		return futex_wait(uaddr, flags, val, timeout, val3);
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through */
	case FUTEX_WAKE_BITSET:
		return futex_wake(uaddr, flags, val, val3);
	case FUTEX_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
	case FUTEX_CMP_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
	case FUTEX_WAKE_OP:
		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
	case FUTEX_LOCK_PI:
		return futex_lock_pi(uaddr, flags, timeout, 0);
	case FUTEX_UNLOCK_PI:
		return futex_unlock_pi(uaddr, flags);
	case FUTEX_TRYLOCK_PI:
		return futex_lock_pi(uaddr, flags, NULL, 1);
	case FUTEX_WAIT_REQUEUE_PI:
		val3 = FUTEX_BITSET_MATCH_ANY;
		return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
					     uaddr2);
	case FUTEX_CMP_REQUEUE_PI:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
	}
	return -ENOSYS;
}

SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct __kernel_timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec64 ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
			return -EFAULT;
		if (get_timespec64(&ts, utime))
			return -EFAULT;
		if (!timespec64_valid(&ts))
			return -EINVAL;

		t = timespec64_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}

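/*
 * ABI note in example form (an illustrative sketch, not compiled): for
 * the requeue and wake-op commands the fourth syscall argument is not a
 * timeout at all; an integer is smuggled through the utime slot, exactly
 * as decoded above. The helper name is an assumption.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <limits.h>

static void requeue_all_but_one(unsigned int *cond, unsigned int *mutex_word,
				unsigned int cond_expected)
{
	/* 4th argument is nr_requeue in the utime position, not a timeout. */
	syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE, 1,
		(unsigned long)INT_MAX, mutex_word, cond_expected);
}
#endif
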
#ifdef CONFIG_COMPAT
/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int
compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
		   compat_uptr_t __user *head, unsigned int *pi)
{
	if (get_user(*uentry, head))
		return -EFAULT;

	*entry = compat_ptr((*uentry) & ~1);
	*pi = (unsigned int)(*uentry) & 1;

	return 0;
}

static void __user *futex_uaddr(struct robust_list __user *entry,
				compat_long_t futex_offset)
{
	compat_uptr_t base = ptr_to_compat(entry);
	void __user *uaddr = compat_ptr(base + futex_offset);

	return uaddr;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void compat_exit_robust_list(struct task_struct *curr)
{
	struct compat_robust_list_head __user *head = curr->compat_robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int uninitialized_var(next_pi);
	compat_uptr_t uentry, next_uentry, upending;
	compat_long_t futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (compat_fetch_robust_entry(&upending, &pending,
			       &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != (struct robust_list __user *) &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
			(compat_uptr_t __user *)&entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			void __user *uaddr = futex_uaddr(entry, futex_offset);

			if (handle_futex_death(uaddr, curr, pi,
					       HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;
		uentry = next_uentry;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}
	if (pending) {
		void __user *uaddr = futex_uaddr(pending, futex_offset);

		handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
	}
}

COMPAT_SYSCALL_DEFINE2(set_robust_list,
		struct compat_robust_list_head __user *, head,
		compat_size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->compat_robust_list = head;

	return 0;
}

COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
			compat_uptr_t __user *, head_ptr,
			compat_size_t __user *, len_ptr)
{
	struct compat_robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
		goto err_unlock;

	head = p->compat_robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(ptr_to_compat(head), head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
		struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec64 ts;
	ktime_t t, *tp = NULL;
	int val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (get_old_timespec32(&ts, utime))
			return -EFAULT;
		if (!timespec64_valid(&ts))
			return -EINVAL;

		t = timespec64_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (int) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
#endif /* CONFIG_COMPAT_32BIT_TIME */

static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
	u32 curval;

	/*
	 * This will fail and we want it. Some arch implementations do
	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
	 * functionality. We want to know that before we call in any
	 * of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on functional
	 * implementation, the non-functional ones will return
	 * -ENOSYS.
	 */
	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
		futex_cmpxchg_enabled = 1;
#endif
}

static int __init futex_init(void)
{
	unsigned int futex_shift;
	unsigned long i;

#if CONFIG_BASE_SMALL
	futex_hashsize = 16;
#else
	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif

	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
					       futex_hashsize, 0,
					       futex_hashsize < 256 ? HASH_SMALL : 0,
					       &futex_shift, NULL,
					       futex_hashsize, futex_hashsize);
	futex_hashsize = 1UL << futex_shift;

	futex_detect_cmpxchg();

	for (i = 0; i < futex_hashsize; i++) {
		atomic_set(&futex_queues[i].waiters, 0);
		plist_head_init(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
core_initcall(futex_init);
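
/*
 * Worked example of the table sizing above (the CPU count is only an
 * assumption for the arithmetic): with CONFIG_BASE_SMALL disabled and
 * num_possible_cpus() == 6, the target is 256 * 6 = 1536 buckets, and
 * roundup_pow_of_two(1536) = 2048; alloc_large_system_hash() then
 * reports futex_shift == 11, so futex_hashsize == 1UL << 11 == 2048.
 */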