1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Fast Userspace Mutexes (which I call "Futexes!").
4 * (C) Rusty Russell, IBM 2002
5 *
6 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
7 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
8 *
9 * Removed page pinning, fix privately mapped COW pages and other cleanups
10 * (C) Copyright 2003, 2004 Jamie Lokier
11 *
12 * Robust futex support started by Ingo Molnar
13 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
14 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
15 *
16 * PI-futex support started by Ingo Molnar and Thomas Gleixner
17 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
18 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
19 *
20 * PRIVATE futexes by Eric Dumazet
21 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
22 *
23 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
24 * Copyright (C) IBM Corporation, 2009
25 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
26 *
27 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
28 * enough at me, Linus for the original (flawed) idea, Matthew
29 * Kirkwood for proof-of-concept implementation.
30 *
31 * "The futexes are also cursed."
32 * "But they come in a choice of three flavours!"
33 */
34 #include <linux/compat.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fs.h>
38 #include <linux/file.h>
39 #include <linux/jhash.h>
40 #include <linux/init.h>
41 #include <linux/futex.h>
42 #include <linux/mount.h>
43 #include <linux/pagemap.h>
44 #include <linux/syscalls.h>
45 #include <linux/signal.h>
46 #include <linux/export.h>
47 #include <linux/magic.h>
48 #include <linux/pid.h>
49 #include <linux/nsproxy.h>
50 #include <linux/ptrace.h>
51 #include <linux/sched/rt.h>
52 #include <linux/sched/wake_q.h>
53 #include <linux/sched/mm.h>
54 #include <linux/hugetlb.h>
55 #include <linux/freezer.h>
56 #include <linux/memblock.h>
57 #include <linux/fault-inject.h>
58 #include <linux/refcount.h>
59
60 #include <asm/futex.h>
61
62 #include "locking/rtmutex_common.h"
63
64 /*
65 * READ this before attempting to hack on futexes!
66 *
67 * Basic futex operation and ordering guarantees
68 * =============================================
69 *
70 * The waiter reads the futex value in user space and calls
71 * futex_wait(). This function computes the hash bucket and acquires
72 * the hash bucket lock. After that it reads the futex user space value
73 * again and verifies that the data has not changed. If it has not changed
74 * it enqueues itself into the hash bucket, releases the hash bucket lock
75 * and schedules.
76 *
77 * The waker side modifies the user space value of the futex and calls
78 * futex_wake(). This function computes the hash bucket and acquires the
79 * hash bucket lock. Then it looks for waiters on that futex in the hash
80 * bucket and wakes them.
81 *
82 * In futex wakeup scenarios where no tasks are blocked on a futex, taking
83 * the hb spinlock can be avoided and the syscall can simply return. In order for this
84 * optimization to work, ordering guarantees must exist so that the waiter
85 * being added to the list is acknowledged when the list is concurrently being
86 * checked by the waker, avoiding scenarios like the following:
87 *
88 * CPU 0 CPU 1
89 * val = *futex;
90 * sys_futex(WAIT, futex, val);
91 * futex_wait(futex, val);
92 * uval = *futex;
93 * *futex = newval;
94 * sys_futex(WAKE, futex);
95 * futex_wake(futex);
96 * if (queue_empty())
97 * return;
98 * if (uval == val)
99 * lock(hash_bucket(futex));
100 * queue();
101 * unlock(hash_bucket(futex));
102 * schedule();
103 *
104 * This would cause the waiter on CPU 0 to wait forever because it
105 * missed the transition of the user space value from val to newval
106 * and the waker did not find the waiter in the hash bucket queue.
107 *
108 * The correct serialization ensures that a waiter either observes
109 * the changed user space value before blocking or is woken by a
110 * concurrent waker:
111 *
112 * CPU 0 CPU 1
113 * val = *futex;
114 * sys_futex(WAIT, futex, val);
115 * futex_wait(futex, val);
116 *
117 * waiters++; (a)
118 * smp_mb(); (A) <-- paired with -.
119 * |
120 * lock(hash_bucket(futex)); |
121 * |
122 * uval = *futex; |
123 * | *futex = newval;
124 * | sys_futex(WAKE, futex);
125 * | futex_wake(futex);
126 * |
127 * `--------> smp_mb(); (B)
128 * if (uval == val)
129 * queue();
130 * unlock(hash_bucket(futex));
131 * schedule(); if (waiters)
132 * lock(hash_bucket(futex));
133 * else wake_waiters(futex);
134 * waiters--; (b) unlock(hash_bucket(futex));
135 *
136 * Where (A) orders the waiters increment and the futex value read through
137 * atomic operations (see hb_waiters_inc) and where (B) orders the write
138 * to futex and the waiters read -- this is done by the barriers for both
139 * shared and private futexes in get_futex_key_refs().
140 *
141 * This yields the following case (where X:=waiters, Y:=futex):
142 *
143 * X = Y = 0
144 *
145 * w[X]=1 w[Y]=1
146 * MB MB
147 * r[Y]=y r[X]=x
148 *
149 * Which guarantees that x==0 && y==0 is impossible; which translates back into
150 * the guarantee that we cannot both miss the futex variable change and the
151 * enqueue.
152 *
153 * Note that a new waiter is accounted for in (a) even when it is possible that
154 * the wait call can return an error, in which case we backtrack from it in (b).
155 * Refer to the comment in queue_lock().
156 *
157 * Similarly, in order to account for waiters being requeued on another
158 * address we always increment the waiters for the destination bucket before
159 * acquiring the lock. It then decrements them again after releasing it -
160 * the code that actually moves the futex(es) between hash buckets (requeue_futex)
161 * will do the additional required waiter count housekeeping. This is done by
162 * double_lock_hb() and double_unlock_hb(), respectively.
163 */
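/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the userspace half of the compare-and-block protocol described above.
 * The helper names are hypothetical; the syscall usage is the standard
 * futex(2) uapi.
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long sys_futex_wait(int *uaddr, int val)
 *	{
 *		// The kernel re-reads *uaddr under the hash bucket lock and
 *		// blocks only if it still equals val, closing the race shown
 *		// in the first diagram above.
 *		return syscall(SYS_futex, uaddr, FUTEX_WAIT, val,
 *			       NULL, NULL, 0);
 *	}
 *
 *	static long sys_futex_wake(int *uaddr, int nr)
 *	{
 *		// The waker must change *uaddr before calling this, so a
 *		// concurrent waiter either sees the new value or is found
 *		// on the hash bucket list.
 *		return syscall(SYS_futex, uaddr, FUTEX_WAKE, nr,
 *			       NULL, NULL, 0);
 *	}
 */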
164
165 #ifdef CONFIG_HAVE_FUTEX_CMPXCHG
166 #define futex_cmpxchg_enabled 1
167 #else
168 static int __read_mostly futex_cmpxchg_enabled;
169 #endif
170
171 /*
172 * Futex flags used to encode options to functions and preserve them across
173 * restarts.
174 */
175 #ifdef CONFIG_MMU
176 # define FLAGS_SHARED 0x01
177 #else
178 /*
179 * NOMMU does not have a per-process address space. Let the compiler optimize
180 * code away.
181 */
182 # define FLAGS_SHARED 0x00
183 #endif
184 #define FLAGS_CLOCKRT 0x02
185 #define FLAGS_HAS_TIMEOUT 0x04
186
187 /*
188 * Priority Inheritance state:
189 */
190 struct futex_pi_state {
191 /*
192 * list of 'owned' pi_state instances - these have to be
193 * cleaned up in do_exit() if the task exits prematurely:
194 */
195 struct list_head list;
196
197 /*
198 * The PI object:
199 */
200 struct rt_mutex pi_mutex;
201
202 struct task_struct *owner;
203 refcount_t refcount;
204
205 union futex_key key;
206 } __randomize_layout;
207
208 /**
209 * struct futex_q - The hashed futex queue entry, one per waiting task
210 * @list: priority-sorted list of tasks waiting on this futex
211 * @task: the task waiting on the futex
212 * @lock_ptr: the hash bucket lock
213 * @key: the key the futex is hashed on
214 * @pi_state: optional priority inheritance state
215 * @rt_waiter: rt_waiter storage for use with requeue_pi
216 * @requeue_pi_key: the requeue_pi target futex key
217 * @bitset: bitset for the optional bitmasked wakeup
218 *
219 * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
220 * we can wake only the relevant ones (hashed queues may be shared).
221 *
222 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
223 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
224 * The order of wakeup is always to make the first condition true, then
225 * the second.
226 *
227 * PI futexes are typically woken before they are removed from the hash list via
228 * the rt_mutex code. See unqueue_me_pi().
229 */
230 struct futex_q {
231 struct plist_node list;
232
233 struct task_struct *task;
234 spinlock_t *lock_ptr;
235 union futex_key key;
236 struct futex_pi_state *pi_state;
237 struct rt_mutex_waiter *rt_waiter;
238 union futex_key *requeue_pi_key;
239 u32 bitset;
240 } __randomize_layout;
241
242 static const struct futex_q futex_q_init = {
243 /* list gets initialized in queue_me() */
244 .key = FUTEX_KEY_INIT,
245 .bitset = FUTEX_BITSET_MATCH_ANY
246 };
247
248 /*
249 * Hash buckets are shared by all the futex_keys that hash to the same
250 * location. Each key may have multiple futex_q structures, one for each task
251 * waiting on a futex.
252 */
253 struct futex_hash_bucket {
254 atomic_t waiters;
255 spinlock_t lock;
256 struct plist_head chain;
257 } ____cacheline_aligned_in_smp;
258
259 /*
260 * The base of the bucket array and its size are always used together
261 * (after initialization only in hash_futex()), so ensure that they
262 * reside in the same cacheline.
263 */
264 static struct {
265 struct futex_hash_bucket *queues;
266 unsigned long hashsize;
267 } __futex_data __read_mostly __aligned(2*sizeof(long));
268 #define futex_queues (__futex_data.queues)
269 #define futex_hashsize (__futex_data.hashsize)
270
271
272 /*
273 * Fault injections for futexes.
274 */
275 #ifdef CONFIG_FAIL_FUTEX
276
277 static struct {
278 struct fault_attr attr;
279
280 bool ignore_private;
281 } fail_futex = {
282 .attr = FAULT_ATTR_INITIALIZER,
283 .ignore_private = false,
284 };
285
286 static int __init setup_fail_futex(char *str)
287 {
288 return setup_fault_attr(&fail_futex.attr, str);
289 }
290 __setup("fail_futex=", setup_fail_futex);
291
292 static bool should_fail_futex(bool fshared)
293 {
294 if (fail_futex.ignore_private && !fshared)
295 return false;
296
297 return should_fail(&fail_futex.attr, 1);
298 }
299
300 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
301
302 static int __init fail_futex_debugfs(void)
303 {
304 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
305 struct dentry *dir;
306
307 dir = fault_create_debugfs_attr("fail_futex", NULL,
308 &fail_futex.attr);
309 if (IS_ERR(dir))
310 return PTR_ERR(dir);
311
312 debugfs_create_bool("ignore-private", mode, dir,
313 &fail_futex.ignore_private);
314 return 0;
315 }
316
317 late_initcall(fail_futex_debugfs);
318
319 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
320
321 #else
322 static inline bool should_fail_futex(bool fshared)
323 {
324 return false;
325 }
326 #endif /* CONFIG_FAIL_FUTEX */
327
328 static inline void futex_get_mm(union futex_key *key)
329 {
330 mmgrab(key->private.mm);
331 /*
332 * Ensure futex_get_mm() implies a full barrier such that
333 * get_futex_key() implies a full barrier. This is relied upon
334 * as smp_mb(); (B), see the ordering comment above.
335 */
336 smp_mb__after_atomic();
337 }
338
339 /*
340 * Reflects a new waiter being added to the waitqueue.
341 */
342 static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
343 {
344 #ifdef CONFIG_SMP
345 atomic_inc(&hb->waiters);
346 /*
347 * Full barrier (A), see the ordering comment above.
348 */
349 smp_mb__after_atomic();
350 #endif
351 }
352
353 /*
354 * Reflects a waiter being removed from the waitqueue by wakeup
355 * paths.
356 */
357 static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
358 {
359 #ifdef CONFIG_SMP
360 atomic_dec(&hb->waiters);
361 #endif
362 }
363
364 static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
365 {
366 #ifdef CONFIG_SMP
367 return atomic_read(&hb->waiters);
368 #else
369 return 1;
370 #endif
371 }
372
373 /**
374 * hash_futex - Return the hash bucket in the global hash
375 * @key: Pointer to the futex key for which the hash is calculated
376 *
377 * We hash on the keys returned from get_futex_key (see below) and return the
378 * corresponding hash bucket in the global hash.
379 */
380 static struct futex_hash_bucket *hash_futex(union futex_key *key)
381 {
382 u32 hash = jhash2((u32*)&key->both.word,
383 (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
384 key->both.offset);
385 return &futex_queues[hash & (futex_hashsize - 1)];
386 }
387
388
389 /**
390 * match_futex - Check whether two futex keys are equal
391 * @key1: Pointer to key1
392 * @key2: Pointer to key2
393 *
394 * Return 1 if two futex_keys are equal, 0 otherwise.
395 */
396 static inline int match_futex(union futex_key *key1, union futex_key *key2)
397 {
398 return (key1 && key2
399 && key1->both.word == key2->both.word
400 && key1->both.ptr == key2->both.ptr
401 && key1->both.offset == key2->both.offset);
402 }
403
404 /*
405 * Take a reference to the resource addressed by a key.
406 * Can be called while holding spinlocks.
407 *
408 */
409 static void get_futex_key_refs(union futex_key *key)
410 {
411 if (!key->both.ptr)
412 return;
413
414 /*
415 * On MMU-less systems futexes are always "private" as there is no
416 * per-process address space. We need the smp_mb() nevertheless - yes,
417 * arch/blackfin has MMU-less SMP ...
418 */
419 if (!IS_ENABLED(CONFIG_MMU)) {
420 smp_mb(); /* explicit smp_mb(); (B) */
421 return;
422 }
423
424 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
425 case FUT_OFF_INODE:
426 ihold(key->shared.inode); /* implies smp_mb(); (B) */
427 break;
428 case FUT_OFF_MMSHARED:
429 futex_get_mm(key); /* implies smp_mb(); (B) */
430 break;
431 default:
432 /*
433 * Private futexes do not hold a reference on an inode or
434 * mm, therefore the only purpose of calling get_futex_key_refs()
435 * is to provide the barrier needed for the lockless waiter check.
436 */
437 smp_mb(); /* explicit smp_mb(); (B) */
438 }
439 }
440
441 /*
442 * Drop a reference to the resource addressed by a key.
443 * The hash bucket spinlock must not be held. This is
444 * a no-op for private futexes, see comment in the get
445 * counterpart.
446 */
447 static void drop_futex_key_refs(union futex_key *key)
448 {
449 if (!key->both.ptr) {
450 /* If we're here then we tried to put a key we failed to get */
451 WARN_ON_ONCE(1);
452 return;
453 }
454
455 if (!IS_ENABLED(CONFIG_MMU))
456 return;
457
458 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
459 case FUT_OFF_INODE:
460 iput(key->shared.inode);
461 break;
462 case FUT_OFF_MMSHARED:
463 mmdrop(key->private.mm);
464 break;
465 }
466 }
467
468 enum futex_access {
469 FUTEX_READ,
470 FUTEX_WRITE
471 };
472
473 /**
474 * futex_setup_timer - set up the sleeping hrtimer.
475 * @time: ptr to the given timeout value
476 * @timeout: the hrtimer_sleeper structure to be set up
477 * @flags: futex flags
478 * @range_ns: optional range in ns
479 *
480 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
481 * value given
482 */
483 static inline struct hrtimer_sleeper *
484 futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
485 int flags, u64 range_ns)
486 {
487 if (!time)
488 return NULL;
489
490 hrtimer_init_on_stack(&timeout->timer, (flags & FLAGS_CLOCKRT) ?
491 CLOCK_REALTIME : CLOCK_MONOTONIC,
492 HRTIMER_MODE_ABS);
493 hrtimer_init_sleeper(timeout, current);
494
495 /*
496 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
497 * effectively the same as calling hrtimer_set_expires().
498 */
499 hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);
500
501 return timeout;
502 }
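/*
 * Editor's sketch (not part of the original file): the caller pattern the
 * wait paths in this file use with futex_setup_timer(), assuming an
 * absolute timeout pointer @abs_time as passed in from the syscall:
 *
 *	struct hrtimer_sleeper timeout, *to;
 *
 *	to = futex_setup_timer(abs_time, &timeout, flags,
 *			       current->timer_slack_ns);
 *	// ... queue the waiter and schedule ...
 *	if (to) {
 *		hrtimer_cancel(&to->timer);
 *		destroy_hrtimer_on_stack(&to->timer);
 *	}
 */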
503
504 /**
505 * get_futex_key() - Get parameters which are the keys for a futex
506 * @uaddr: virtual address of the futex
507 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
508 * @key: address where result is stored.
509 * @rw: mapping needs to be read/write (values: FUTEX_READ,
510 * FUTEX_WRITE)
511 *
512 * Return: a negative error code or 0
513 *
514 * The key words are stored in @key on success.
515 *
516 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
517 * offset_within_page). For private mappings, it's (uaddr, current->mm).
518 * We can usually work out the index without swapping in the page.
519 *
520 * lock_page() might sleep, the caller should not hold a spinlock.
521 */
522 static int
523 get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_access rw)
524 {
525 unsigned long address = (unsigned long)uaddr;
526 struct mm_struct *mm = current->mm;
527 struct page *page, *tail;
528 struct address_space *mapping;
529 int err, ro = 0;
530
531 /*
532 * The futex address must be "naturally" aligned.
533 */
534 key->both.offset = address % PAGE_SIZE;
535 if (unlikely((address % sizeof(u32)) != 0))
536 return -EINVAL;
537 address -= key->both.offset;
538
539 if (unlikely(!access_ok(uaddr, sizeof(u32))))
540 return -EFAULT;
541
542 if (unlikely(should_fail_futex(fshared)))
543 return -EFAULT;
544
545 /*
546 * PROCESS_PRIVATE futexes are fast.
547 * As the mm cannot disappear under us and the 'key' only needs the
548 * virtual address, we don't even have to find the underlying vma.
549 * Note: We do have to check that 'uaddr' is a valid user address,
550 * but access_ok() should be faster than find_vma().
551 */
552 if (!fshared) {
553 key->private.mm = mm;
554 key->private.address = address;
555 get_futex_key_refs(key); /* implies smp_mb(); (B) */
556 return 0;
557 }
558
559 again:
560 /* Ignore any VERIFY_READ mapping (futex common case) */
561 if (unlikely(should_fail_futex(fshared)))
562 return -EFAULT;
563
564 err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
565 /*
566 * If write access is not required (eg. FUTEX_WAIT), try
567 * and get read-only access.
568 */
569 if (err == -EFAULT && rw == FUTEX_READ) {
570 err = get_user_pages_fast(address, 1, 0, &page);
571 ro = 1;
572 }
573 if (err < 0)
574 return err;
575 else
576 err = 0;
577
578 /*
579 * The treatment of mapping from this point on is critical. The page
580 * lock protects many things but in this context the page lock
581 * stabilizes mapping, prevents inode freeing in the shared
582 * file-backed region case and guards against movement to swap cache.
583 *
584 * Strictly speaking the page lock is not needed in all cases being
585 * considered here and the page lock forces unnecessary serialization.
586 * From this point on, mapping will be re-verified if necessary and
587 * the page lock will be acquired only if it is unavoidable.
588 *
589 * Mapping checks require the head page for any compound page so the
590 * head page and mapping are looked up now. For anonymous pages, it
591 * does not matter if the page splits in the future as the key is
592 * based on the address. For filesystem-backed pages, the tail is
593 * required as the index of the page determines the key. For
594 * base pages, there is no tail page and tail == page.
595 */
596 tail = page;
597 page = compound_head(page);
598 mapping = READ_ONCE(page->mapping);
599
600 /*
601 * If page->mapping is NULL, then it cannot be a PageAnon
602 * page; but it might be the ZERO_PAGE or in the gate area or
603 * in a special mapping (all cases which we are happy to fail);
604 * or it may have been a good file page when get_user_pages_fast
605 * found it, but truncated or holepunched or subjected to
606 * invalidate_complete_page2 before we got the page lock (also
607 * cases which we are happy to fail). And we hold a reference,
608 * so refcount care in invalidate_complete_page's remove_mapping
609 * prevents drop_caches from setting mapping to NULL beneath us.
610 *
611 * The case we do have to guard against is when memory pressure made
612 * shmem_writepage move it from filecache to swapcache beneath us:
613 * an unlikely race, but we do need to retry for page->mapping.
614 */
615 if (unlikely(!mapping)) {
616 int shmem_swizzled;
617
618 /*
619 * Page lock is required to identify which special case above
620 * applies. If this is really a shmem page then the page lock
621 * will prevent unexpected transitions.
622 */
623 lock_page(page);
624 shmem_swizzled = PageSwapCache(page) || page->mapping;
625 unlock_page(page);
626 put_page(page);
627
628 if (shmem_swizzled)
629 goto again;
630
631 return -EFAULT;
632 }
633
634 /*
635 * Private mappings are handled in a simple way.
636 *
637 * If the futex key is stored on an anonymous page, then the associated
638 * object is the mm which is implicitly pinned by the calling process.
639 *
640 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
641 * it's a read-only handle, it's expected that futexes attach to
642 * the object not the particular process.
643 */
644 if (PageAnon(page)) {
645 /*
646 * A RO anonymous page will never change and thus doesn't make
647 * sense for futex operations.
648 */
649 if (unlikely(should_fail_futex(fshared)) || ro) {
650 err = -EFAULT;
651 goto out;
652 }
653
654 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
655 key->private.mm = mm;
656 key->private.address = address;
657
658 get_futex_key_refs(key); /* implies smp_mb(); (B) */
659
660 } else {
661 struct inode *inode;
662
663 /*
664 * The associated futex object in this case is the inode and
665 * the page->mapping must be traversed. Ordinarily this should
666 * be stabilised under page lock but it's not strictly
667 * necessary in this case as we just want to pin the inode, not
668 * update the radix tree or anything like that.
669 *
670 * The RCU read lock is taken as the inode is finally freed
671 * under RCU. If the mapping still matches expectations then the
672 * mapping->host can be safely accessed as being a valid inode.
673 */
674 rcu_read_lock();
675
676 if (READ_ONCE(page->mapping) != mapping) {
677 rcu_read_unlock();
678 put_page(page);
679
680 goto again;
681 }
682
683 inode = READ_ONCE(mapping->host);
684 if (!inode) {
685 rcu_read_unlock();
686 put_page(page);
687
688 goto again;
689 }
690
691 /*
692 * Take a reference unless it is about to be freed. Previously
693 * this reference was taken by ihold under the page lock
694 * pinning the inode in place so i_lock was unnecessary. The
695 * only way for this check to fail is if the inode was
696 * truncated in parallel which is almost certainly an
697 * application bug. In such a case, just retry.
698 *
699 * We are not calling into get_futex_key_refs() in file-backed
700 * cases, therefore a successful atomic_inc return below will
701 * guarantee that get_futex_key() will still imply smp_mb(); (B).
702 */
703 if (!atomic_inc_not_zero(&inode->i_count)) {
704 rcu_read_unlock();
705 put_page(page);
706
707 goto again;
708 }
709
710 /* Should be impossible but let's be paranoid for now */
711 if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
712 err = -EFAULT;
713 rcu_read_unlock();
714 iput(inode);
715
716 goto out;
717 }
718
719 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
720 key->shared.inode = inode;
721 key->shared.pgoff = basepage_index(tail);
722 rcu_read_unlock();
723 }
724
725 out:
726 put_page(page);
727 return err;
728 }
729
730 static inline void put_futex_key(union futex_key *key)
731 {
732 drop_futex_key_refs(key);
733 }
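/*
 * Editor's note (illustrative, not part of the original file): the
 * @fshared argument above is what FUTEX_PRIVATE_FLAG controls from
 * userspace. A private wait such as
 *
 *	syscall(SYS_futex, &word, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
 *		expected, NULL, NULL, 0);
 *
 * takes the fast path that keys on (current->mm, address), while the
 * same call without FUTEX_PRIVATE_FLAG goes through the
 * get_user_pages_fast()/mapping walk so the key can name the shared
 * object (inode or shared mm mapping).
 */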
734
735 /**
736 * fault_in_user_writeable() - Fault in user address and verify RW access
737 * @uaddr: pointer to faulting user space address
738 *
739 * Slow path to fixup the fault we just took in the atomic write
740 * access to @uaddr.
741 *
742 * We have no generic implementation of a non-destructive write to the
743 * user address. We know that we faulted in the atomic pagefault
744 * disabled section so we might as well avoid the #PF overhead by
745 * calling get_user_pages() right away.
746 */
747 static int fault_in_user_writeable(u32 __user *uaddr)
748 {
749 struct mm_struct *mm = current->mm;
750 int ret;
751
752 down_read(&mm->mmap_sem);
753 ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
754 FAULT_FLAG_WRITE, NULL);
755 up_read(&mm->mmap_sem);
756
757 return ret < 0 ? ret : 0;
758 }
759
760 /**
761 * futex_top_waiter() - Return the highest priority waiter on a futex
762 * @hb: the hash bucket the futex_q's reside in
763 * @key: the futex key (to distinguish it from the futex_q's of other futexes)
764 *
765 * Must be called with the hb lock held.
766 */
767 static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
768 union futex_key *key)
769 {
770 struct futex_q *this;
771
772 plist_for_each_entry(this, &hb->chain, list) {
773 if (match_futex(&this->key, key))
774 return this;
775 }
776 return NULL;
777 }
778
779 static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
780 u32 uval, u32 newval)
781 {
782 int ret;
783
784 pagefault_disable();
785 ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
786 pagefault_enable();
787
788 return ret;
789 }
790
791 static int get_futex_value_locked(u32 *dest, u32 __user *from)
792 {
793 int ret;
794
795 pagefault_disable();
796 ret = __get_user(*dest, from);
797 pagefault_enable();
798
799 return ret ? -EFAULT : 0;
800 }
801
802
803 /*
804 * PI code:
805 */
806 static int refill_pi_state_cache(void)
807 {
808 struct futex_pi_state *pi_state;
809
810 if (likely(current->pi_state_cache))
811 return 0;
812
813 pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
814
815 if (!pi_state)
816 return -ENOMEM;
817
818 INIT_LIST_HEAD(&pi_state->list);
819 /* pi_mutex gets initialized later */
820 pi_state->owner = NULL;
821 refcount_set(&pi_state->refcount, 1);
822 pi_state->key = FUTEX_KEY_INIT;
823
824 current->pi_state_cache = pi_state;
825
826 return 0;
827 }
828
829 static struct futex_pi_state *alloc_pi_state(void)
830 {
831 struct futex_pi_state *pi_state = current->pi_state_cache;
832
833 WARN_ON(!pi_state);
834 current->pi_state_cache = NULL;
835
836 return pi_state;
837 }
838
839 static void get_pi_state(struct futex_pi_state *pi_state)
840 {
841 WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
842 }
843
844 /*
845 * Drops a reference to the pi_state object and frees or caches it
846 * when the last reference is gone.
847 */
848 static void put_pi_state(struct futex_pi_state *pi_state)
849 {
850 if (!pi_state)
851 return;
852
853 if (!refcount_dec_and_test(&pi_state->refcount))
854 return;
855
856 /*
857 * If pi_state->owner is NULL, the owner is most probably dying
858 * and has cleaned up the pi_state already
859 */
860 if (pi_state->owner) {
861 struct task_struct *owner;
862
863 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
864 owner = pi_state->owner;
865 if (owner) {
866 raw_spin_lock(&owner->pi_lock);
867 list_del_init(&pi_state->list);
868 raw_spin_unlock(&owner->pi_lock);
869 }
870 rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
871 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
872 }
873
874 if (current->pi_state_cache) {
875 kfree(pi_state);
876 } else {
877 /*
878 * pi_state->list is already empty.
879 * clear pi_state->owner.
880 * refcount is at 0 - put it back to 1.
881 */
882 pi_state->owner = NULL;
883 refcount_set(&pi_state->refcount, 1);
884 current->pi_state_cache = pi_state;
885 }
886 }
887
888 #ifdef CONFIG_FUTEX_PI
889
890 /*
891 * This task is holding PI mutexes at exit time => bad.
892 * Kernel cleans up PI-state, but userspace is likely hosed.
893 * (Robust-futex cleanup is separate and might save the day for userspace.)
894 */
895 void exit_pi_state_list(struct task_struct *curr)
896 {
897 struct list_head *next, *head = &curr->pi_state_list;
898 struct futex_pi_state *pi_state;
899 struct futex_hash_bucket *hb;
900 union futex_key key = FUTEX_KEY_INIT;
901
902 if (!futex_cmpxchg_enabled)
903 return;
904 /*
905 * We are a ZOMBIE and nobody can enqueue itself on
906 * pi_state_list anymore, but we have to be careful
907 * versus waiters unqueueing themselves:
908 */
909 raw_spin_lock_irq(&curr->pi_lock);
910 while (!list_empty(head)) {
911 next = head->next;
912 pi_state = list_entry(next, struct futex_pi_state, list);
913 key = pi_state->key;
914 hb = hash_futex(&key);
915
916 /*
917 * We can race against put_pi_state() removing itself from the
918 * list (a waiter going away). put_pi_state() will first
919 * decrement the reference count and then modify the list, so
920 * its possible to see the list entry but fail this reference
921 * acquire.
922 *
923 * In that case; drop the locks to let put_pi_state() make
924 * progress and retry the loop.
925 */
926 if (!refcount_inc_not_zero(&pi_state->refcount)) {
927 raw_spin_unlock_irq(&curr->pi_lock);
928 cpu_relax();
929 raw_spin_lock_irq(&curr->pi_lock);
930 continue;
931 }
932 raw_spin_unlock_irq(&curr->pi_lock);
933
934 spin_lock(&hb->lock);
935 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
936 raw_spin_lock(&curr->pi_lock);
937 /*
938 * We dropped the pi-lock, so re-check whether this
939 * task still owns the PI-state:
940 */
941 if (head->next != next) {
942 /* retain curr->pi_lock for the loop invariant */
943 raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
944 spin_unlock(&hb->lock);
945 put_pi_state(pi_state);
946 continue;
947 }
948
949 WARN_ON(pi_state->owner != curr);
950 WARN_ON(list_empty(&pi_state->list));
951 list_del_init(&pi_state->list);
952 pi_state->owner = NULL;
953
954 raw_spin_unlock(&curr->pi_lock);
955 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
956 spin_unlock(&hb->lock);
957
958 rt_mutex_futex_unlock(&pi_state->pi_mutex);
959 put_pi_state(pi_state);
960
961 raw_spin_lock_irq(&curr->pi_lock);
962 }
963 raw_spin_unlock_irq(&curr->pi_lock);
964 }
965
966 #endif
967
968 /*
969 * We need to check the following states:
970 *
971 * Waiter | pi_state | pi->owner | uTID | uODIED | ?
972 *
973 * [1] NULL | --- | --- | 0 | 0/1 | Valid
974 * [2] NULL | --- | --- | >0 | 0/1 | Valid
975 *
976 * [3] Found | NULL | -- | Any | 0/1 | Invalid
977 *
978 * [4] Found | Found | NULL | 0 | 1 | Valid
979 * [5] Found | Found | NULL | >0 | 1 | Invalid
980 *
981 * [6] Found | Found | task | 0 | 1 | Valid
982 *
983 * [7] Found | Found | NULL | Any | 0 | Invalid
984 *
985 * [8] Found | Found | task | ==taskTID | 0/1 | Valid
986 * [9] Found | Found | task | 0 | 0 | Invalid
987 * [10] Found | Found | task | !=taskTID | 0/1 | Invalid
988 *
989 * [1] Indicates that the kernel can acquire the futex atomically. We
990 * came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
991 *
992 * [2] Valid, if TID does not belong to a kernel thread. If no matching
993 * thread is found then it indicates that the owner TID has died.
994 *
995 * [3] Invalid. The waiter is queued on a non PI futex
996 *
997 * [4] Valid state after exit_robust_list(), which sets the user space
998 * value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
999 *
1000 * [5] The user space value got manipulated between exit_robust_list()
1001 * and exit_pi_state_list()
1002 *
1003 * [6] Valid state after exit_pi_state_list() which sets the new owner in
1004 * the pi_state but cannot access the user space value.
1005 *
1006 * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
1007 *
1008 * [8] Owner and user space value match
1009 *
1010 * [9] There is no transient state which sets the user space TID to 0
1011 * except exit_robust_list(), but this is indicated by the
1012 * FUTEX_OWNER_DIED bit. See [4]
1013 *
1014 * [10] There is no transient state which leaves owner and user space
1015 * TID out of sync.
1016 *
1017 *
1018 * Serialization and lifetime rules:
1019 *
1020 * hb->lock:
1021 *
1022 * hb -> futex_q, relation
1023 * futex_q -> pi_state, relation
1024 *
1025 * (cannot be raw because hb can contain an arbitrary number
1026 * of futex_q's)
1027 *
1028 * pi_mutex->wait_lock:
1029 *
1030 * {uval, pi_state}
1031 *
1032 * (and pi_mutex 'obviously')
1033 *
1034 * p->pi_lock:
1035 *
1036 * p->pi_state_list -> pi_state->list, relation
1037 *
1038 * pi_state->refcount:
1039 *
1040 * pi_state lifetime
1041 *
1042 *
1043 * Lock order:
1044 *
1045 * hb->lock
1046 * pi_mutex->wait_lock
1047 * p->pi_lock
1048 *
1049 */
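/*
 * Editor's illustration (not part of the original file): the uTID and
 * uODIED columns above decode the user space futex word with the uapi
 * masks from <linux/futex.h>:
 *
 *	static void decode_pi_uval(u32 uval)
 *	{
 *		pid_t utid = uval & FUTEX_TID_MASK;	// uTID column
 *		bool uodied = uval & FUTEX_OWNER_DIED;	// uODIED column
 *		bool waiters = uval & FUTEX_WAITERS;	// unlock must enter kernel
 *	}
 *
 * e.g. the 0xC0000000 value in the handle_exit_race() comment below is
 * FUTEX_WAITERS | FUTEX_OWNER_DIED with uTID == 0 (state [4] above).
 */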
1050
1051 /*
1052 * Validate that the existing waiter has a pi_state and sanity check
1053 * the pi_state against the user space value. If correct, attach to
1054 * it.
1055 */
1056 static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
1057 struct futex_pi_state *pi_state,
1058 struct futex_pi_state **ps)
1059 {
1060 pid_t pid = uval & FUTEX_TID_MASK;
1061 u32 uval2;
1062 int ret;
1063
1064 /*
1065 * Userspace might have messed up non-PI and PI futexes [3]
1066 */
1067 if (unlikely(!pi_state))
1068 return -EINVAL;
1069
1070 /*
1071 * We get here with hb->lock held, and having found a
1072 * futex_top_waiter(). This means that futex_lock_pi() of said futex_q
1073 * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
1074 * which in turn means that futex_lock_pi() still has a reference on
1075 * our pi_state.
1076 *
1077 * The waiter holding a reference on @pi_state also protects against
1078 * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
1079 * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
1080 * free pi_state before we can take a reference ourselves.
1081 */
1082 WARN_ON(!refcount_read(&pi_state->refcount));
1083
1084 /*
1085 * Now that we have a pi_state, we can acquire wait_lock
1086 * and do the state validation.
1087 */
1088 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1089
1090 /*
1091 * Since {uval, pi_state} is serialized by wait_lock, and our current
1092 * uval was read without holding it, it can have changed. Verify it
1093 * still is what we expect it to be, otherwise retry the entire
1094 * operation.
1095 */
1096 if (get_futex_value_locked(&uval2, uaddr))
1097 goto out_efault;
1098
1099 if (uval != uval2)
1100 goto out_eagain;
1101
1102 /*
1103 * Handle the owner died case:
1104 */
1105 if (uval & FUTEX_OWNER_DIED) {
1106 /*
1107 * exit_pi_state_list sets owner to NULL and wakes the
1108 * topmost waiter. The task which acquires the
1109 * pi_state->rt_mutex will fixup owner.
1110 */
1111 if (!pi_state->owner) {
1112 /*
1113 * No pi state owner, but the user space TID
1114 * is not 0. Inconsistent state. [5]
1115 */
1116 if (pid)
1117 goto out_einval;
1118 /*
1119 * Take a ref on the state and return success. [4]
1120 */
1121 goto out_attach;
1122 }
1123
1124 /*
1125 * If TID is 0, then either the dying owner has not
1126 * yet executed exit_pi_state_list() or some waiter
1127 * acquired the rtmutex in the pi state, but did not
1128 * yet fixup the TID in user space.
1129 *
1130 * Take a ref on the state and return success. [6]
1131 */
1132 if (!pid)
1133 goto out_attach;
1134 } else {
1135 /*
1136 * If the owner died bit is not set, then the pi_state
1137 * must have an owner. [7]
1138 */
1139 if (!pi_state->owner)
1140 goto out_einval;
1141 }
1142
1143 /*
1144 * Bail out if user space manipulated the futex value. If pi
1145 * state exists then the owner TID must be the same as the
1146 * user space TID. [9/10]
1147 */
1148 if (pid != task_pid_vnr(pi_state->owner))
1149 goto out_einval;
1150
1151 out_attach:
1152 get_pi_state(pi_state);
1153 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1154 *ps = pi_state;
1155 return 0;
1156
1157 out_einval:
1158 ret = -EINVAL;
1159 goto out_error;
1160
1161 out_eagain:
1162 ret = -EAGAIN;
1163 goto out_error;
1164
1165 out_efault:
1166 ret = -EFAULT;
1167 goto out_error;
1168
1169 out_error:
1170 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1171 return ret;
1172 }
1173
1174 static int handle_exit_race(u32 __user *uaddr, u32 uval,
1175 struct task_struct *tsk)
1176 {
1177 u32 uval2;
1178
1179 /*
1180 * If PF_EXITPIDONE is not yet set, then try again.
1181 */
1182 if (tsk && !(tsk->flags & PF_EXITPIDONE))
1183 return -EAGAIN;
1184
1185 /*
1186 * Reread the user space value to handle the following situation:
1187 *
1188 * CPU0 CPU1
1189 *
1190 * sys_exit() sys_futex()
1191 * do_exit() futex_lock_pi()
1192 * futex_lock_pi_atomic()
1193 * exit_signals(tsk) No waiters:
1194 * tsk->flags |= PF_EXITING; *uaddr == 0x00000PID
1195 * mm_release(tsk) Set waiter bit
1196 * exit_robust_list(tsk) { *uaddr = 0x80000PID;
1197 * Set owner died attach_to_pi_owner() {
1198 * *uaddr = 0xC0000000; tsk = get_task(PID);
1199 * } if (!tsk->flags & PF_EXITING) {
1200 * ... attach();
1201 * tsk->flags |= PF_EXITPIDONE; } else {
1202 * if (!(tsk->flags & PF_EXITPIDONE))
1203 * return -EAGAIN;
1204 * return -ESRCH; <--- FAIL
1205 * }
1206 *
1207 * Returning ESRCH unconditionally is wrong here because the
1208 * user space value has been changed by the exiting task.
1209 *
1210 * The same logic applies to the case where the exiting task is
1211 * already gone.
1212 */
1213 if (get_futex_value_locked(&uval2, uaddr))
1214 return -EFAULT;
1215
1216 /* If the user space value has changed, try again. */
1217 if (uval2 != uval)
1218 return -EAGAIN;
1219
1220 /*
1221 * The exiting task did not have a robust list, the robust list was
1222 * corrupted or the user space value in *uaddr is simply bogus.
1223 * Give up and tell user space.
1224 */
1225 return -ESRCH;
1226 }
1227
1228 /*
1229 * Lookup the task for the TID provided from user space and attach to
1230 * it after doing proper sanity checks.
1231 */
1232 static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
1233 struct futex_pi_state **ps)
1234 {
1235 pid_t pid = uval & FUTEX_TID_MASK;
1236 struct futex_pi_state *pi_state;
1237 struct task_struct *p;
1238
1239 /*
1240 * We are the first waiter - try to look up the real owner and attach
1241 * the new pi_state to it, but bail out when TID = 0 [1]
1242 *
1243 * The !pid check is paranoid. None of the call sites should end up
1244 * with pid == 0, but better safe than sorry. Let the caller retry
1245 */
1246 if (!pid)
1247 return -EAGAIN;
1248 p = find_get_task_by_vpid(pid);
1249 if (!p)
1250 return handle_exit_race(uaddr, uval, NULL);
1251
1252 if (unlikely(p->flags & PF_KTHREAD)) {
1253 put_task_struct(p);
1254 return -EPERM;
1255 }
1256
1257 /*
1258 * We need to look at the task state flags to figure out
1259 * whether the task is exiting. To protect against the do_exit
1260 * change of the task flags, we do this protected by
1261 * p->pi_lock:
1262 */
1263 raw_spin_lock_irq(&p->pi_lock);
1264 if (unlikely(p->flags & PF_EXITING)) {
1265 /*
1266 * The task is on the way out. When PF_EXITPIDONE is
1267 * set, we know that the task has finished the
1268 * cleanup:
1269 */
1270 int ret = handle_exit_race(uaddr, uval, p);
1271
1272 raw_spin_unlock_irq(&p->pi_lock);
1273 put_task_struct(p);
1274 return ret;
1275 }
1276
1277 /*
1278 * No existing pi state. First waiter. [2]
1279 *
1280 * This creates pi_state, we have hb->lock held, this means nothing can
1281 * observe this state, wait_lock is irrelevant.
1282 */
1283 pi_state = alloc_pi_state();
1284
1285 /*
1286 * Initialize the pi_mutex in locked state and make @p
1287 * the owner of it:
1288 */
1289 rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
1290
1291 /* Store the key for possible exit cleanups: */
1292 pi_state->key = *key;
1293
1294 WARN_ON(!list_empty(&pi_state->list));
1295 list_add(&pi_state->list, &p->pi_state_list);
1296 /*
1297 * Assignment without holding pi_state->pi_mutex.wait_lock is safe
1298 * because there is no concurrency as the object is not published yet.
1299 */
1300 pi_state->owner = p;
1301 raw_spin_unlock_irq(&p->pi_lock);
1302
1303 put_task_struct(p);
1304
1305 *ps = pi_state;
1306
1307 return 0;
1308 }
1309
1310 static int lookup_pi_state(u32 __user *uaddr, u32 uval,
1311 struct futex_hash_bucket *hb,
1312 union futex_key *key, struct futex_pi_state **ps)
1313 {
1314 struct futex_q *top_waiter = futex_top_waiter(hb, key);
1315
1316 /*
1317 * If there is a waiter on that futex, validate it and
1318 * attach to the pi_state when the validation succeeds.
1319 */
1320 if (top_waiter)
1321 return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
1322
1323 /*
1324 * We are the first waiter - try to look up the owner based on
1325 * @uval and attach to it.
1326 */
1327 return attach_to_pi_owner(uaddr, uval, key, ps);
1328 }
1329
1330 static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
1331 {
1332 int err;
1333 u32 uninitialized_var(curval);
1334
1335 if (unlikely(should_fail_futex(true)))
1336 return -EFAULT;
1337
1338 err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
1339 if (unlikely(err))
1340 return err;
1341
1342 /* If user space value changed, let the caller retry */
1343 return curval != uval ? -EAGAIN : 0;
1344 }
1345
1346 /**
1347 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
1348 * @uaddr: the pi futex user address
1349 * @hb: the pi futex hash bucket
1350 * @key: the futex key associated with uaddr and hb
1351 * @ps: the pi_state pointer where we store the result of the
1352 * lookup
1353 * @task: the task to perform the atomic lock work for. This will
1354 * be "current" except in the case of requeue pi.
1355 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
1356 *
1357 * Return:
1358 * - 0 - ready to wait;
1359 * - 1 - acquired the lock;
1360 * - <0 - error
1361 *
1362 * The hb->lock and futex_key refs shall be held by the caller.
1363 */
1364 static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
1365 union futex_key *key,
1366 struct futex_pi_state **ps,
1367 struct task_struct *task, int set_waiters)
1368 {
1369 u32 uval, newval, vpid = task_pid_vnr(task);
1370 struct futex_q *top_waiter;
1371 int ret;
1372
1373 /*
1374 * Read the user space value first so we can validate a few
1375 * things before proceeding further.
1376 */
1377 if (get_futex_value_locked(&uval, uaddr))
1378 return -EFAULT;
1379
1380 if (unlikely(should_fail_futex(true)))
1381 return -EFAULT;
1382
1383 /*
1384 * Detect deadlocks.
1385 */
1386 if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
1387 return -EDEADLK;
1388
1389 if ((unlikely(should_fail_futex(true))))
1390 return -EDEADLK;
1391
1392 /*
1393 * Lookup existing state first. If it exists, try to attach to
1394 * its pi_state.
1395 */
1396 top_waiter = futex_top_waiter(hb, key);
1397 if (top_waiter)
1398 return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
1399
1400 /*
1401 * No waiter and user TID is 0. We are here because the
1402 * waiters or the owner died bit is set, we were called from
1403 * requeue_cmp_pi, or for whatever reason something took the
1404 * syscall.
1405 */
1406 if (!(uval & FUTEX_TID_MASK)) {
1407 /*
1408 * We take over the futex. No other waiters and the user space
1409 * TID is 0. We preserve the owner died bit.
1410 */
1411 newval = uval & FUTEX_OWNER_DIED;
1412 newval |= vpid;
1413
1414 /* The futex requeue_pi code can enforce the waiters bit */
1415 if (set_waiters)
1416 newval |= FUTEX_WAITERS;
1417
1418 ret = lock_pi_update_atomic(uaddr, uval, newval);
1419 /* If the take over worked, return 1 */
1420 return ret < 0 ? ret : 1;
1421 }
1422
1423 /*
1424 * First waiter. Set the waiters bit before attaching ourself to
1425 * the owner. If owner tries to unlock, it will be forced into
1426 * the kernel and blocked on hb->lock.
1427 */
1428 newval = uval | FUTEX_WAITERS;
1429 ret = lock_pi_update_atomic(uaddr, uval, newval);
1430 if (ret)
1431 return ret;
1432 /*
1433 * If the update of the user space value succeeded, we try to
1434 * attach to the owner. If that fails, no harm done, we only
1435 * set the FUTEX_WAITERS bit in the user space variable.
1436 */
1437 return attach_to_pi_owner(uaddr, newval, key, ps);
1438 }
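/*
 * Editor's illustration (not part of the original file): the userspace
 * side of the transitions futex_lock_pi_atomic() completes. Uncontended
 * acquisition never enters the kernel; the names below are hypothetical
 * userspace code using the GCC __sync builtin:
 *
 *	pid_t tid = syscall(SYS_gettid);
 *
 *	// fast path: the 0 -> TID transition is done entirely in userspace
 *	if (__sync_val_compare_and_swap(&futex_word, 0, tid) != 0)
 *		// slow path: the kernel sets FUTEX_WAITERS, attaches a
 *		// pi_state to the owner and blocks on the rt_mutex
 *		syscall(SYS_futex, &futex_word, FUTEX_LOCK_PI, 0,
 *			NULL, NULL, 0);
 */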
1439
1440 /**
1441 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
1442 * @q: The futex_q to unqueue
1443 *
1444 * The q->lock_ptr must not be NULL and must be held by the caller.
1445 */
1446 static void __unqueue_futex(struct futex_q *q)
1447 {
1448 struct futex_hash_bucket *hb;
1449
1450 if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
1451 return;
1452 lockdep_assert_held(q->lock_ptr);
1453
1454 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
1455 plist_del(&q->list, &hb->chain);
1456 hb_waiters_dec(hb);
1457 }
1458
1459 /*
1460 * The hash bucket lock must be held when this is called.
1461 * Afterwards, the futex_q must not be accessed. Callers
1462 * must later call wake_up_q() for the actual
1463 * wakeups to occur.
1464 */
1465 static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
1466 {
1467 struct task_struct *p = q->task;
1468
1469 if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
1470 return;
1471
1472 get_task_struct(p);
1473 __unqueue_futex(q);
1474 /*
1475 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
1476 * is written, without taking any locks. This is possible in the event
1477 * of a spurious wakeup, for example. A memory barrier is required here
1478 * to prevent the following store to lock_ptr from getting ahead of the
1479 * plist_del in __unqueue_futex().
1480 */
1481 smp_store_release(&q->lock_ptr, NULL);
1482
1483 /*
1484 * Queue the task for later wakeup, after we've released
1485 * the hb->lock. wake_q_add_safe() consumes our reference to p.
1486 */
1487 wake_q_add_safe(wake_q, p);
1488 }
1489
1490 /*
1491 * Caller must hold a reference on @pi_state.
1492 */
1493 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
1494 {
1495 u32 uninitialized_var(curval), newval;
1496 struct task_struct *new_owner;
1497 bool postunlock = false;
1498 DEFINE_WAKE_Q(wake_q);
1499 int ret = 0;
1500
1501 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
1502 if (WARN_ON_ONCE(!new_owner)) {
1503 /*
1504 * As per the comment in futex_unlock_pi() this should not happen.
1505 *
1506 * When this happens, give up our locks and try again, giving
1507 * the futex_lock_pi() instance time to complete, either by
1508 * waiting on the rtmutex or removing itself from the futex
1509 * queue.
1510 */
1511 ret = -EAGAIN;
1512 goto out_unlock;
1513 }
1514
1515 /*
1516 * We pass it to the next owner. The WAITERS bit is always kept
1517 * enabled while there is PI state around. We clean up the owner
1518 * died bit, because we are the owner.
1519 */
1520 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
1521
1522 if (unlikely(should_fail_futex(true)))
1523 ret = -EFAULT;
1524 else
1525 ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
1526 if (!ret && (curval != uval)) {
1527 /*
1528 * If an unconditional UNLOCK_PI operation (user space did not
1529 * try the TID->0 transition) raced with a waiter setting the
1530 * FUTEX_WAITERS flag between get_user() and locking the hash
1531 * bucket lock, retry the operation.
1532 */
1533 if ((FUTEX_TID_MASK & curval) == uval)
1534 ret = -EAGAIN;
1535 else
1536 ret = -EINVAL;
1537 }
1538
1539 if (ret)
1540 goto out_unlock;
1541
1542 /*
1543 * This is a point of no return; once we modify the uval there is no
1544 * going back and subsequent operations must not fail.
1545 */
1546
1547 raw_spin_lock(&pi_state->owner->pi_lock);
1548 WARN_ON(list_empty(&pi_state->list));
1549 list_del_init(&pi_state->list);
1550 raw_spin_unlock(&pi_state->owner->pi_lock);
1551
1552 raw_spin_lock(&new_owner->pi_lock);
1553 WARN_ON(!list_empty(&pi_state->list));
1554 list_add(&pi_state->list, &new_owner->pi_state_list);
1555 pi_state->owner = new_owner;
1556 raw_spin_unlock(&new_owner->pi_lock);
1557
1558 postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
1559
1560 out_unlock:
1561 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1562
1563 if (postunlock)
1564 rt_mutex_postunlock(&wake_q);
1565
1566 return ret;
1567 }
1568
1569 /*
1570 * Express the locking dependencies for lockdep:
1571 */
1572 static inline void
1573 double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1574 {
1575 if (hb1 <= hb2) {
1576 spin_lock(&hb1->lock);
1577 if (hb1 < hb2)
1578 spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
1579 } else { /* hb1 > hb2 */
1580 spin_lock(&hb2->lock);
1581 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
1582 }
1583 }
1584
1585 static inline void
1586 double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1587 {
1588 spin_unlock(&hb1->lock);
1589 if (hb1 != hb2)
1590 spin_unlock(&hb2->lock);
1591 }
1592
1593 /*
1594 * Wake up waiters matching bitset queued on this futex (uaddr).
1595 */
1596 static int
1597 futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
1598 {
1599 struct futex_hash_bucket *hb;
1600 struct futex_q *this, *next;
1601 union futex_key key = FUTEX_KEY_INIT;
1602 int ret;
1603 DEFINE_WAKE_Q(wake_q);
1604
1605 if (!bitset)
1606 return -EINVAL;
1607
1608 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ);
1609 if (unlikely(ret != 0))
1610 goto out;
1611
1612 hb = hash_futex(&key);
1613
1614 /* Make sure we really have tasks to wake up */
1615 if (!hb_waiters_pending(hb))
1616 goto out_put_key;
1617
1618 spin_lock(&hb->lock);
1619
1620 plist_for_each_entry_safe(this, next, &hb->chain, list) {
1621 if (match_futex (&this->key, &key)) {
1622 if (this->pi_state || this->rt_waiter) {
1623 ret = -EINVAL;
1624 break;
1625 }
1626
1627 /* Check if one of the bits is set in both bitsets */
1628 if (!(this->bitset & bitset))
1629 continue;
1630
1631 mark_wake_futex(&wake_q, this);
1632 if (++ret >= nr_wake)
1633 break;
1634 }
1635 }
1636
1637 spin_unlock(&hb->lock);
1638 wake_up_q(&wake_q);
1639 out_put_key:
1640 put_futex_key(&key);
1641 out:
1642 return ret;
1643 }
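/*
 * Editor's illustration (not part of the original file): the bitset
 * intersection test above is what pairs FUTEX_WAIT_BITSET with
 * FUTEX_WAKE_BITSET; plain FUTEX_WAIT queues with
 * FUTEX_BITSET_MATCH_ANY, so it matches every wake. A hypothetical
 * two-channel use:
 *
 *	// waiter A on channel bit 0x1, waiter B on channel bit 0x2
 *	syscall(SYS_futex, &word, FUTEX_WAIT_BITSET, val, NULL, NULL, 0x1);
 *	syscall(SYS_futex, &word, FUTEX_WAIT_BITSET, val, NULL, NULL, 0x2);
 *
 *	// wakes only waiter B: (0x2 & 0x2) != 0 but (0x1 & 0x2) == 0
 *	syscall(SYS_futex, &word, FUTEX_WAKE_BITSET, INT_MAX,
 *		NULL, NULL, 0x2);
 */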
1644
1645 static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
1646 {
1647 unsigned int op = (encoded_op & 0x70000000) >> 28;
1648 unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
1649 int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
1650 int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
1651 int oldval, ret;
1652
1653 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
1654 if (oparg < 0 || oparg > 31) {
1655 char comm[sizeof(current->comm)];
1656 /*
1657 * kill this print and return -EINVAL when userspace
1658 * is sane again
1659 */
1660 pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
1661 get_task_comm(comm, current), oparg);
1662 oparg &= 31;
1663 }
1664 oparg = 1 << oparg;
1665 }
1666
1667 if (!access_ok(uaddr, sizeof(u32)))
1668 return -EFAULT;
1669
1670 ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
1671 if (ret)
1672 return ret;
1673
1674 switch (cmp) {
1675 case FUTEX_OP_CMP_EQ:
1676 return oldval == cmparg;
1677 case FUTEX_OP_CMP_NE:
1678 return oldval != cmparg;
1679 case FUTEX_OP_CMP_LT:
1680 return oldval < cmparg;
1681 case FUTEX_OP_CMP_GE:
1682 return oldval >= cmparg;
1683 case FUTEX_OP_CMP_LE:
1684 return oldval <= cmparg;
1685 case FUTEX_OP_CMP_GT:
1686 return oldval > cmparg;
1687 default:
1688 return -ENOSYS;
1689 }
1690 }
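/*
 * Editor's illustration (not part of the original file): the decode above
 * matches the uapi FUTEX_OP() encoding from <linux/futex.h>:
 *
 *	FUTEX_OP(op, oparg, cmp, cmparg) ==
 *		((op & 0xf) << 28) | ((cmp & 0xf) << 24) |
 *		((oparg & 0xfff) << 12) | (cmparg & 0xfff)
 *
 * e.g. FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0) == 0x15001000
 * means: atomically oldval = *uaddr2, *uaddr2 = oldval + 1, and the
 * caller (futex_wake_op() below) wakes waiters on uaddr2 iff
 * oldval > 0.
 */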
1691
1692 /*
1693 * Wake up all waiters hashed on the physical page that is mapped
1694 * to this virtual address:
1695 */
1696 static int
1697 futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
1698 int nr_wake, int nr_wake2, int op)
1699 {
1700 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1701 struct futex_hash_bucket *hb1, *hb2;
1702 struct futex_q *this, *next;
1703 int ret, op_ret;
1704 DEFINE_WAKE_Q(wake_q);
1705
1706 retry:
1707 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
1708 if (unlikely(ret != 0))
1709 goto out;
1710 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
1711 if (unlikely(ret != 0))
1712 goto out_put_key1;
1713
1714 hb1 = hash_futex(&key1);
1715 hb2 = hash_futex(&key2);
1716
1717 retry_private:
1718 double_lock_hb(hb1, hb2);
1719 op_ret = futex_atomic_op_inuser(op, uaddr2);
1720 if (unlikely(op_ret < 0)) {
1721 double_unlock_hb(hb1, hb2);
1722
1723 if (!IS_ENABLED(CONFIG_MMU) ||
1724 unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) {
1725 /*
1726 * we don't get EFAULT from MMU faults if we don't have
1727 * an MMU, but we might get them from range checking
1728 */
1729 ret = op_ret;
1730 goto out_put_keys;
1731 }
1732
1733 if (op_ret == -EFAULT) {
1734 ret = fault_in_user_writeable(uaddr2);
1735 if (ret)
1736 goto out_put_keys;
1737 }
1738
1739 if (!(flags & FLAGS_SHARED)) {
1740 cond_resched();
1741 goto retry_private;
1742 }
1743
1744 put_futex_key(&key2);
1745 put_futex_key(&key1);
1746 cond_resched();
1747 goto retry;
1748 }
1749
1750 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1751 if (match_futex (&this->key, &key1)) {
1752 if (this->pi_state || this->rt_waiter) {
1753 ret = -EINVAL;
1754 goto out_unlock;
1755 }
1756 mark_wake_futex(&wake_q, this);
1757 if (++ret >= nr_wake)
1758 break;
1759 }
1760 }
1761
1762 if (op_ret > 0) {
1763 op_ret = 0;
1764 plist_for_each_entry_safe(this, next, &hb2->chain, list) {
1765 if (match_futex (&this->key, &key2)) {
1766 if (this->pi_state || this->rt_waiter) {
1767 ret = -EINVAL;
1768 goto out_unlock;
1769 }
1770 mark_wake_futex(&wake_q, this);
1771 if (++op_ret >= nr_wake2)
1772 break;
1773 }
1774 }
1775 ret += op_ret;
1776 }
1777
1778 out_unlock:
1779 double_unlock_hb(hb1, hb2);
1780 wake_up_q(&wake_q);
1781 out_put_keys:
1782 put_futex_key(&key2);
1783 out_put_key1:
1784 put_futex_key(&key1);
1785 out:
1786 return ret;
1787 }
1788
1789 /**
1790 * requeue_futex() - Requeue a futex_q from one hb to another
1791 * @q: the futex_q to requeue
1792 * @hb1: the source hash_bucket
1793 * @hb2: the target hash_bucket
1794 * @key2: the new key for the requeued futex_q
1795 */
1796 static inline
1797 void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1798 struct futex_hash_bucket *hb2, union futex_key *key2)
1799 {
1800
1801 /*
1802 * If key1 and key2 hash to the same bucket, no need to
1803 * requeue.
1804 */
1805 if (likely(&hb1->chain != &hb2->chain)) {
1806 plist_del(&q->list, &hb1->chain);
1807 hb_waiters_dec(hb1);
1808 hb_waiters_inc(hb2);
1809 plist_add(&q->list, &hb2->chain);
1810 q->lock_ptr = &hb2->lock;
1811 }
1812 get_futex_key_refs(key2);
1813 q->key = *key2;
1814 }
1815
1816 /**
1817 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1818 * @q: the futex_q
1819 * @key: the key of the requeue target futex
1820 * @hb: the hash_bucket of the requeue target futex
1821 *
1822 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1823 * target futex if it is uncontended or via a lock steal. Set the futex_q key
1824 * to the requeue target futex so the waiter can detect the wakeup on the right
1825 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
1826 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
1827 * to protect access to the pi_state to fixup the owner later. Must be called
1828 * with both q->lock_ptr and hb->lock held.
1829 */
1830 static inline
1831 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1832 struct futex_hash_bucket *hb)
1833 {
1834 get_futex_key_refs(key);
1835 q->key = *key;
1836
1837 __unqueue_futex(q);
1838
1839 WARN_ON(!q->rt_waiter);
1840 q->rt_waiter = NULL;
1841
1842 q->lock_ptr = &hb->lock;
1843
1844 wake_up_state(q->task, TASK_NORMAL);
1845 }
1846
1847 /**
1848 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1849 * @pifutex: the user address of the to futex
1850 * @hb1: the from futex hash bucket, must be locked by the caller
1851 * @hb2: the to futex hash bucket, must be locked by the caller
1852 * @key1: the from futex key
1853 * @key2: the to futex key
1854 * @ps: address to store the pi_state pointer
1855 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
1856 *
1857 * Try and get the lock on behalf of the top waiter if we can do it atomically.
1858 * Wake the top waiter if we succeed. If the caller specified set_waiters,
1859 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1860 * hb1 and hb2 must be held by the caller.
1861 *
1862 * Return:
1863 * - 0 - failed to acquire the lock atomically;
1864 * - >0 - acquired the lock, return value is vpid of the top_waiter
1865 * - <0 - error
1866 */
1867 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1868 struct futex_hash_bucket *hb1,
1869 struct futex_hash_bucket *hb2,
1870 union futex_key *key1, union futex_key *key2,
1871 struct futex_pi_state **ps, int set_waiters)
1872 {
1873 struct futex_q *top_waiter = NULL;
1874 u32 curval;
1875 int ret, vpid;
1876
1877 if (get_futex_value_locked(&curval, pifutex))
1878 return -EFAULT;
1879
1880 if (unlikely(should_fail_futex(true)))
1881 return -EFAULT;
1882
1883 /*
1884 * Find the top_waiter and determine if there are additional waiters.
1885 * If the caller intends to requeue more than 1 waiter to pifutex,
1886 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1887 * as we have means to handle the possible fault. If not, don't set
1888 * the bit unnecessarily as it will force the subsequent unlock to enter
1889 * the kernel.
1890 */
1891 top_waiter = futex_top_waiter(hb1, key1);
1892
1893 /* There are no waiters, nothing for us to do. */
1894 if (!top_waiter)
1895 return 0;
1896
1897 /* Ensure we requeue to the expected futex. */
1898 if (!match_futex(top_waiter->requeue_pi_key, key2))
1899 return -EINVAL;
1900
1901 /*
1902 * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
1903 * the contended case or if set_waiters is 1. The pi_state is returned
1904 * in ps in contended cases.
1905 */
1906 vpid = task_pid_vnr(top_waiter->task);
1907 ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1908 set_waiters);
1909 if (ret == 1) {
1910 requeue_pi_wake_futex(top_waiter, key2, hb2);
1911 return vpid;
1912 }
1913 return ret;
1914 }
1915
1916 /**
1917 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1918 * @uaddr1: source futex user address
1919 * @flags: futex flags (FLAGS_SHARED, etc.)
1920 * @uaddr2: target futex user address
1921 * @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
1922 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
1923 * @cmpval: @uaddr1 expected value (or %NULL)
1924 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
1925 * pi futex (pi to pi requeue is not supported)
1926 *
1927 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1928 * uaddr2 atomically on behalf of the top waiter.
1929 *
1930 * Return:
1931 * - >=0 - on success, the number of tasks requeued or woken;
1932 * - <0 - on error
1933 */
1934 static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1935 u32 __user *uaddr2, int nr_wake, int nr_requeue,
1936 u32 *cmpval, int requeue_pi)
1937 {
1938 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1939 int drop_count = 0, task_count = 0, ret;
1940 struct futex_pi_state *pi_state = NULL;
1941 struct futex_hash_bucket *hb1, *hb2;
1942 struct futex_q *this, *next;
1943 DEFINE_WAKE_Q(wake_q);
1944
1945 if (nr_wake < 0 || nr_requeue < 0)
1946 return -EINVAL;
1947
1948 /*
1949 * When PI not supported: return -ENOSYS if requeue_pi is true,
1950 * consequently the compiler knows requeue_pi is always false past
1951 * this point which will optimize away all the conditional code
1952 * further down.
1953 */
1954 if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
1955 return -ENOSYS;
1956
1957 if (requeue_pi) {
1958 /*
1959 * Requeue PI only works on two distinct uaddrs. This
1960 * check is only valid for private futexes. See below.
1961 */
1962 if (uaddr1 == uaddr2)
1963 return -EINVAL;
1964
1965 /*
1966 * requeue_pi requires a pi_state, try to allocate it now
1967 * without any locks in case it fails.
1968 */
1969 if (refill_pi_state_cache())
1970 return -ENOMEM;
1971 /*
1972 * requeue_pi must wake as many tasks as it can, up to nr_wake
1973 * + nr_requeue, since it acquires the rt_mutex prior to
1974 * returning to userspace, so as to not leave the rt_mutex with
1975 * waiters and no owner. However, second and third wake-ups
1976 * cannot be predicted as they involve race conditions with the
1977 * first wake and a fault while looking up the pi_state. Both
1978 * pthread_cond_signal() and pthread_cond_broadcast() should
1979 * use nr_wake=1.
1980 */
1981 if (nr_wake != 1)
1982 return -EINVAL;
1983 }
1984
1985 retry:
1986 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
1987 if (unlikely(ret != 0))
1988 goto out;
1989 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
1990 requeue_pi ? FUTEX_WRITE : FUTEX_READ);
1991 if (unlikely(ret != 0))
1992 goto out_put_key1;
1993
1994 /*
1995 * The check above which compares uaddrs is not sufficient for
1996 * shared futexes. We need to compare the keys:
1997 */
1998 if (requeue_pi && match_futex(&key1, &key2)) {
1999 ret = -EINVAL;
2000 goto out_put_keys;
2001 }
2002
2003 hb1 = hash_futex(&key1);
2004 hb2 = hash_futex(&key2);
2005
2006 retry_private:
2007 hb_waiters_inc(hb2);
2008 double_lock_hb(hb1, hb2);
2009
2010 if (likely(cmpval != NULL)) {
2011 u32 curval;
2012
2013 ret = get_futex_value_locked(&curval, uaddr1);
2014
2015 if (unlikely(ret)) {
2016 double_unlock_hb(hb1, hb2);
2017 hb_waiters_dec(hb2);
2018
2019 ret = get_user(curval, uaddr1);
2020 if (ret)
2021 goto out_put_keys;
2022
2023 if (!(flags & FLAGS_SHARED))
2024 goto retry_private;
2025
2026 put_futex_key(&key2);
2027 put_futex_key(&key1);
2028 goto retry;
2029 }
2030 if (curval != *cmpval) {
2031 ret = -EAGAIN;
2032 goto out_unlock;
2033 }
2034 }
2035
2036 if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
2037 /*
2038 * Attempt to acquire uaddr2 and wake the top waiter. If we
2039 * intend to requeue waiters, force setting the FUTEX_WAITERS
2040 * bit. We force this here where we are able to easily handle
2041 * faults rather than in the requeue loop below.
2042 */
2043 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
2044 &key2, &pi_state, nr_requeue);
2045
2046 /*
2047 * At this point the top_waiter has either taken uaddr2 or is
2048 * waiting on it. If the former, then the pi_state will not
2049 * exist yet, look it up one more time to ensure we have a
2050 * reference to it. If the lock was taken, ret contains the
2051 * vpid of the top waiter task.
2052 * If the lock was not taken, we have pi_state and an initial
2053 * refcount on it. In case of an error we have nothing.
2054 */
2055 if (ret > 0) {
2056 WARN_ON(pi_state);
2057 drop_count++;
2058 task_count++;
2059 /*
2060 * If we acquired the lock, then the user space value
2061 * of uaddr2 should be vpid. It cannot be changed by
2062 * the top waiter as it is blocked on hb2 lock if it
2063 * tries to do so. If something fiddled with it behind
2064 * our back the pi state lookup might unearth it. So
2065 * we rather use the known value than rereading and
2066 * handing potential crap to lookup_pi_state.
2067 *
2068 * If that call succeeds then we have pi_state and an
2069 * initial refcount on it.
2070 */
2071 ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state);
2072 }
2073
2074 switch (ret) {
2075 case 0:
2076 /* We hold a reference on the pi state. */
2077 break;
2078
2079 /* If the above failed, then pi_state is NULL */
2080 case -EFAULT:
2081 double_unlock_hb(hb1, hb2);
2082 hb_waiters_dec(hb2);
2083 put_futex_key(&key2);
2084 put_futex_key(&key1);
2085 ret = fault_in_user_writeable(uaddr2);
2086 if (!ret)
2087 goto retry;
2088 goto out;
2089 case -EAGAIN:
2090 /*
2091 * Two reasons for this:
2092 * - Owner is exiting and we just wait for the
2093 * exit to complete.
2094 * - The user space value changed.
2095 */
2096 double_unlock_hb(hb1, hb2);
2097 hb_waiters_dec(hb2);
2098 put_futex_key(&key2);
2099 put_futex_key(&key1);
2100 cond_resched();
2101 goto retry;
2102 default:
2103 goto out_unlock;
2104 }
2105 }
2106
2107 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
2108 if (task_count - nr_wake >= nr_requeue)
2109 break;
2110
2111 if (!match_futex(&this->key, &key1))
2112 continue;
2113
2114 /*
2115 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
2116 * be paired with each other and no other futex ops.
2117 *
2118 * We should never be requeueing a futex_q with a pi_state,
2119 * which is awaiting a futex_unlock_pi().
2120 */
2121 if ((requeue_pi && !this->rt_waiter) ||
2122 (!requeue_pi && this->rt_waiter) ||
2123 this->pi_state) {
2124 ret = -EINVAL;
2125 break;
2126 }
2127
2128 /*
2129 * Wake nr_wake waiters. For requeue_pi, if we acquired the
2130 * lock, we already woke the top_waiter. If not, it will be
2131 * woken by futex_unlock_pi().
2132 */
2133 if (++task_count <= nr_wake && !requeue_pi) {
2134 mark_wake_futex(&wake_q, this);
2135 continue;
2136 }
2137
2138 /* Ensure we requeue to the expected futex for requeue_pi. */
2139 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
2140 ret = -EINVAL;
2141 break;
2142 }
2143
2144 /*
2145 * Requeue nr_requeue waiters and possibly one more in the case
2146 * of requeue_pi if we couldn't acquire the lock atomically.
2147 */
2148 if (requeue_pi) {
2149 /*
2150 * Prepare the waiter to take the rt_mutex. Take a
2151 * refcount on the pi_state and store the pointer in
2152 * the futex_q object of the waiter.
2153 */
2154 get_pi_state(pi_state);
2155 this->pi_state = pi_state;
2156 ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
2157 this->rt_waiter,
2158 this->task);
2159 if (ret == 1) {
2160 /*
2161 * We got the lock. We do neither drop the
2162 * refcount on pi_state nor clear
2163 * this->pi_state because the waiter needs the
2164 * pi_state for cleaning up the user space
2165 * value. It will drop the refcount after
2166 * doing so.
2167 */
2168 requeue_pi_wake_futex(this, &key2, hb2);
2169 drop_count++;
2170 continue;
2171 } else if (ret) {
2172 /*
2173 * rt_mutex_start_proxy_lock() detected a
2174 * potential deadlock when we tried to queue
2175 * that waiter. Drop the pi_state reference
2176 * which we took above and remove the pointer
2177 * to the state from the waiters futex_q
2178 * object.
2179 */
2180 this->pi_state = NULL;
2181 put_pi_state(pi_state);
2182 /*
2183 * We stop queueing more waiters and let user
2184 * space deal with the mess.
2185 */
2186 break;
2187 }
2188 }
2189 requeue_futex(this, hb1, hb2, &key2);
2190 drop_count++;
2191 }
2192
2193 /*
2194 * We took an extra initial reference to the pi_state either
2195 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
2196 * need to drop it here again.
2197 */
2198 put_pi_state(pi_state);
2199
2200 out_unlock:
2201 double_unlock_hb(hb1, hb2);
2202 wake_up_q(&wake_q);
2203 hb_waiters_dec(hb2);
2204
2205 /*
2206 * drop_futex_key_refs() must be called outside the spinlocks. During
2207 * the requeue we moved futex_q's from the hash bucket at key1 to the
2208 * one at key2 and updated their key pointer. We no longer need to
2209 * hold the references to key1.
2210 */
2211 while (--drop_count >= 0)
2212 drop_futex_key_refs(&key1);
2213
2214 out_put_keys:
2215 put_futex_key(&key2);
2216 out_put_key1:
2217 put_futex_key(&key1);
2218 out:
2219 return ret ? ret : task_count;
2220 }
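
/*
 * Example (editor's sketch, not part of the kernel source): how user
 * space typically drives the non-PI requeue path above. A broadcaster
 * wakes one waiter and requeues the rest onto the mutex word, so they
 * are woken one at a time as the mutex is released instead of
 * stampeding. The futex() wrapper is an assumption for illustration;
 * glibc exports no such symbol.
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <limits.h>
 *	#include <stdint.h>
 *
 *	static long futex(uint32_t *uaddr, int op, uint32_t val,
 *			  uint32_t val2, uint32_t *uaddr2, uint32_t val3)
 *	{
 *		return syscall(SYS_futex, uaddr, op, val, val2, uaddr2, val3);
 *	}
 *
 *	uint32_t cond_seq, mutex;	// hypothetical condvar/mutex words
 *
 *	void broadcast(void)
 *	{
 *		uint32_t old = cond_seq;
 *		// Wake one waiter, requeue up to INT_MAX more from
 *		// &cond_seq to &mutex; fails with EAGAIN if cond_seq
 *		// no longer equals 'old' (cf. the cmpval check above).
 *		futex(&cond_seq, FUTEX_CMP_REQUEUE, 1, INT_MAX,
 *		      &mutex, old);
 *	}
 */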
2221
2222 /* The key must be already stored in q->key. */
2223 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
2224 __acquires(&hb->lock)
2225 {
2226 struct futex_hash_bucket *hb;
2227
2228 hb = hash_futex(&q->key);
2229
2230 /*
2231 * Increment the counter before taking the lock so that
2232 * a potential waker won't miss a task that is about to sleep
2233 * while waiting for the spinlock. This is safe as all queue_lock()
2234 * users end up calling queue_me(). Similarly, for housekeeping,
2235 * decrement the counter at queue_unlock() when some error has
2236 * occurred and we don't end up adding the task to the list.
2237 */
2238 hb_waiters_inc(hb); /* implies smp_mb(); (A) */
2239
2240 q->lock_ptr = &hb->lock;
2241
2242 spin_lock(&hb->lock);
2243 return hb;
2244 }
2245
2246 static inline void
2247 queue_unlock(struct futex_hash_bucket *hb)
2248 __releases(&hb->lock)
2249 {
2250 spin_unlock(&hb->lock);
2251 hb_waiters_dec(hb);
2252 }
2253
2254 static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
2255 {
2256 int prio;
2257
2258 /*
2259 * The priority used to register this element is
2260 * - either the real thread-priority for the real-time threads
2261 * (i.e. threads with a priority lower than MAX_RT_PRIO)
2262 * - or MAX_RT_PRIO for non-RT threads.
2263 * Thus, all RT-threads are woken first in priority order, and
2264 * the others are woken last, in FIFO order.
2265 */
2266 prio = min(current->normal_prio, MAX_RT_PRIO);
2267
2268 plist_node_init(&q->list, prio);
2269 plist_add(&q->list, &hb->chain);
2270 q->task = current;
2271 }
2272
2273 /**
2274 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
2275 * @q: The futex_q to enqueue
2276 * @hb: The destination hash bucket
2277 *
2278 * The hb->lock must be held by the caller, and is released here. A call to
2279 * queue_me() is typically paired with exactly one call to unqueue_me(). The
2280 * exceptions involve the PI related operations, which may use unqueue_me_pi()
2281 * or nothing if the unqueue is done as part of the wake process and the unqueue
2282 * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
2283 * an example).
2284 */
2285 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
2286 __releases(&hb->lock)
2287 {
2288 __queue_me(q, hb);
2289 spin_unlock(&hb->lock);
2290 }
2291
2292 /**
2293 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
2294 * @q: The futex_q to unqueue
2295 *
2296 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
2297 * be paired with exactly one earlier call to queue_me().
2298 *
2299 * Return:
2300 * - 1 - if the futex_q was still queued (and we unqueued it);
2301 * - 0 - if the futex_q was already removed by the waking thread
2302 */
2303 static int unqueue_me(struct futex_q *q)
2304 {
2305 spinlock_t *lock_ptr;
2306 int ret = 0;
2307
2308 /* In the common case we don't take the spinlock, which is nice. */
2309 retry:
2310 /*
2311 * q->lock_ptr can change between this read and the following spin_lock.
2312 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
2313 * optimizing lock_ptr out of the logic below.
2314 */
2315 lock_ptr = READ_ONCE(q->lock_ptr);
2316 if (lock_ptr != NULL) {
2317 spin_lock(lock_ptr);
2318 /*
2319 * q->lock_ptr can change between reading it and
2320 * spin_lock(), causing us to take the wrong lock. This
2321 * corrects the race condition.
2322 *
2323 * Reasoning goes like this: if we have the wrong lock,
2324 * q->lock_ptr must have changed (maybe several times)
2325 * between reading it and the spin_lock(). It can
2326 * change again after the spin_lock() but only if it was
2327 * already changed before the spin_lock(). It cannot,
2328 * however, change back to the original value. Therefore
2329 * we can detect whether we acquired the correct lock.
2330 */
2331 if (unlikely(lock_ptr != q->lock_ptr)) {
2332 spin_unlock(lock_ptr);
2333 goto retry;
2334 }
2335 __unqueue_futex(q);
2336
2337 BUG_ON(q->pi_state);
2338
2339 spin_unlock(lock_ptr);
2340 ret = 1;
2341 }
2342
2343 drop_futex_key_refs(&q->key);
2344 return ret;
2345 }
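
/*
 * For orientation, the canonical life cycle of the queueing API above,
 * as futex_wait() uses it further down (an editor's simplified sketch,
 * not a real caller):
 *
 *	struct futex_q q = futex_q_init;
 *	struct futex_hash_bucket *hb;
 *
 *	hb = queue_lock(&q);	// take hb->lock, bump the waiter count
 *	// ... re-read and validate the user space value under hb->lock ...
 *	queue_me(&q, hb);	// enqueue on the plist, drop hb->lock
 *	schedule();		// until woken, signalled or timed out
 *	if (!unqueue_me(&q))	// 0: a waker already dequeued us
 *		return 0;	// we were woken
 *	// 1: still queued, we dequeued ourselves (timeout or signal)
 */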
2346
2347 /*
2348 * PI futexes can not be requeued and must remove themself from the
2349 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
2350 * and dropped here.
2351 */
2352 static void unqueue_me_pi(struct futex_q *q)
2353 __releases(q->lock_ptr)
2354 {
2355 __unqueue_futex(q);
2356
2357 BUG_ON(!q->pi_state);
2358 put_pi_state(q->pi_state);
2359 q->pi_state = NULL;
2360
2361 spin_unlock(q->lock_ptr);
2362 }
2363
2364 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2365 struct task_struct *argowner)
2366 {
2367 struct futex_pi_state *pi_state = q->pi_state;
2368 u32 uval, uninitialized_var(curval), newval;
2369 struct task_struct *oldowner, *newowner;
2370 u32 newtid;
2371 int ret, err = 0;
2372
2373 lockdep_assert_held(q->lock_ptr);
2374
2375 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2376
2377 oldowner = pi_state->owner;
2378
2379 /*
2380 * We are here because either:
2381 *
2382 * - we stole the lock and pi_state->owner needs updating to reflect
2383 * that (@argowner == current),
2384 *
2385 * or:
2386 *
2387 * - someone stole our lock and we need to fix things to point to the
2388 * new owner (@argowner == NULL).
2389 *
2390 * Either way, we have to replace the TID in the user space variable.
2391 * This must be atomic as we have to preserve the owner died bit here.
2392 *
2393 * Note: We write the user space value _before_ changing the pi_state
2394 * because we can fault here. Imagine swapped out pages or a fork
2395 * that marked all the anonymous memory readonly for cow.
2396 *
2397 * Modifying pi_state _before_ the user space value would leave the
2398 * pi_state in an inconsistent state when we fault here, because we
2399 * need to drop the locks to handle the fault. This might be observed
2400 * in the PID check in lookup_pi_state.
2401 */
2402 retry:
2403 if (!argowner) {
2404 if (oldowner != current) {
2405 /*
2406 * We raced against a concurrent self; things are
2407 * already fixed up. Nothing to do.
2408 */
2409 ret = 0;
2410 goto out_unlock;
2411 }
2412
2413 if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
2414 /* We got the lock after all, nothing to fix. */
2415 ret = 0;
2416 goto out_unlock;
2417 }
2418
2419 /*
2420 * Since we just failed the trylock; there must be an owner.
2421 */
2422 newowner = rt_mutex_owner(&pi_state->pi_mutex);
2423 BUG_ON(!newowner);
2424 } else {
2425 WARN_ON_ONCE(argowner != current);
2426 if (oldowner == current) {
2427 /*
2428 * We raced against a concurrent self; things are
2429 * already fixed up. Nothing to do.
2430 */
2431 ret = 0;
2432 goto out_unlock;
2433 }
2434 newowner = argowner;
2435 }
2436
2437 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
2438 /* Owner died? */
2439 if (!pi_state->owner)
2440 newtid |= FUTEX_OWNER_DIED;
2441
2442 err = get_futex_value_locked(&uval, uaddr);
2443 if (err)
2444 goto handle_err;
2445
2446 for (;;) {
2447 newval = (uval & FUTEX_OWNER_DIED) | newtid;
2448
2449 err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
2450 if (err)
2451 goto handle_err;
2452
2453 if (curval == uval)
2454 break;
2455 uval = curval;
2456 }
2457
2458 /*
2459 * We fixed up user space. Now we need to fix the pi_state
2460 * itself.
2461 */
2462 if (pi_state->owner != NULL) {
2463 raw_spin_lock(&pi_state->owner->pi_lock);
2464 WARN_ON(list_empty(&pi_state->list));
2465 list_del_init(&pi_state->list);
2466 raw_spin_unlock(&pi_state->owner->pi_lock);
2467 }
2468
2469 pi_state->owner = newowner;
2470
2471 raw_spin_lock(&newowner->pi_lock);
2472 WARN_ON(!list_empty(&pi_state->list));
2473 list_add(&pi_state->list, &newowner->pi_state_list);
2474 raw_spin_unlock(&newowner->pi_lock);
2475 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2476
2477 return 0;
2478
2479 /*
2480 * In order to reschedule or handle a page fault, we need to drop the
2481 * locks here. In the case of a fault, this gives the other task
2482 * (either the highest priority waiter itself or the task which stole
2483 * the rtmutex) the chance to try the fixup of the pi_state. So once we
2484 * are back from handling the fault we need to check the pi_state after
2485 * reacquiring the locks and before trying to do another fixup. When
2486 * the fixup has been done already we simply return.
2487 *
2488 * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
2489 * drop hb->lock since the caller owns the hb -> futex_q relation.
2490 * Dropping the pi_mutex->wait_lock requires revalidating the state.
2491 */
2492 handle_err:
2493 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2494 spin_unlock(q->lock_ptr);
2495
2496 switch (err) {
2497 case -EFAULT:
2498 ret = fault_in_user_writeable(uaddr);
2499 break;
2500
2501 case -EAGAIN:
2502 cond_resched();
2503 ret = 0;
2504 break;
2505
2506 default:
2507 WARN_ON_ONCE(1);
2508 ret = err;
2509 break;
2510 }
2511
2512 spin_lock(q->lock_ptr);
2513 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2514
2515 /*
2516 * Check if someone else fixed it for us:
2517 */
2518 if (pi_state->owner != oldowner) {
2519 ret = 0;
2520 goto out_unlock;
2521 }
2522
2523 if (ret)
2524 goto out_unlock;
2525
2526 goto retry;
2527
2528 out_unlock:
2529 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2530 return ret;
2531 }
2532
2533 static long futex_wait_restart(struct restart_block *restart);
2534
2535 /**
2536 * fixup_owner() - Post lock pi_state and corner case management
2537 * @uaddr: user address of the futex
2538 * @q: futex_q (contains pi_state and access to the rt_mutex)
2539 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
2540 *
2541 * After attempting to lock an rt_mutex, this function is called to cleanup
2542 * the pi_state owner as well as handle race conditions that may allow us to
2543 * acquire the lock. Must be called with the hb lock held.
2544 *
2545 * Return:
2546 * - 1 - success, lock taken;
2547 * - 0 - success, lock not taken;
2548 * - <0 - on error (-EFAULT)
2549 */
2550 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
2551 {
2552 int ret = 0;
2553
2554 if (locked) {
2555 /*
2556 * Got the lock. We might not be the anticipated owner if we
2557 * did a lock-steal - fix up the PI-state in that case:
2558 *
2559 * Speculative pi_state->owner read (we don't hold wait_lock);
2560 * since we own the lock, pi_state->owner == current is the
2561 * stable state, anything else needs more attention.
2562 */
2563 if (q->pi_state->owner != current)
2564 ret = fixup_pi_state_owner(uaddr, q, current);
2565 goto out;
2566 }
2567
2568 /*
2569 * If we didn't get the lock; check if anybody stole it from us. In
2570 * that case, we need to fix up the uval to point to them instead of
2571 * us, otherwise bad things happen. [10]
2572 *
2573 * Another speculative read; pi_state->owner == current is unstable
2574 * but needs our attention.
2575 */
2576 if (q->pi_state->owner == current) {
2577 ret = fixup_pi_state_owner(uaddr, q, NULL);
2578 goto out;
2579 }
2580
2581 /*
2582 * Paranoia check. If we did not take the lock, then we should not be
2583 * the owner of the rt_mutex.
2584 */
2585 if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
2586 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
2587 "pi-state %p\n", ret,
2588 q->pi_state->pi_mutex.owner,
2589 q->pi_state->owner);
2590 }
2591
2592 out:
2593 return ret ? ret : locked;
2594 }
2595
2596 /**
2597 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
2598 * @hb: the futex hash bucket, must be locked by the caller
2599 * @q: the futex_q to queue up on
2600 * @timeout: the prepared hrtimer_sleeper, or null for no timeout
2601 */
2602 static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
2603 struct hrtimer_sleeper *timeout)
2604 {
2605 /*
2606 * The task state is guaranteed to be set before another task can
2607 * wake it. set_current_state() is implemented using smp_store_mb() and
2608 * queue_me() calls spin_unlock() upon completion, both serializing
2609 * access to the hash list and forcing another memory barrier.
2610 */
2611 set_current_state(TASK_INTERRUPTIBLE);
2612 queue_me(q, hb);
2613
2614 /* Arm the timer */
2615 if (timeout)
2616 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
2617
2618 /*
2619 * If we have been removed from the hash list, then another task
2620 * has tried to wake us, and we can skip the call to schedule().
2621 */
2622 if (likely(!plist_node_empty(&q->list))) {
2623 /*
2624 * If the timer has already expired, current will already be
2625 * flagged for rescheduling. Only call schedule if there
2626 * is no timeout, or if it has yet to expire.
2627 */
2628 if (!timeout || timeout->task)
2629 freezable_schedule();
2630 }
2631 __set_current_state(TASK_RUNNING);
2632 }
2633
2634 /**
2635 * futex_wait_setup() - Prepare to wait on a futex
2636 * @uaddr: the futex userspace address
2637 * @val: the expected value
2638 * @flags: futex flags (FLAGS_SHARED, etc.)
2639 * @q: the associated futex_q
2640 * @hb: storage for hash_bucket pointer to be returned to caller
2641 *
2642 * Setup the futex_q and locate the hash_bucket. Get the futex value and
2643 * compare it with the expected value. Handle atomic faults internally.
2644 * Return with the hb lock held and a q.key reference on success, and unlocked
2645 * with no q.key reference on failure.
2646 *
2647 * Return:
2648 * - 0 - uaddr contains val and hb has been locked;
2649 * - <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
2650 */
2651 static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
2652 struct futex_q *q, struct futex_hash_bucket **hb)
2653 {
2654 u32 uval;
2655 int ret;
2656
2657 /*
2658 * Access the page AFTER the hash-bucket is locked.
2659 * Order is important:
2660 *
2661 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
2662 * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
2663 *
2664 * The basic logical guarantee of a futex is that it blocks ONLY
2665 * if cond(var) is known to be true at the time of blocking, for
2666 * any cond. If we locked the hash-bucket after testing *uaddr, that
2667 * would open a race condition where we could block indefinitely with
2668 * cond(var) false, which would violate the guarantee.
2669 *
2670 * On the other hand, we insert q and release the hash-bucket only
2671 * after testing *uaddr. This guarantees that futex_wait() will NOT
2672 * absorb a wakeup if *uaddr does not match the desired values
2673 * while the syscall executes.
2674 */
2675 retry:
2676 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, FUTEX_READ);
2677 if (unlikely(ret != 0))
2678 return ret;
2679
2680 retry_private:
2681 *hb = queue_lock(q);
2682
2683 ret = get_futex_value_locked(&uval, uaddr);
2684
2685 if (ret) {
2686 queue_unlock(*hb);
2687
2688 ret = get_user(uval, uaddr);
2689 if (ret)
2690 goto out;
2691
2692 if (!(flags & FLAGS_SHARED))
2693 goto retry_private;
2694
2695 put_futex_key(&q->key);
2696 goto retry;
2697 }
2698
2699 if (uval != val) {
2700 queue_unlock(*hb);
2701 ret = -EWOULDBLOCK;
2702 }
2703
2704 out:
2705 if (ret)
2706 put_futex_key(&q->key);
2707 return ret;
2708 }
2709
2710 static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
2711 ktime_t *abs_time, u32 bitset)
2712 {
2713 struct hrtimer_sleeper timeout, *to;
2714 struct restart_block *restart;
2715 struct futex_hash_bucket *hb;
2716 struct futex_q q = futex_q_init;
2717 int ret;
2718
2719 if (!bitset)
2720 return -EINVAL;
2721 q.bitset = bitset;
2722
2723 to = futex_setup_timer(abs_time, &timeout, flags,
2724 current->timer_slack_ns);
2725 retry:
2726 /*
2727 * Prepare to wait on uaddr. On success, holds hb lock and increments
2728 * q.key refs.
2729 */
2730 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2731 if (ret)
2732 goto out;
2733
2734 /* queue_me and wait for wakeup, timeout, or a signal. */
2735 futex_wait_queue_me(hb, &q, to);
2736
2737 /* If we were woken (and unqueued), we succeeded, whatever. */
2738 ret = 0;
2739 /* unqueue_me() drops q.key ref */
2740 if (!unqueue_me(&q))
2741 goto out;
2742 ret = -ETIMEDOUT;
2743 if (to && !to->task)
2744 goto out;
2745
2746 /*
2747 * We expect signal_pending(current), but we might be the
2748 * victim of a spurious wakeup as well.
2749 */
2750 if (!signal_pending(current))
2751 goto retry;
2752
2753 ret = -ERESTARTSYS;
2754 if (!abs_time)
2755 goto out;
2756
2757 restart = &current->restart_block;
2758 restart->fn = futex_wait_restart;
2759 restart->futex.uaddr = uaddr;
2760 restart->futex.val = val;
2761 restart->futex.time = *abs_time;
2762 restart->futex.bitset = bitset;
2763 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
2764
2765 ret = -ERESTART_RESTARTBLOCK;
2766
2767 out:
2768 if (to) {
2769 hrtimer_cancel(&to->timer);
2770 destroy_hrtimer_on_stack(&to->timer);
2771 }
2772 return ret;
2773 }
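
/*
 * Example (editor's sketch): the user space protocol futex_wait() is
 * built for, matching the waiter/waker ordering documented in
 * futex_wait_setup(). The futex() wrapper and 'flag' word are
 * assumptions for illustration.
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdatomic.h>
 *	#include <stdint.h>
 *
 *	static _Atomic uint32_t flag;
 *
 *	static long futex(void *uaddr, int op, uint32_t val)
 *	{
 *		return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
 *	}
 *
 *	void wait_for_flag(void)
 *	{
 *		while (atomic_load(&flag) == 0)
 *			// Blocks only if the kernel's locked re-read
 *			// still sees 0; otherwise returns -EWOULDBLOCK
 *			// and we retry the load.
 *			futex(&flag, FUTEX_WAIT, 0);
 *	}
 *
 *	void set_flag(void)
 *	{
 *		atomic_store(&flag, 1);
 *		futex(&flag, FUTEX_WAKE, 1);	// wake one waiter
 *	}
 */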
2774
2775
2776 static long futex_wait_restart(struct restart_block *restart)
2777 {
2778 u32 __user *uaddr = restart->futex.uaddr;
2779 ktime_t t, *tp = NULL;
2780
2781 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
2782 t = restart->futex.time;
2783 tp = &t;
2784 }
2785 restart->fn = do_no_restart_syscall;
2786
2787 return (long)futex_wait(uaddr, restart->futex.flags,
2788 restart->futex.val, tp, restart->futex.bitset);
2789 }
2790
2791
2792 /*
2793 * Userspace tried a 0 -> TID atomic transition of the futex value
2794 * and failed. The kernel side here does the whole locking operation:
2795 * if there are waiters then it will block as a consequence of relying
2796 * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
2797 * a 0 value of the futex too.).
2798 *
2799 * Also serves as the futex trylock_pi() operation, with matching semantics.
2800 */
2801 static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
2802 ktime_t *time, int trylock)
2803 {
2804 struct hrtimer_sleeper timeout, *to;
2805 struct futex_pi_state *pi_state = NULL;
2806 struct rt_mutex_waiter rt_waiter;
2807 struct futex_hash_bucket *hb;
2808 struct futex_q q = futex_q_init;
2809 int res, ret;
2810
2811 if (!IS_ENABLED(CONFIG_FUTEX_PI))
2812 return -ENOSYS;
2813
2814 if (refill_pi_state_cache())
2815 return -ENOMEM;
2816
2817 to = futex_setup_timer(time, &timeout, FLAGS_CLOCKRT, 0);
2818
2819 retry:
2820 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE);
2821 if (unlikely(ret != 0))
2822 goto out;
2823
2824 retry_private:
2825 hb = queue_lock(&q);
2826
2827 ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
2828 if (unlikely(ret)) {
2829 /*
2830 * Atomic work succeeded and we got the lock,
2831 * or failed. Either way, we do _not_ block.
2832 */
2833 switch (ret) {
2834 case 1:
2835 /* We got the lock. */
2836 ret = 0;
2837 goto out_unlock_put_key;
2838 case -EFAULT:
2839 goto uaddr_faulted;
2840 case -EAGAIN:
2841 /*
2842 * Two reasons for this:
2843 * - Task is exiting and we just wait for the
2844 * exit to complete.
2845 * - The user space value changed.
2846 */
2847 queue_unlock(hb);
2848 put_futex_key(&q.key);
2849 cond_resched();
2850 goto retry;
2851 default:
2852 goto out_unlock_put_key;
2853 }
2854 }
2855
2856 WARN_ON(!q.pi_state);
2857
2858 /*
2859 * Only actually queue now that the atomic ops are done:
2860 */
2861 __queue_me(&q, hb);
2862
2863 if (trylock) {
2864 ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
2865 /* Fixup the trylock return value: */
2866 ret = ret ? 0 : -EWOULDBLOCK;
2867 goto no_block;
2868 }
2869
2870 rt_mutex_init_waiter(&rt_waiter);
2871
2872 /*
2873 * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
2874 * hold it while doing rt_mutex_start_proxy(), because then it will
2875 * include hb->lock in the blocking chain, even though we will not in
2876 * fact hold it while blocking. This will lead it to report -EDEADLK
2877 * and BUG when futex_unlock_pi() interleaves with this.
2878 *
2879 * Therefore acquire wait_lock while holding hb->lock, but drop the
2880 * latter before calling __rt_mutex_start_proxy_lock(). This
2881 * interleaves with futex_unlock_pi() -- which does a similar lock
2882 * handoff -- such that the latter can observe the futex_q::pi_state
2883 * before __rt_mutex_start_proxy_lock() is done.
2884 */
2885 raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
2886 spin_unlock(q.lock_ptr);
2887 /*
2888 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
2889 * such that futex_unlock_pi() is guaranteed to observe the waiter when
2890 * it sees the futex_q::pi_state.
2891 */
2892 ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
2893 raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
2894
2895 if (ret) {
2896 if (ret == 1)
2897 ret = 0;
2898 goto cleanup;
2899 }
2900
2901 if (unlikely(to))
2902 hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
2903
2904 ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
2905
2906 cleanup:
2907 spin_lock(q.lock_ptr);
2908 /*
2909 * If we failed to acquire the lock (deadlock/signal/timeout), we must
2910 * first acquire the hb->lock before removing the lock from the
2911 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
2912 * lists consistent.
2913 *
2914 * In particular; it is important that futex_unlock_pi() can not
2915 * observe this inconsistency.
2916 */
2917 if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
2918 ret = 0;
2919
2920 no_block:
2921 /*
2922 * Fixup the pi_state owner and possibly acquire the lock if we
2923 * haven't already.
2924 */
2925 res = fixup_owner(uaddr, &q, !ret);
2926 /*
2927 * If fixup_owner() returned an error, propagate that. If it acquired
2928 * the lock, clear our -ETIMEDOUT or -EINTR.
2929 */
2930 if (res)
2931 ret = (res < 0) ? res : 0;
2932
2933 /*
2934 * If fixup_owner() faulted and was unable to handle the fault, unlock
2935 * it and return the fault to userspace.
2936 */
2937 if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
2938 pi_state = q.pi_state;
2939 get_pi_state(pi_state);
2940 }
2941
2942 /* Unqueue and drop the lock */
2943 unqueue_me_pi(&q);
2944
2945 if (pi_state) {
2946 rt_mutex_futex_unlock(&pi_state->pi_mutex);
2947 put_pi_state(pi_state);
2948 }
2949
2950 goto out_put_key;
2951
2952 out_unlock_put_key:
2953 queue_unlock(hb);
2954
2955 out_put_key:
2956 put_futex_key(&q.key);
2957 out:
2958 if (to) {
2959 hrtimer_cancel(&to->timer);
2960 destroy_hrtimer_on_stack(&to->timer);
2961 }
2962 return ret != -EINTR ? ret : -ERESTARTNOINTR;
2963
2964 uaddr_faulted:
2965 queue_unlock(hb);
2966
2967 ret = fault_in_user_writeable(uaddr);
2968 if (ret)
2969 goto out_put_key;
2970
2971 if (!(flags & FLAGS_SHARED))
2972 goto retry_private;
2973
2974 put_futex_key(&q.key);
2975 goto retry;
2976 }
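
/*
 * Example (editor's sketch): the user space fast path in front of
 * futex_lock_pi(). The kernel is entered only when the 0 -> TID
 * transition fails; gettid() and the raw syscall are assumptions
 * for illustration.
 *
 *	static _Atomic uint32_t pi_word;	// hypothetical PI futex
 *
 *	void pi_lock(void)
 *	{
 *		uint32_t expected = 0;
 *		uint32_t tid = gettid();
 *
 *		if (atomic_compare_exchange_strong(&pi_word, &expected, tid))
 *			return;		// uncontended, no syscall at all
 *		// Contended: the kernel enqueues us on the rt_mutex,
 *		// boosts the owner (PI) and sets FUTEX_WAITERS.
 *		syscall(SYS_futex, &pi_word, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *	}
 */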
2977
2978 /*
2979 * Userspace attempted a TID -> 0 atomic transition, and failed.
2980 * This is the in-kernel slowpath: we look up the PI state (if any),
2981 * and do the rt-mutex unlock.
2982 */
2983 static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2984 {
2985 u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
2986 union futex_key key = FUTEX_KEY_INIT;
2987 struct futex_hash_bucket *hb;
2988 struct futex_q *top_waiter;
2989 int ret;
2990
2991 if (!IS_ENABLED(CONFIG_FUTEX_PI))
2992 return -ENOSYS;
2993
2994 retry:
2995 if (get_user(uval, uaddr))
2996 return -EFAULT;
2997 /*
2998 * We release only a lock we actually own:
2999 */
3000 if ((uval & FUTEX_TID_MASK) != vpid)
3001 return -EPERM;
3002
3003 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_WRITE);
3004 if (ret)
3005 return ret;
3006
3007 hb = hash_futex(&key);
3008 spin_lock(&hb->lock);
3009
3010 /*
3011 * Check waiters first. We do not trust user space values at
3012 * all and we at least want to know if user space fiddled
3013 * with the futex value instead of blindly unlocking.
3014 */
3015 top_waiter = futex_top_waiter(hb, &key);
3016 if (top_waiter) {
3017 struct futex_pi_state *pi_state = top_waiter->pi_state;
3018
3019 ret = -EINVAL;
3020 if (!pi_state)
3021 goto out_unlock;
3022
3023 /*
3024 * If current does not own the pi_state then the futex is
3025 * inconsistent and user space fiddled with the futex value.
3026 */
3027 if (pi_state->owner != current)
3028 goto out_unlock;
3029
3030 get_pi_state(pi_state);
3031 /*
3032 * By taking wait_lock while still holding hb->lock, we ensure
3033 * there is no point where we hold neither; and therefore
3034 * wake_futex_pi() must observe a state consistent with what we
3035 * observed.
3036 *
3037 * In particular; this forces __rt_mutex_start_proxy() to
3038 * complete such that we're guaranteed to observe the
3039 * rt_waiter. Also see the WARN in wake_futex_pi().
3040 */
3041 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
3042 spin_unlock(&hb->lock);
3043
3044 /* drops pi_state->pi_mutex.wait_lock */
3045 ret = wake_futex_pi(uaddr, uval, pi_state);
3046
3047 put_pi_state(pi_state);
3048
3049 /*
3050 * Success, we're done! No tricky corner cases.
3051 */
3052 if (!ret)
3053 goto out_putkey;
3054 /*
3055 * The atomic access to the futex value generated a
3056 * pagefault, so retry the user-access and the wakeup:
3057 */
3058 if (ret == -EFAULT)
3059 goto pi_faulted;
3060 /*
3061 * An unconditional UNLOCK_PI op raced against a waiter
3062 * setting the FUTEX_WAITERS bit. Try again.
3063 */
3064 if (ret == -EAGAIN)
3065 goto pi_retry;
3066 /*
3067 * wake_futex_pi has detected invalid state. Tell user
3068 * space.
3069 */
3070 goto out_putkey;
3071 }
3072
3073 /*
3074 * We have no kernel internal state, i.e. no waiters in the
3075 * kernel. Waiters which are about to queue themselves are stuck
3076 * on hb->lock. So we can safely ignore them. We do neither
3077 * preserve the WAITERS bit not the OWNER_DIED one. We are the
3078 * owner.
3079 */
3080 if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
3081 spin_unlock(&hb->lock);
3082 switch (ret) {
3083 case -EFAULT:
3084 goto pi_faulted;
3085
3086 case -EAGAIN:
3087 goto pi_retry;
3088
3089 default:
3090 WARN_ON_ONCE(1);
3091 goto out_putkey;
3092 }
3093 }
3094
3095 /*
3096 * If uval has changed, let user space handle it.
3097 */
3098 ret = (curval == uval) ? 0 : -EAGAIN;
3099
3100 out_unlock:
3101 spin_unlock(&hb->lock);
3102 out_putkey:
3103 put_futex_key(&key);
3104 return ret;
3105
3106 pi_retry:
3107 put_futex_key(&key);
3108 cond_resched();
3109 goto retry;
3110
3111 pi_faulted:
3112 put_futex_key(&key);
3113
3114 ret = fault_in_user_writeable(uaddr);
3115 if (!ret)
3116 goto retry;
3117
3118 return ret;
3119 }
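
/*
 * Example (editor's sketch): the matching user space unlock fast path.
 * futex_unlock_pi() above is entered only when the TID -> 0 transition
 * fails because FUTEX_WAITERS is set; 'pi_word' is the hypothetical
 * futex from the locking sketch above.
 *
 *	void pi_unlock(void)
 *	{
 *		uint32_t tid = gettid();
 *
 *		// Succeeds only if we own it and there are no waiters:
 *		if (atomic_compare_exchange_strong(&pi_word, &tid, 0))
 *			return;
 *		// Waiters present: let the kernel hand the rt_mutex and
 *		// the user space value over to the top waiter.
 *		syscall(SYS_futex, &pi_word, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 *	}
 */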
3120
3121 /**
3122 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
3123 * @hb: the hash_bucket futex_q was originally enqueued on
3124 * @q: the futex_q woken while waiting to be requeued
3125 * @key2: the futex_key of the requeue target futex
3126 * @timeout: the timeout associated with the wait (NULL if none)
3127 *
3128 * Detect if the task was woken on the initial futex as opposed to the requeue
3129 * target futex. If so, determine if it was a timeout or a signal that caused
3130 * the wakeup and return the appropriate error code to the caller. Must be
3131 * called with the hb lock held.
3132 *
3133 * Return:
3134 * - 0 - no early wakeup detected;
3135 * - <0 - -ETIMEDOUT or -ERESTARTNOINTR
3136 */
3137 static inline
3138 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
3139 struct futex_q *q, union futex_key *key2,
3140 struct hrtimer_sleeper *timeout)
3141 {
3142 int ret = 0;
3143
3144 /*
3145 * With the hb lock held, we avoid races while we process the wakeup.
3146 * We only need to hold hb (and not hb2) to ensure atomicity as the
3147 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
3148 * It can't be requeued from uaddr2 to something else since we don't
3149 * support a PI aware source futex for requeue.
3150 */
3151 if (!match_futex(&q->key, key2)) {
3152 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
3153 /*
3154 * We were woken prior to requeue by a timeout or a signal.
3155 * Unqueue the futex_q and determine which it was.
3156 */
3157 plist_del(&q->list, &hb->chain);
3158 hb_waiters_dec(hb);
3159
3160 /* Handle spurious wakeups gracefully */
3161 ret = -EWOULDBLOCK;
3162 if (timeout && !timeout->task)
3163 ret = -ETIMEDOUT;
3164 else if (signal_pending(current))
3165 ret = -ERESTARTNOINTR;
3166 }
3167 return ret;
3168 }
3169
3170 /**
3171 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
3172 * @uaddr: the futex we initially wait on (non-pi)
3173 * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
3174 * the same type, no requeueing from private to shared, etc.
3175 * @val: the expected value of uaddr
3176 * @abs_time: absolute timeout
3177 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
3178 * @uaddr2: the pi futex we will take prior to returning to user-space
3179 *
3180 * The caller will wait on uaddr and will be requeued by futex_requeue() to
3181 * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
3182 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
3183 * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
3184 * without one, the pi logic would not know which task to boost/deboost, if
3185 * there was a need to.
3186 *
3187 * We call schedule in futex_wait_queue_me() when we enqueue and return there
3188 * via the following:
3189 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
3190 * 2) wakeup on uaddr2 after a requeue
3191 * 3) signal
3192 * 4) timeout
3193 *
3194 * If 3, cleanup and return -ERESTARTNOINTR.
3195 *
3196 * If 2, we may then block on trying to take the rt_mutex and return via:
3197 * 5) successful lock
3198 * 6) signal
3199 * 7) timeout
3200 * 8) other lock acquisition failure
3201 *
3202 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
3203 *
3204 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
3205 *
3206 * Return:
3207 * - 0 - On success;
3208 * - <0 - On error
3209 */
3210 static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
3211 u32 val, ktime_t *abs_time, u32 bitset,
3212 u32 __user *uaddr2)
3213 {
3214 struct hrtimer_sleeper timeout, *to;
3215 struct futex_pi_state *pi_state = NULL;
3216 struct rt_mutex_waiter rt_waiter;
3217 struct futex_hash_bucket *hb;
3218 union futex_key key2 = FUTEX_KEY_INIT;
3219 struct futex_q q = futex_q_init;
3220 int res, ret;
3221
3222 if (!IS_ENABLED(CONFIG_FUTEX_PI))
3223 return -ENOSYS;
3224
3225 if (uaddr == uaddr2)
3226 return -EINVAL;
3227
3228 if (!bitset)
3229 return -EINVAL;
3230
3231 to = futex_setup_timer(abs_time, &timeout, flags,
3232 current->timer_slack_ns);
3233
3234 /*
3235 * The waiter is allocated on our stack, manipulated by the requeue
3236 * code while we sleep on uaddr.
3237 */
3238 rt_mutex_init_waiter(&rt_waiter);
3239
3240 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
3241 if (unlikely(ret != 0))
3242 goto out;
3243
3244 q.bitset = bitset;
3245 q.rt_waiter = &rt_waiter;
3246 q.requeue_pi_key = &key2;
3247
3248 /*
3249 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
3250 * count.
3251 */
3252 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
3253 if (ret)
3254 goto out_key2;
3255
3256 /*
3257 * The check above which compares uaddrs is not sufficient for
3258 * shared futexes. We need to compare the keys:
3259 */
3260 if (match_futex(&q.key, &key2)) {
3261 queue_unlock(hb);
3262 ret = -EINVAL;
3263 goto out_put_keys;
3264 }
3265
3266 /* Queue the futex_q, drop the hb lock, wait for wakeup. */
3267 futex_wait_queue_me(hb, &q, to);
3268
3269 spin_lock(&hb->lock);
3270 ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
3271 spin_unlock(&hb->lock);
3272 if (ret)
3273 goto out_put_keys;
3274
3275 /*
3276 * In order for us to be here, we know our q.key == key2, and since
3277 * we took the hb->lock above, we also know that futex_requeue() has
3278 * completed and we no longer have to concern ourselves with a wakeup
3279 * race with the atomic proxy lock acquisition by the requeue code. The
3280 * futex_requeue dropped our key1 reference and incremented our key2
3281 * reference count.
3282 */
3283
3284 /* Check if the requeue code acquired the second futex for us. */
3285 if (!q.rt_waiter) {
3286 /*
3287 * Got the lock. We might not be the anticipated owner if we
3288 * did a lock-steal - fix up the PI-state in that case.
3289 */
3290 if (q.pi_state && (q.pi_state->owner != current)) {
3291 spin_lock(q.lock_ptr);
3292 ret = fixup_pi_state_owner(uaddr2, &q, current);
3293 if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
3294 pi_state = q.pi_state;
3295 get_pi_state(pi_state);
3296 }
3297 /*
3298 * Drop the reference to the pi state which
3299 * the requeue_pi() code acquired for us.
3300 */
3301 put_pi_state(q.pi_state);
3302 spin_unlock(q.lock_ptr);
3303 }
3304 } else {
3305 struct rt_mutex *pi_mutex;
3306
3307 /*
3308 * We have been woken up by futex_unlock_pi(), a timeout, or a
3309 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
3310 * the pi_state.
3311 */
3312 WARN_ON(!q.pi_state);
3313 pi_mutex = &q.pi_state->pi_mutex;
3314 ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
3315
3316 spin_lock(q.lock_ptr);
3317 if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
3318 ret = 0;
3319
3320 debug_rt_mutex_free_waiter(&rt_waiter);
3321 /*
3322 * Fixup the pi_state owner and possibly acquire the lock if we
3323 * haven't already.
3324 */
3325 res = fixup_owner(uaddr2, &q, !ret);
3326 /*
3327 * If fixup_owner() returned an error, propagate that. If it
3328 * acquired the lock, clear -ETIMEDOUT or -EINTR.
3329 */
3330 if (res)
3331 ret = (res < 0) ? res : 0;
3332
3333 /*
3334 * If fixup_pi_state_owner() faulted and was unable to handle
3335 * the fault, unlock the rt_mutex and return the fault to
3336 * userspace.
3337 */
3338 if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
3339 pi_state = q.pi_state;
3340 get_pi_state(pi_state);
3341 }
3342
3343 /* Unqueue and drop the lock. */
3344 unqueue_me_pi(&q);
3345 }
3346
3347 if (pi_state) {
3348 rt_mutex_futex_unlock(&pi_state->pi_mutex);
3349 put_pi_state(pi_state);
3350 }
3351
3352 if (ret == -EINTR) {
3353 /*
3354 * We've already been requeued, but cannot restart by calling
3355 * futex_lock_pi() directly. We could restart this syscall, but
3356 * it would detect that the user space "val" changed and return
3357 * -EWOULDBLOCK. Save the overhead of the restart and return
3358 * -EWOULDBLOCK directly.
3359 */
3360 ret = -EWOULDBLOCK;
3361 }
3362
3363 out_put_keys:
3364 put_futex_key(&q.key);
3365 out_key2:
3366 put_futex_key(&key2);
3367
3368 out:
3369 if (to) {
3370 hrtimer_cancel(&to->timer);
3371 destroy_hrtimer_on_stack(&to->timer);
3372 }
3373 return ret;
3374 }
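
/*
 * Example (editor's sketch): how the two halves of requeue-PI pair up
 * in a condition variable built on a PI mutex (the shape of glibc's
 * pthread_cond_wait()/signal() for PI mutexes). Names are illustrative
 * and the atomicity of cond_seq updates is elided.
 *
 *	// Waiter, PI mutex held on entry and on (successful) return:
 *	uint32_t seq = cond_seq;
 *	pi_unlock();			// release the PI mutex
 *	// Sleep on &cond_seq; woken owning &pi_word (see above):
 *	syscall(SYS_futex, &cond_seq, FUTEX_WAIT_REQUEUE_PI, seq,
 *		NULL, &pi_word, 0);
 *
 *	// Signaller:
 *	uint32_t newval = ++cond_seq;
 *	// nr_wake must be 1; requeue nobody else for a signal:
 *	syscall(SYS_futex, &cond_seq, FUTEX_CMP_REQUEUE_PI, 1, 0,
 *		&pi_word, newval);
 */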
3375
3376 /*
3377 * Support for robust futexes: the kernel cleans up held futexes at
3378 * thread exit time.
3379 *
3380 * Implementation: user-space maintains a per-thread list of locks it
3381 * is holding. Upon do_exit(), the kernel carefully walks this list,
3382 * and marks all locks that are owned by this thread with the
3383 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
3384 * always manipulated with the lock held, so the list is private and
3385 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
3386 * field, to allow the kernel to clean up if the thread dies after
3387 * acquiring the lock, but just before it could have added itself to
3388 * the list. There can only be one such pending lock.
3389 */
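
/*
 * Example (editor's sketch): the user space half of this contract. A
 * threading library registers the list head once per thread; each lock
 * word sits at a fixed offset from its list node so the exit-time walk
 * below can find it. struct my_mutex is hypothetical; glibc's
 * pthread_mutex_t plays this role.
 *
 *	#include <linux/futex.h>	// struct robust_list_head
 *	#include <sys/syscall.h>
 *	#include <stddef.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	struct my_mutex {
 *		struct robust_list node;	// linked into the list
 *		uint32_t futex_word;		// holds the owner TID
 *	};
 *
 *	static __thread struct robust_list_head rl_head;
 *
 *	void robust_init(void)
 *	{
 *		rl_head.list.next = &rl_head.list;	// empty list
 *		rl_head.futex_offset = offsetof(struct my_mutex, futex_word)
 *				     - offsetof(struct my_mutex, node);
 *		rl_head.list_op_pending = NULL;
 *		syscall(SYS_set_robust_list, &rl_head, sizeof(rl_head));
 *	}
 */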
3390
3391 /**
3392 * sys_set_robust_list() - Set the robust-futex list head of a task
3393 * @head: pointer to the list-head
3394 * @len: length of the list-head, as userspace expects
3395 */
3396 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
3397 size_t, len)
3398 {
3399 if (!futex_cmpxchg_enabled)
3400 return -ENOSYS;
3401 /*
3402 * The kernel knows only one size for now:
3403 */
3404 if (unlikely(len != sizeof(*head)))
3405 return -EINVAL;
3406
3407 current->robust_list = head;
3408
3409 return 0;
3410 }
3411
3412 /**
3413 * sys_get_robust_list() - Get the robust-futex list head of a task
3414 * @pid: pid of the process [zero for current task]
3415 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
3416 * @len_ptr: pointer to a length field, the kernel fills in the header size
3417 */
3418 SYSCALL_DEFINE3(get_robust_list, int, pid,
3419 struct robust_list_head __user * __user *, head_ptr,
3420 size_t __user *, len_ptr)
3421 {
3422 struct robust_list_head __user *head;
3423 unsigned long ret;
3424 struct task_struct *p;
3425
3426 if (!futex_cmpxchg_enabled)
3427 return -ENOSYS;
3428
3429 rcu_read_lock();
3430
3431 ret = -ESRCH;
3432 if (!pid)
3433 p = current;
3434 else {
3435 p = find_task_by_vpid(pid);
3436 if (!p)
3437 goto err_unlock;
3438 }
3439
3440 ret = -EPERM;
3441 if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
3442 goto err_unlock;
3443
3444 head = p->robust_list;
3445 rcu_read_unlock();
3446
3447 if (put_user(sizeof(*head), len_ptr))
3448 return -EFAULT;
3449 return put_user(head, head_ptr);
3450
3451 err_unlock:
3452 rcu_read_unlock();
3453
3454 return ret;
3455 }
3456
3457 /*
3458 * Process a futex-list entry, check whether it's owned by the
3459 * dying task, and do notification if so:
3460 */
3461 static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
3462 {
3463 u32 uval, uninitialized_var(nval), mval;
3464 int err;
3465
3466 /* Futex address must be 32bit aligned */
3467 if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
3468 return -1;
3469
3470 retry:
3471 if (get_user(uval, uaddr))
3472 return -1;
3473
3474 if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
3475 return 0;
3476
3477 /*
3478 * Ok, this dying thread is truly holding a futex
3479 * of interest. Set the OWNER_DIED bit atomically
3480 * via cmpxchg, and if the value had FUTEX_WAITERS
3481 * set, wake up a waiter (if any). (We have to do a
3482 * futex_wake() even if OWNER_DIED is already set -
3483 * to handle the rare but possible case of recursive
3484 * thread-death.) The rest of the cleanup is done in
3485 * userspace.
3486 */
3487 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
3488
3489 /*
3490 * We are not holding a lock here, but we want to have
3491 * the pagefault_disable/enable() protection because
3492 * we want to handle the fault gracefully. If the
3493 * access fails we try to fault in the futex with R/W
3494 * verification via get_user_pages. get_user() above
3495 * does not guarantee R/W access. If that fails we
3496 * give up and leave the futex locked.
3497 */
3498 if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) {
3499 switch (err) {
3500 case -EFAULT:
3501 if (fault_in_user_writeable(uaddr))
3502 return -1;
3503 goto retry;
3504
3505 case -EAGAIN:
3506 cond_resched();
3507 goto retry;
3508
3509 default:
3510 WARN_ON_ONCE(1);
3511 return err;
3512 }
3513 }
3514
3515 if (nval != uval)
3516 goto retry;
3517
3518 /*
3519 * Wake robust non-PI futexes here. The wakeup of
3520 * PI futexes happens in exit_pi_state():
3521 */
3522 if (!pi && (uval & FUTEX_WAITERS))
3523 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
3524
3525 return 0;
3526 }
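
/*
 * Example (editor's sketch): what the waiter woken by the code above
 * does with FUTEX_OWNER_DIED. After acquiring the lock, it must repair
 * the data the dead owner left behind before clearing the bit;
 * recover() and 'pi_word' are illustrative (glibc reports this state
 * as EOWNERDEAD from pthread_mutex_lock() and clears it in
 * pthread_mutex_consistent()).
 *
 *	uint32_t val = atomic_load(&pi_word);
 *	if (val & FUTEX_OWNER_DIED) {
 *		recover();	// make the protected data consistent
 *		atomic_fetch_and(&pi_word, ~(uint32_t)FUTEX_OWNER_DIED);
 *	}
 */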
3527
3528 /*
3529 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3530 */
3531 static inline int fetch_robust_entry(struct robust_list __user **entry,
3532 struct robust_list __user * __user *head,
3533 unsigned int *pi)
3534 {
3535 unsigned long uentry;
3536
3537 if (get_user(uentry, (unsigned long __user *)head))
3538 return -EFAULT;
3539
3540 *entry = (void __user *)(uentry & ~1UL);
3541 *pi = uentry & 1;
3542
3543 return 0;
3544 }
3545
3546 /*
3547 * Walk curr->robust_list (very carefully, it's a userspace list!)
3548 * and mark any locks found there dead, and notify any waiters.
3549 *
3550 * We silently return on any sign of list-walking problem.
3551 */
3552 void exit_robust_list(struct task_struct *curr)
3553 {
3554 struct robust_list_head __user *head = curr->robust_list;
3555 struct robust_list __user *entry, *next_entry, *pending;
3556 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3557 unsigned int uninitialized_var(next_pi);
3558 unsigned long futex_offset;
3559 int rc;
3560
3561 if (!futex_cmpxchg_enabled)
3562 return;
3563
3564 /*
3565 * Fetch the list head (which was registered earlier, via
3566 * sys_set_robust_list()):
3567 */
3568 if (fetch_robust_entry(&entry, &head->list.next, &pi))
3569 return;
3570 /*
3571 * Fetch the relative futex offset:
3572 */
3573 if (get_user(futex_offset, &head->futex_offset))
3574 return;
3575 /*
3576 * Fetch any possibly pending lock-add first, and handle it
3577 * if it exists:
3578 */
3579 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
3580 return;
3581
3582 next_entry = NULL; /* avoid warning with gcc */
3583 while (entry != &head->list) {
3584 /*
3585 * Fetch the next entry in the list before calling
3586 * handle_futex_death:
3587 */
3588 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
3589 /*
3590 * A pending lock might already be on the list, so
3591 * don't process it twice:
3592 */
3593 if (entry != pending)
3594 if (handle_futex_death((void __user *)entry + futex_offset,
3595 curr, pi))
3596 return;
3597 if (rc)
3598 return;
3599 entry = next_entry;
3600 pi = next_pi;
3601 /*
3602 * Avoid excessively long or circular lists:
3603 */
3604 if (!--limit)
3605 break;
3606
3607 cond_resched();
3608 }
3609
3610 if (pending)
3611 handle_futex_death((void __user *)pending + futex_offset,
3612 curr, pip);
3613 }
3614
3615 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
3616 u32 __user *uaddr2, u32 val2, u32 val3)
3617 {
3618 int cmd = op & FUTEX_CMD_MASK;
3619 unsigned int flags = 0;
3620
3621 if (!(op & FUTEX_PRIVATE_FLAG))
3622 flags |= FLAGS_SHARED;
3623
3624 if (op & FUTEX_CLOCK_REALTIME) {
3625 flags |= FLAGS_CLOCKRT;
3626 if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET &&
3627 cmd != FUTEX_WAIT_REQUEUE_PI)
3628 return -ENOSYS;
3629 }
3630
3631 switch (cmd) {
3632 case FUTEX_LOCK_PI:
3633 case FUTEX_UNLOCK_PI:
3634 case FUTEX_TRYLOCK_PI:
3635 case FUTEX_WAIT_REQUEUE_PI:
3636 case FUTEX_CMP_REQUEUE_PI:
3637 if (!futex_cmpxchg_enabled)
3638 return -ENOSYS;
3639 }
3640
3641 switch (cmd) {
3642 case FUTEX_WAIT:
3643 val3 = FUTEX_BITSET_MATCH_ANY;
3644 /* fall through */
3645 case FUTEX_WAIT_BITSET:
3646 return futex_wait(uaddr, flags, val, timeout, val3);
3647 case FUTEX_WAKE:
3648 val3 = FUTEX_BITSET_MATCH_ANY;
3649 /* fall through */
3650 case FUTEX_WAKE_BITSET:
3651 return futex_wake(uaddr, flags, val, val3);
3652 case FUTEX_REQUEUE:
3653 return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
3654 case FUTEX_CMP_REQUEUE:
3655 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
3656 case FUTEX_WAKE_OP:
3657 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
3658 case FUTEX_LOCK_PI:
3659 return futex_lock_pi(uaddr, flags, timeout, 0);
3660 case FUTEX_UNLOCK_PI:
3661 return futex_unlock_pi(uaddr, flags);
3662 case FUTEX_TRYLOCK_PI:
3663 return futex_lock_pi(uaddr, flags, NULL, 1);
3664 case FUTEX_WAIT_REQUEUE_PI:
3665 val3 = FUTEX_BITSET_MATCH_ANY;
3666 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
3667 uaddr2);
3668 case FUTEX_CMP_REQUEUE_PI:
3669 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
3670 }
3671 return -ENOSYS;
3672 }
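
/*
 * Example (editor's sketch): the bitset variants multiplexed above.
 * Plain FUTEX_WAIT/FUTEX_WAKE are simply the bitset operations with
 * val3 forced to FUTEX_BITSET_MATCH_ANY, so a waker can target a
 * subset of the waiters on one word ('word' is illustrative):
 *
 *	// Two waiters on the same futex, in different wake groups:
 *	syscall(SYS_futex, &word, FUTEX_WAIT_BITSET, val, NULL, NULL, 0x1);
 *	syscall(SYS_futex, &word, FUTEX_WAIT_BITSET, val, NULL, NULL, 0x2);
 *
 *	// Wakes only waiters whose bitset intersects 0x2:
 *	syscall(SYS_futex, &word, FUTEX_WAKE_BITSET, INT_MAX, NULL, NULL, 0x2);
 */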
3673
3674
3675 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
3676 struct __kernel_timespec __user *, utime, u32 __user *, uaddr2,
3677 u32, val3)
3678 {
3679 struct timespec64 ts;
3680 ktime_t t, *tp = NULL;
3681 u32 val2 = 0;
3682 int cmd = op & FUTEX_CMD_MASK;
3683
3684 if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
3685 cmd == FUTEX_WAIT_BITSET ||
3686 cmd == FUTEX_WAIT_REQUEUE_PI)) {
3687 if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
3688 return -EFAULT;
3689 if (get_timespec64(&ts, utime))
3690 return -EFAULT;
3691 if (!timespec64_valid(&ts))
3692 return -EINVAL;
3693
3694 t = timespec64_to_ktime(ts);
3695 if (cmd == FUTEX_WAIT)
3696 t = ktime_add_safe(ktime_get(), t);
3697 tp = &t;
3698 }
3699 /*
3700 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
3701 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
3702 */
3703 if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
3704 cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
3705 val2 = (u32) (unsigned long) utime;
3706
3707 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
3708 }
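
/*
 * Example (editor's sketch): the timeout handling above makes the
 * FUTEX_WAIT timeout relative (ktime_add_safe() adds it to "now"),
 * while FUTEX_WAIT_BITSET takes an absolute deadline, by default
 * against CLOCK_MONOTONIC:
 *
 *	struct timespec rel = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };
 *	syscall(SYS_futex, &word, FUTEX_WAIT, val, &rel, NULL, 0);
 *
 *	struct timespec dl;
 *	clock_gettime(CLOCK_MONOTONIC, &dl);
 *	dl.tv_sec += 1;		// absolute deadline, one second out
 *	syscall(SYS_futex, &word, FUTEX_WAIT_BITSET, val, &dl, NULL,
 *		FUTEX_BITSET_MATCH_ANY);
 */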
3709
3710 #ifdef CONFIG_COMPAT
3711 /*
3712 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3713 */
3714 static inline int
3715 compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
3716 compat_uptr_t __user *head, unsigned int *pi)
3717 {
3718 if (get_user(*uentry, head))
3719 return -EFAULT;
3720
3721 *entry = compat_ptr((*uentry) & ~1);
3722 *pi = (unsigned int)(*uentry) & 1;
3723
3724 return 0;
3725 }
3726
3727 static void __user *futex_uaddr(struct robust_list __user *entry,
3728 compat_long_t futex_offset)
3729 {
3730 compat_uptr_t base = ptr_to_compat(entry);
3731 void __user *uaddr = compat_ptr(base + futex_offset);
3732
3733 return uaddr;
3734 }
3735
3736 /*
3737 * Walk curr->robust_list (very carefully, it's a userspace list!)
3738 * and mark any locks found there dead, and notify any waiters.
3739 *
3740 * We silently return on any sign of list-walking problem.
3741 */
void compat_exit_robust_list(struct task_struct *curr)
{
	struct compat_robust_list_head __user *head = curr->compat_robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int uninitialized_var(next_pi);
	compat_uptr_t uentry, next_uentry, upending;
	compat_long_t futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (compat_fetch_robust_entry(&upending, &pending,
				      &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != (struct robust_list __user *) &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
			(compat_uptr_t __user *)&entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			void __user *uaddr = futex_uaddr(entry, futex_offset);

			if (handle_futex_death(uaddr, curr, pi))
				return;
		}
		if (rc)
			return;
		uentry = next_uentry;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}
	if (pending) {
		void __user *uaddr = futex_uaddr(pending, futex_offset);

		handle_futex_death(uaddr, curr, pip);
	}
}
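
/*
 * For context, a rough sketch of the userspace side that feeds the walk
 * above (illustrative only; the field values are schematic): each thread
 * registers one head, keeps the list circular, links a lock's node in
 * around acquisition and unlinks it on release:
 *
 *	struct compat_robust_list_head head;
 *	head.list.next = (compat_uptr_t)&head.list;	// empty: self-link
 *	head.futex_offset = ...;	// node -> futex word distance
 *	head.list_op_pending = 0;
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * list_op_pending names the lock currently being acquired or released,
 * covering the window where it is not yet (or no longer) on the list;
 * that is why the walk handles the pending entry separately at the end.
 */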

COMPAT_SYSCALL_DEFINE2(set_robust_list,
		struct compat_robust_list_head __user *, head,
		compat_size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->compat_robust_list = head;

	return 0;
}
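
/*
 * The strict sizeof() check above doubles as ABI versioning: if the
 * head structure were ever extended, an old kernel would reject the
 * larger length with -EINVAL rather than silently misinterpret it.
 */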

COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
			compat_uptr_t __user *, head_ptr,
			compat_size_t __user *, len_ptr)
{
	struct compat_robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
		goto err_unlock;

	head = p->compat_robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(ptr_to_compat(head), head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}
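
/*
 * A consumer of this interface (e.g. a checkpoint/restore or debugging
 * tool; sketch only, assuming a 32-bit caller) would do roughly:
 *
 *	struct robust_list_head *head;
 *	size_t len;
 *	syscall(SYS_get_robust_list, pid, &head, &len);
 *
 * The ptrace_may_access() check above is meant to limit this to callers
 * that could already read the target's memory via ptrace.
 */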
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
		struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec64 ts;
	ktime_t t, *tp = NULL;
	int val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (get_old_timespec32(&ts, utime))
			return -EFAULT;
		if (!timespec64_valid(&ts))
			return -EINVAL;

		t = timespec64_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (int) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
#endif /* CONFIG_COMPAT_32BIT_TIME */
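
/*
 * Note that futex_time32() differs from the native entry point only in
 * how the timeout is parsed (legacy 32-bit time_t, via
 * get_old_timespec32()); both paths converge on do_futex(). Userspace
 * built with 64-bit time_t uses the native syscall instead.
 */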

static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
	u32 curval;

	/*
	 * This call is expected to fail, and that is exactly what we
	 * want: some arch implementations do runtime detection of the
	 * futex_atomic_cmpxchg_inatomic() functionality, and we want to
	 * know the result before calling into any of the complex code
	 * paths. It also lets us refuse registration of robust lists
	 * when the operation is unavailable. NULL is guaranteed to
	 * fault, so a functional implementation returns -EFAULT while
	 * a non-functional one returns -ENOSYS.
	 */
	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
		futex_cmpxchg_enabled = 1;
#endif
}

static int __init futex_init(void)
{
	unsigned int futex_shift;
	unsigned long i;

#if CONFIG_BASE_SMALL
	futex_hashsize = 16;
#else
	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif
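
	/*
	 * Worked example of the sizing above: with 6 possible CPUs,
	 * 256 * 6 = 1536 rounds up to 2048 hash buckets; with
	 * CONFIG_BASE_SMALL the table is pinned at 16 buckets instead.
	 * alloc_large_system_hash() may still pick a different size,
	 * which is why futex_hashsize is recomputed from futex_shift
	 * below.
	 */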

	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
					       futex_hashsize, 0,
					       futex_hashsize < 256 ? HASH_SMALL : 0,
					       &futex_shift, NULL,
					       futex_hashsize, futex_hashsize);
	futex_hashsize = 1UL << futex_shift;

	futex_detect_cmpxchg();

	for (i = 0; i < futex_hashsize; i++) {
		atomic_set(&futex_queues[i].waiters, 0);
		plist_head_init(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
core_initcall(futex_init);