1 /*
2 * Fast Userspace Mutexes (which I call "Futexes!").
3 * (C) Rusty Russell, IBM 2002
4 *
5 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
6 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
7 *
8 * Removed page pinning, fix privately mapped COW pages and other cleanups
9 * (C) Copyright 2003, 2004 Jamie Lokier
10 *
11 * Robust futex support started by Ingo Molnar
12 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
13 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
14 *
15 * PI-futex support started by Ingo Molnar and Thomas Gleixner
16 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
17 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
18 *
19 * PRIVATE futexes by Eric Dumazet
20 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
21 *
22 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
23 * Copyright (C) IBM Corporation, 2009
24 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
25 *
26 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
27 * enough at me, Linus for the original (flawed) idea, Matthew
28 * Kirkwood for proof-of-concept implementation.
29 *
30 * "The futexes are also cursed."
31 * "But they come in a choice of three flavours!"
32 *
33 * This program is free software; you can redistribute it and/or modify
34 * it under the terms of the GNU General Public License as published by
35 * the Free Software Foundation; either version 2 of the License, or
36 * (at your option) any later version.
37 *
38 * This program is distributed in the hope that it will be useful,
39 * but WITHOUT ANY WARRANTY; without even the implied warranty of
40 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
41 * GNU General Public License for more details.
42 *
43 * You should have received a copy of the GNU General Public License
44 * along with this program; if not, write to the Free Software
45 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
46 */
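
/*
 * A minimal userspace sketch of the wait/wake protocol this file
 * implements, via the raw futex(2) syscall. This is an illustrative
 * assumption, not kernel API: the helper names and the 'futex_word'
 * variable exist only for the example.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>
#include <time.h>

static atomic_int futex_word;

static long sys_futex(int *uaddr, int op, int val,
		      const struct timespec *timeout)
{
	return syscall(SYS_futex, uaddr, op, val, timeout, NULL, 0);
}

/* Waiter: block only while the word still holds the expected value. */
static void wait_while(int expected)
{
	while (atomic_load(&futex_word) == expected)
		sys_futex((int *)&futex_word, FUTEX_WAIT, expected, NULL);
}

/* Waker: change the word first, then wake one waiter. */
static void wake_one(int newval)
{
	atomic_store(&futex_word, newval);
	sys_futex((int *)&futex_word, FUTEX_WAKE, 1, NULL);
}
#endif
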
47 #include <linux/slab.h>
48 #include <linux/poll.h>
49 #include <linux/fs.h>
50 #include <linux/file.h>
51 #include <linux/jhash.h>
52 #include <linux/init.h>
53 #include <linux/futex.h>
54 #include <linux/mount.h>
55 #include <linux/pagemap.h>
56 #include <linux/syscalls.h>
57 #include <linux/signal.h>
58 #include <linux/module.h>
59 #include <linux/magic.h>
60 #include <linux/pid.h>
61 #include <linux/nsproxy.h>
62
63 #include <asm/futex.h>
64
65 #include "rtmutex_common.h"
66
67 int __read_mostly futex_cmpxchg_enabled;
68
69 #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
70
71 /*
72 * Priority Inheritance state:
73 */
74 struct futex_pi_state {
75 /*
76 * list of 'owned' pi_state instances - these have to be
77 * cleaned up in do_exit() if the task exits prematurely:
78 */
79 struct list_head list;
80
81 /*
82 * The PI object:
83 */
84 struct rt_mutex pi_mutex;
85
86 struct task_struct *owner;
87 atomic_t refcount;
88
89 union futex_key key;
90 };
91
92 /*
93 * We use this hashed waitqueue instead of a normal wait_queue_t, so
94 * we can wake only the relevant ones (hashed queues may be shared).
95 *
96 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
97 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
98 * The order of wakeup is always to make the first condition true, then
99 * wake up q->task, then make the second condition true.
100 */
101 struct futex_q {
102 struct plist_node list;
103 /* Waiter reference */
104 struct task_struct *task;
105
106 /* Which hash list lock to use: */
107 spinlock_t *lock_ptr;
108
109 /* Key which the futex is hashed on: */
110 union futex_key key;
111
112 /* Optional priority inheritance state: */
113 struct futex_pi_state *pi_state;
114
115 /* rt_waiter storage for requeue_pi: */
116 struct rt_mutex_waiter *rt_waiter;
117
118 /* Bitset for the optional bitmasked wakeup */
119 u32 bitset;
120 };
121
122 /*
123 * Hash buckets are shared by all the futex_keys that hash to the same
124 * location. Each key may have multiple futex_q structures, one for each task
125 * waiting on a futex.
126 */
127 struct futex_hash_bucket {
128 spinlock_t lock;
129 struct plist_head chain;
130 };
131
132 static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
133
134 /*
135 * We hash on the keys returned from get_futex_key (see below).
136 */
137 static struct futex_hash_bucket *hash_futex(union futex_key *key)
138 {
139 u32 hash = jhash2((u32*)&key->both.word,
140 (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
141 key->both.offset);
142 return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
143 }
144
145 /*
146 * Return 1 if two futex_keys are equal, 0 otherwise.
147 */
148 static inline int match_futex(union futex_key *key1, union futex_key *key2)
149 {
150 return (key1->both.word == key2->both.word
151 && key1->both.ptr == key2->both.ptr
152 && key1->both.offset == key2->both.offset);
153 }
154
155 /*
156 * Take a reference to the resource addressed by a key.
157 * Can be called while holding spinlocks.
158 *
159 */
160 static void get_futex_key_refs(union futex_key *key)
161 {
162 if (!key->both.ptr)
163 return;
164
165 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
166 case FUT_OFF_INODE:
167 atomic_inc(&key->shared.inode->i_count);
168 break;
169 case FUT_OFF_MMSHARED:
170 atomic_inc(&key->private.mm->mm_count);
171 break;
172 }
173 }
174
175 /*
176 * Drop a reference to the resource addressed by a key.
177 * The hash bucket spinlock must not be held.
178 */
179 static void drop_futex_key_refs(union futex_key *key)
180 {
181 if (!key->both.ptr) {
182 /* If we're here then we tried to put a key we failed to get */
183 WARN_ON_ONCE(1);
184 return;
185 }
186
187 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
188 case FUT_OFF_INODE:
189 iput(key->shared.inode);
190 break;
191 case FUT_OFF_MMSHARED:
192 mmdrop(key->private.mm);
193 break;
194 }
195 }
196
197 /**
198 * get_futex_key - Get parameters which are the keys for a futex.
199 * @uaddr: virtual address of the futex
200 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
201 * @key: address where result is stored.
202 * @rw: mapping needs to be read/write (values: VERIFY_READ, VERIFY_WRITE)
203 *
204 * Returns a negative error code or 0
205 * The key words are stored in *key on success.
206 *
207 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
208 * offset_within_page). For private mappings, it's (uaddr, current->mm).
209 * We can usually work out the index without swapping in the page.
210 *
211 * lock_page() might sleep, the caller should not hold a spinlock.
212 */
213 static int
214 get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
215 {
216 unsigned long address = (unsigned long)uaddr;
217 struct mm_struct *mm = current->mm;
218 struct page *page;
219 int err;
220
221 /*
222 * The futex address must be "naturally" aligned.
223 */
224 key->both.offset = address % PAGE_SIZE;
225 if (unlikely((address % sizeof(u32)) != 0))
226 return -EINVAL;
227 address -= key->both.offset;
228
229 /*
230 * PROCESS_PRIVATE futexes are fast.
231 * As the mm cannot disappear under us and the 'key' only needs
232 * virtual address, we don't even have to find the underlying vma.
233 * Note: we do have to check that 'uaddr' is a valid user address,
234 * but access_ok() should be faster than find_vma().
235 */
236 if (!fshared) {
237 if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
238 return -EFAULT;
239 key->private.mm = mm;
240 key->private.address = address;
241 get_futex_key_refs(key);
242 return 0;
243 }
244
245 again:
246 err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
247 if (err < 0)
248 return err;
249
250 lock_page(page);
251 if (!page->mapping) {
252 unlock_page(page);
253 put_page(page);
254 goto again;
255 }
256
257 /*
258 * Private mappings are handled in a simple way.
259 *
260 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
261 * it's a read-only handle, it's expected that futexes attach to
262 * the object not the particular process.
263 */
264 if (PageAnon(page)) {
265 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
266 key->private.mm = mm;
267 key->private.address = address;
268 } else {
269 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
270 key->shared.inode = page->mapping->host;
271 key->shared.pgoff = page->index;
272 }
273
274 get_futex_key_refs(key);
275
276 unlock_page(page);
277 put_page(page);
278 return 0;
279 }
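
/*
 * A sketch (assumption, for illustration only) of how userspace selects
 * the fast PROCESS_PRIVATE path above: passing FUTEX_PRIVATE_FLAG lets
 * the kernel key on (mm, address) and skip the page lookup entirely.
 * 'word', 'shared_word' and 'val' are example placeholders.
 */
#if 0
/* Threads of one process: private key, no page/vma walk needed. */
syscall(SYS_futex, &word, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val,
	NULL, NULL, 0);

/* Futex in a MAP_SHARED mapping: needs the inode-based shared key. */
syscall(SYS_futex, shared_word, FUTEX_WAIT, val, NULL, NULL, 0);
#endif
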
280
281 static inline
282 void put_futex_key(int fshared, union futex_key *key)
283 {
284 drop_futex_key_refs(key);
285 }
286
287 /**
288 * futex_top_waiter() - Return the highest priority waiter on a futex
289 * @hb: the hash bucket the futex_q's reside in
290 * @key: the futex key (to distinguish it from other futex_q's)
291 *
292 * Must be called with the hb lock held.
293 */
294 static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
295 union futex_key *key)
296 {
297 struct futex_q *this;
298
299 plist_for_each_entry(this, &hb->chain, list) {
300 if (match_futex(&this->key, key))
301 return this;
302 }
303 return NULL;
304 }
305
306 static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
307 {
308 u32 curval;
309
310 pagefault_disable();
311 curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
312 pagefault_enable();
313
314 return curval;
315 }
316
317 static int get_futex_value_locked(u32 *dest, u32 __user *from)
318 {
319 int ret;
320
321 pagefault_disable();
322 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
323 pagefault_enable();
324
325 return ret ? -EFAULT : 0;
326 }
327
328
329 /*
330 * PI code:
331 */
332 static int refill_pi_state_cache(void)
333 {
334 struct futex_pi_state *pi_state;
335
336 if (likely(current->pi_state_cache))
337 return 0;
338
339 pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
340
341 if (!pi_state)
342 return -ENOMEM;
343
344 INIT_LIST_HEAD(&pi_state->list);
345 /* pi_mutex gets initialized later */
346 pi_state->owner = NULL;
347 atomic_set(&pi_state->refcount, 1);
348 pi_state->key = FUTEX_KEY_INIT;
349
350 current->pi_state_cache = pi_state;
351
352 return 0;
353 }
354
355 static struct futex_pi_state * alloc_pi_state(void)
356 {
357 struct futex_pi_state *pi_state = current->pi_state_cache;
358
359 WARN_ON(!pi_state);
360 current->pi_state_cache = NULL;
361
362 return pi_state;
363 }
364
365 static void free_pi_state(struct futex_pi_state *pi_state)
366 {
367 if (!atomic_dec_and_test(&pi_state->refcount))
368 return;
369
370 /*
371 * If pi_state->owner is NULL, the owner is most probably dying
372 * and has cleaned up the pi_state already
373 */
374 if (pi_state->owner) {
375 spin_lock_irq(&pi_state->owner->pi_lock);
376 list_del_init(&pi_state->list);
377 spin_unlock_irq(&pi_state->owner->pi_lock);
378
379 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
380 }
381
382 if (current->pi_state_cache)
383 kfree(pi_state);
384 else {
385 /*
386 * pi_state->list is already empty.
387 * clear pi_state->owner.
388 * refcount is at 0 - put it back to 1.
389 */
390 pi_state->owner = NULL;
391 atomic_set(&pi_state->refcount, 1);
392 current->pi_state_cache = pi_state;
393 }
394 }
395
396 /*
397 * Look up the task based on what TID userspace gave us.
398 * We don't trust it.
399 */
400 static struct task_struct * futex_find_get_task(pid_t pid)
401 {
402 struct task_struct *p;
403 const struct cred *cred = current_cred(), *pcred;
404
405 rcu_read_lock();
406 p = find_task_by_vpid(pid);
407 if (!p) {
408 p = ERR_PTR(-ESRCH);
409 } else {
410 pcred = __task_cred(p);
411 if (cred->euid != pcred->euid &&
412 cred->euid != pcred->uid)
413 p = ERR_PTR(-ESRCH);
414 else
415 get_task_struct(p);
416 }
417
418 rcu_read_unlock();
419
420 return p;
421 }
422
423 /*
424 * This task is holding PI mutexes at exit time => bad.
425 * Kernel cleans up PI-state, but userspace is likely hosed.
426 * (Robust-futex cleanup is separate and might save the day for userspace.)
427 */
428 void exit_pi_state_list(struct task_struct *curr)
429 {
430 struct list_head *next, *head = &curr->pi_state_list;
431 struct futex_pi_state *pi_state;
432 struct futex_hash_bucket *hb;
433 union futex_key key = FUTEX_KEY_INIT;
434
435 if (!futex_cmpxchg_enabled)
436 return;
437 /*
438 * We are a ZOMBIE and nobody can enqueue itself on
439 * pi_state_list anymore, but we have to be careful
440 * versus waiters unqueueing themselves:
441 */
442 spin_lock_irq(&curr->pi_lock);
443 while (!list_empty(head)) {
444
445 next = head->next;
446 pi_state = list_entry(next, struct futex_pi_state, list);
447 key = pi_state->key;
448 hb = hash_futex(&key);
449 spin_unlock_irq(&curr->pi_lock);
450
451 spin_lock(&hb->lock);
452
453 spin_lock_irq(&curr->pi_lock);
454 /*
455 * We dropped the pi-lock, so re-check whether this
456 * task still owns the PI-state:
457 */
458 if (head->next != next) {
459 spin_unlock(&hb->lock);
460 continue;
461 }
462
463 WARN_ON(pi_state->owner != curr);
464 WARN_ON(list_empty(&pi_state->list));
465 list_del_init(&pi_state->list);
466 pi_state->owner = NULL;
467 spin_unlock_irq(&curr->pi_lock);
468
469 rt_mutex_unlock(&pi_state->pi_mutex);
470
471 spin_unlock(&hb->lock);
472
473 spin_lock_irq(&curr->pi_lock);
474 }
475 spin_unlock_irq(&curr->pi_lock);
476 }
477
478 static int
479 lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
480 union futex_key *key, struct futex_pi_state **ps)
481 {
482 struct futex_pi_state *pi_state = NULL;
483 struct futex_q *this, *next;
484 struct plist_head *head;
485 struct task_struct *p;
486 pid_t pid = uval & FUTEX_TID_MASK;
487
488 head = &hb->chain;
489
490 plist_for_each_entry_safe(this, next, head, list) {
491 if (match_futex(&this->key, key)) {
492 /*
493 * Another waiter already exists - bump up
494 * the refcount and return its pi_state:
495 */
496 pi_state = this->pi_state;
497 /*
498 * Userspace might have messed up non PI and PI futexes
499 */
500 if (unlikely(!pi_state))
501 return -EINVAL;
502
503 WARN_ON(!atomic_read(&pi_state->refcount));
504 WARN_ON(pid && pi_state->owner &&
505 pi_state->owner->pid != pid);
506
507 atomic_inc(&pi_state->refcount);
508 *ps = pi_state;
509
510 return 0;
511 }
512 }
513
514 /*
515 * We are the first waiter - try to look up the real owner and attach
516 * the new pi_state to it, but bail out when TID = 0
517 */
518 if (!pid)
519 return -ESRCH;
520 p = futex_find_get_task(pid);
521 if (IS_ERR(p))
522 return PTR_ERR(p);
523
524 /*
525 * We need to look at the task state flags to figure out
526 * whether the task is exiting. To protect against the do_exit
527 * change of the task flags, we do this protected by
528 * p->pi_lock:
529 */
530 spin_lock_irq(&p->pi_lock);
531 if (unlikely(p->flags & PF_EXITING)) {
532 /*
533 * The task is on the way out. When PF_EXITPIDONE is
534 * set, we know that the task has finished the
535 * cleanup:
536 */
537 int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
538
539 spin_unlock_irq(&p->pi_lock);
540 put_task_struct(p);
541 return ret;
542 }
543
544 pi_state = alloc_pi_state();
545
546 /*
547 * Initialize the pi_mutex in locked state and make 'p'
548 * the owner of it:
549 */
550 rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
551
552 /* Store the key for possible exit cleanups: */
553 pi_state->key = *key;
554
555 WARN_ON(!list_empty(&pi_state->list));
556 list_add(&pi_state->list, &p->pi_state_list);
557 pi_state->owner = p;
558 spin_unlock_irq(&p->pi_lock);
559
560 put_task_struct(p);
561
562 *ps = pi_state;
563
564 return 0;
565 }
566
567 /**
568 * futex_lock_pi_atomic() - atomic work required to acquire a pi aware futex
569 * @uaddr: the pi futex user address
570 * @hb: the pi futex hash bucket
571 * @key: the futex key associated with uaddr and hb
572 * @ps: the pi_state pointer where we store the result of the
573 * lookup
574 * @task: the task to perform the atomic lock work for. This will
575 * be "current" except in the case of requeue pi.
576 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
577 *
578 * Returns:
579 * 0 - ready to wait
580 * 1 - acquired the lock
581 * <0 - error
582 *
583 * The hb->lock and futex_key refs shall be held by the caller.
584 */
585 static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
586 union futex_key *key,
587 struct futex_pi_state **ps,
588 struct task_struct *task, int set_waiters)
589 {
590 int lock_taken, ret, ownerdied = 0;
591 u32 uval, newval, curval;
592
593 retry:
594 ret = lock_taken = 0;
595
596 /*
597 * To avoid races, we attempt to take the lock here again
598 * (by doing a 0 -> TID atomic cmpxchg), while holding all
599 * the locks. It will most likely not succeed.
600 */
601 newval = task_pid_vnr(task);
602 if (set_waiters)
603 newval |= FUTEX_WAITERS;
604
605 curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
606
607 if (unlikely(curval == -EFAULT))
608 return -EFAULT;
609
610 /*
611 * Detect deadlocks.
612 */
613 if ((unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task))))
614 return -EDEADLK;
615
616 /*
617 * Surprise - we got the lock. Just return to userspace:
618 */
619 if (unlikely(!curval))
620 return 1;
621
622 uval = curval;
623
624 /*
625 * Set the FUTEX_WAITERS flag, so the owner will know it has someone
626 * to wake at the next unlock.
627 */
628 newval = curval | FUTEX_WAITERS;
629
630 /*
631 * There are two cases where a futex might have no owner: the owner
632 * TID in the futex word is 0, or a previous read saw FUTEX_OWNER_DIED
633 * and set 'ownerdied'. In both cases we take over the futex
634 * unconditionally.
635 *
636 * This is safe as we are protected by the hash bucket lock !
637 */
638 if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
639 /* Keep the OWNER_DIED bit */
640 newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task);
641 ownerdied = 0;
642 lock_taken = 1;
643 }
644
645 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
646
647 if (unlikely(curval == -EFAULT))
648 return -EFAULT;
649 if (unlikely(curval != uval))
650 goto retry;
651
652 /*
653 * We took the lock due to owner died take over.
654 */
655 if (unlikely(lock_taken))
656 return 1;
657
658 /*
659 * We dont have the lock. Look up the PI state (or create it if
660 * we are the first waiter):
661 */
662 ret = lookup_pi_state(uval, hb, key, ps);
663
664 if (unlikely(ret)) {
665 switch (ret) {
666 case -ESRCH:
667 /*
668 * No owner found for this futex. Check if the
669 * OWNER_DIED bit is set to figure out whether
670 * this is a robust futex or not.
671 */
672 if (get_futex_value_locked(&curval, uaddr))
673 return -EFAULT;
674
675 /*
676 * We simply start over in case of a robust
677 * futex. The code above will take the futex
678 * and return happy.
679 */
680 if (curval & FUTEX_OWNER_DIED) {
681 ownerdied = 1;
682 goto retry;
683 }
684 default:
685 break;
686 }
687 }
688
689 return ret;
690 }
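
/*
 * A sketch (assumption, for illustration only) of the userspace fast
 * path that pairs with futex_lock_pi_atomic(): only when the 0 -> TID
 * cmpxchg fails does userspace enter the kernel, which then retries
 * the same transition under the hash bucket lock. 'pi_lock' is an
 * example-only helper, not a real API.
 */
#if 0
#include <sys/types.h>

static void pi_lock(atomic_int *lock)
{
	int zero = 0;
	pid_t tid = syscall(SYS_gettid);

	/* Uncontended: 0 -> TID in userspace, no syscall at all. */
	if (atomic_compare_exchange_strong(lock, &zero, tid))
		return;

	/* Contended: the kernel queues us and applies priority inheritance. */
	syscall(SYS_futex, lock, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
}
#endif
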
691
692 /*
693 * The hash bucket lock must be held when this is called.
694 * Afterwards, the futex_q must not be accessed.
695 */
696 static void wake_futex(struct futex_q *q)
697 {
698 struct task_struct *p = q->task;
699
700 /*
701 * We set q->lock_ptr = NULL _before_ we wake up the task. If
702 * a non futex wake up happens on another CPU then the task
703 * might exit and p would dereference a non existing task
704 * struct. Prevent this by holding a reference on p across the
705 * wake up.
706 */
707 get_task_struct(p);
708
709 plist_del(&q->list, &q->list.plist);
710 /*
711 * The waiting task can free the futex_q as soon as
712 * q->lock_ptr = NULL is written, without taking any locks. A
713 * memory barrier is required here to prevent the following
714 * store to lock_ptr from getting ahead of the plist_del.
715 */
716 smp_wmb();
717 q->lock_ptr = NULL;
718
719 wake_up_state(p, TASK_NORMAL);
720 put_task_struct(p);
721 }
722
723 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
724 {
725 struct task_struct *new_owner;
726 struct futex_pi_state *pi_state = this->pi_state;
727 u32 curval, newval;
728
729 if (!pi_state)
730 return -EINVAL;
731
732 spin_lock(&pi_state->pi_mutex.wait_lock);
733 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
734
735 /*
736 * This happens when we have stolen the lock and the original
737 * pending owner did not enqueue itself back on the rt_mutex.
738 * That's not a tragedy. We know this way that a lock waiter
739 * is in flight. We make the futex_q waiter the pending owner.
740 */
741 if (!new_owner)
742 new_owner = this->task;
743
744 /*
745 * We pass it to the next owner. (The WAITERS bit is always
746 * kept enabled while there is PI state around. We must also
747 * preserve the owner died bit.)
748 */
749 if (!(uval & FUTEX_OWNER_DIED)) {
750 int ret = 0;
751
752 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
753
754 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
755
756 if (curval == -EFAULT)
757 ret = -EFAULT;
758 else if (curval != uval)
759 ret = -EINVAL;
760 if (ret) {
761 spin_unlock(&pi_state->pi_mutex.wait_lock);
762 return ret;
763 }
764 }
765
766 spin_lock_irq(&pi_state->owner->pi_lock);
767 WARN_ON(list_empty(&pi_state->list));
768 list_del_init(&pi_state->list);
769 spin_unlock_irq(&pi_state->owner->pi_lock);
770
771 spin_lock_irq(&new_owner->pi_lock);
772 WARN_ON(!list_empty(&pi_state->list));
773 list_add(&pi_state->list, &new_owner->pi_state_list);
774 pi_state->owner = new_owner;
775 spin_unlock_irq(&new_owner->pi_lock);
776
777 spin_unlock(&pi_state->pi_mutex.wait_lock);
778 rt_mutex_unlock(&pi_state->pi_mutex);
779
780 return 0;
781 }
782
783 static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
784 {
785 u32 oldval;
786
787 /*
788 * There is no waiter, so we unlock the futex. The owner died
789 * bit does not have to be preserved here. We are the owner:
790 */
791 oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);
792
793 if (oldval == -EFAULT)
794 return oldval;
795 if (oldval != uval)
796 return -EAGAIN;
797
798 return 0;
799 }
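
/*
 * A sketch (assumption, for illustration only) of the matching
 * userspace unlock fast path: the TID -> 0 cmpxchg succeeds only while
 * no waiter state exists. If FUTEX_WAITERS (or FUTEX_OWNER_DIED) is set
 * in the word, the cmpxchg fails and the kernel slow path runs.
 */
#if 0
static void pi_unlock(atomic_int *lock)
{
	int expected = syscall(SYS_gettid);

	/* Fails if FUTEX_WAITERS or FUTEX_OWNER_DIED is set in the word. */
	if (!atomic_compare_exchange_strong(lock, &expected, 0))
		syscall(SYS_futex, lock, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
}
#endif
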
800
801 /*
802 * Express the locking dependencies for lockdep:
803 */
804 static inline void
805 double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
806 {
807 if (hb1 <= hb2) {
808 spin_lock(&hb1->lock);
809 if (hb1 < hb2)
810 spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
811 } else { /* hb1 > hb2 */
812 spin_lock(&hb2->lock);
813 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
814 }
815 }
816
817 static inline void
818 double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
819 {
820 spin_unlock(&hb1->lock);
821 if (hb1 != hb2)
822 spin_unlock(&hb2->lock);
823 }
824
825 /*
826 * Wake up waiters matching bitset queued on this futex (uaddr).
827 */
828 static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
829 {
830 struct futex_hash_bucket *hb;
831 struct futex_q *this, *next;
832 struct plist_head *head;
833 union futex_key key = FUTEX_KEY_INIT;
834 int ret;
835
836 if (!bitset)
837 return -EINVAL;
838
839 ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
840 if (unlikely(ret != 0))
841 goto out;
842
843 hb = hash_futex(&key);
844 spin_lock(&hb->lock);
845 head = &hb->chain;
846
847 plist_for_each_entry_safe(this, next, head, list) {
848 		if (match_futex(&this->key, &key)) {
849 if (this->pi_state || this->rt_waiter) {
850 ret = -EINVAL;
851 break;
852 }
853
854 /* Check if one of the bits is set in both bitsets */
855 if (!(this->bitset & bitset))
856 continue;
857
858 wake_futex(this);
859 if (++ret >= nr_wake)
860 break;
861 }
862 }
863
864 spin_unlock(&hb->lock);
865 put_futex_key(fshared, &key);
866 out:
867 return ret;
868 }
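
/*
 * A sketch (assumption, for illustration only) of the bitset variants
 * that drive the 'this->bitset & bitset' test above: a waiter registers
 * a bitset via FUTEX_WAIT_BITSET (argument 6), and FUTEX_WAKE_BITSET
 * wakes only waiters whose bitset intersects the waker's.
 */
#if 0
/* Waiter interested in "channel" bit 0 only: */
syscall(SYS_futex, &word, FUTEX_WAIT_BITSET, val, NULL, NULL, 0x1);

/* Wake any number of waiters, but only those matching bit 0: */
syscall(SYS_futex, &word, FUTEX_WAKE_BITSET, INT_MAX, NULL, NULL, 0x1);
#endif
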
869
870 /*
871 * Wake up all waiters hashed on the physical page that is mapped
872 * to this virtual address:
873 */
874 static int
875 futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
876 int nr_wake, int nr_wake2, int op)
877 {
878 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
879 struct futex_hash_bucket *hb1, *hb2;
880 struct plist_head *head;
881 struct futex_q *this, *next;
882 int ret, op_ret;
883
884 retry:
885 ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
886 if (unlikely(ret != 0))
887 goto out;
888 ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
889 if (unlikely(ret != 0))
890 goto out_put_key1;
891
892 hb1 = hash_futex(&key1);
893 hb2 = hash_futex(&key2);
894
895 double_lock_hb(hb1, hb2);
896 retry_private:
897 op_ret = futex_atomic_op_inuser(op, uaddr2);
898 if (unlikely(op_ret < 0)) {
899 u32 dummy;
900
901 double_unlock_hb(hb1, hb2);
902
903 #ifndef CONFIG_MMU
904 /*
905 * we don't get EFAULT from MMU faults if we don't have an MMU,
906 * but we might get them from range checking
907 */
908 ret = op_ret;
909 goto out_put_keys;
910 #endif
911
912 if (unlikely(op_ret != -EFAULT)) {
913 ret = op_ret;
914 goto out_put_keys;
915 }
916
917 ret = get_user(dummy, uaddr2);
918 if (ret)
919 goto out_put_keys;
920
921 if (!fshared)
922 goto retry_private;
923
924 put_futex_key(fshared, &key2);
925 put_futex_key(fshared, &key1);
926 goto retry;
927 }
928
929 head = &hb1->chain;
930
931 plist_for_each_entry_safe(this, next, head, list) {
932 		if (match_futex(&this->key, &key1)) {
933 wake_futex(this);
934 if (++ret >= nr_wake)
935 break;
936 }
937 }
938
939 if (op_ret > 0) {
940 head = &hb2->chain;
941
942 op_ret = 0;
943 plist_for_each_entry_safe(this, next, head, list) {
944 			if (match_futex(&this->key, &key2)) {
945 wake_futex(this);
946 if (++op_ret >= nr_wake2)
947 break;
948 }
949 }
950 ret += op_ret;
951 }
952
953 double_unlock_hb(hb1, hb2);
954 out_put_keys:
955 put_futex_key(fshared, &key2);
956 out_put_key1:
957 put_futex_key(fshared, &key1);
958 out:
959 return ret;
960 }
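
/*
 * A sketch (assumption, for illustration only) of FUTEX_WAKE_OP, which
 * glibc uses to save a syscall when one wakeup must also update and
 * possibly wake a second futex word: the encoded op is applied to
 * uaddr2 atomically, up to nr_wake waiters on uaddr1 are woken, and up
 * to nr_wake2 waiters on uaddr2 are woken when the old value satisfies
 * the encoded comparison.
 */
#if 0
/*
 * Wake one waiter on &a; atomically set b = 1 and, if b's old value
 * was 0, also wake one waiter on &b. nr_wake2 travels in the timeout
 * argument slot.
 */
syscall(SYS_futex, &a, FUTEX_WAKE_OP, 1, (void *)1UL, &b,
	FUTEX_OP(FUTEX_OP_SET, 1, FUTEX_OP_CMP_EQ, 0));
#endif
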
961
962 /**
963 * requeue_futex() - Requeue a futex_q from one hb to another
964 * @q: the futex_q to requeue
965 * @hb1: the source hash_bucket
966 * @hb2: the target hash_bucket
967 * @key2: the new key for the requeued futex_q
968 */
969 static inline
970 void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
971 struct futex_hash_bucket *hb2, union futex_key *key2)
972 {
973
974 /*
975 * If key1 and key2 hash to the same bucket, no need to
976 * requeue.
977 */
978 if (likely(&hb1->chain != &hb2->chain)) {
979 plist_del(&q->list, &hb1->chain);
980 plist_add(&q->list, &hb2->chain);
981 q->lock_ptr = &hb2->lock;
982 #ifdef CONFIG_DEBUG_PI_LIST
983 q->list.plist.lock = &hb2->lock;
984 #endif
985 }
986 get_futex_key_refs(key2);
987 q->key = *key2;
988 }
989
990 /**
991 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
992 * @q: the futex_q
993 * @key: the key of the requeue target futex
994 *
995 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
996 * target futex if it is uncontended or via a lock steal. Set the futex_q key
997 * to the requeue target futex so the waiter can detect the wakeup on the right
998 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
999 * atomic lock acquisition. Must be called with the q->lock_ptr held.
1000 */
1001 static inline
1002 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key)
1003 {
1004 drop_futex_key_refs(&q->key);
1005 get_futex_key_refs(key);
1006 q->key = *key;
1007
1008 WARN_ON(plist_node_empty(&q->list));
1009 plist_del(&q->list, &q->list.plist);
1010
1011 WARN_ON(!q->rt_waiter);
1012 q->rt_waiter = NULL;
1013
1014 wake_up_state(q->task, TASK_NORMAL);
1015 }
1016
1017 /**
1018 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1019 * @pifutex: the user address of the to futex
1020 * @hb1: the from futex hash bucket, must be locked by the caller
1021 * @hb2: the to futex hash bucket, must be locked by the caller
1022 * @key1: the from futex key
1023 * @key2: the to futex key
1024 * @ps: address to store the pi_state pointer
1025 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
1026 *
1027 * Try and get the lock on behalf of the top waiter if we can do it atomically.
1028 * Wake the top waiter if we succeed. If the caller specified set_waiters,
1029 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1030 * hb1 and hb2 must be held by the caller.
1031 *
1032 * Returns:
1033 * 0 - failed to acquire the lock atomically
1034 * 1 - acquired the lock
1035 * <0 - error
1036 */
1037 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1038 struct futex_hash_bucket *hb1,
1039 struct futex_hash_bucket *hb2,
1040 union futex_key *key1, union futex_key *key2,
1041 struct futex_pi_state **ps, int set_waiters)
1042 {
1043 struct futex_q *top_waiter = NULL;
1044 u32 curval;
1045 int ret;
1046
1047 if (get_futex_value_locked(&curval, pifutex))
1048 return -EFAULT;
1049
1050 /*
1051 * Find the top_waiter and determine if there are additional waiters.
1052 * If the caller intends to requeue more than 1 waiter to pifutex,
1053 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1054 * as we have means to handle the possible fault. If not, don't set
1055 * the bit unnecessarily as it will force the subsequent unlock to enter
1056 * the kernel.
1057 */
1058 top_waiter = futex_top_waiter(hb1, key1);
1059
1060 /* There are no waiters, nothing for us to do. */
1061 if (!top_waiter)
1062 return 0;
1063
1064 /*
1065 * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
1066 * the contended case or if set_waiters is 1. The pi_state is returned
1067 * in ps in contended cases.
1068 */
1069 ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1070 set_waiters);
1071 if (ret == 1)
1072 requeue_pi_wake_futex(top_waiter, key2);
1073
1074 return ret;
1075 }
1076
1077 /**
1078 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1079 * @uaddr1: source futex user address
1080 * @uaddr2: target futex user address
1081 * @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
1082 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
1083 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
1084 * pi futex (pi to pi requeue is not supported)
1085 *
1086 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1087 * uaddr2 atomically on behalf of the top waiter.
1088 *
1089 * Returns:
1090 * >=0 - on success, the number of tasks requeued or woken
1091 * <0 - on error
1092 */
1093 static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
1094 int nr_wake, int nr_requeue, u32 *cmpval,
1095 int requeue_pi)
1096 {
1097 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1098 int drop_count = 0, task_count = 0, ret;
1099 struct futex_pi_state *pi_state = NULL;
1100 struct futex_hash_bucket *hb1, *hb2;
1101 struct plist_head *head1;
1102 struct futex_q *this, *next;
1103 u32 curval2;
1104
1105 if (requeue_pi) {
1106 /*
1107 * requeue_pi requires a pi_state, try to allocate it now
1108 * without any locks in case it fails.
1109 */
1110 if (refill_pi_state_cache())
1111 return -ENOMEM;
1112 /*
1113 * requeue_pi must wake as many tasks as it can, up to nr_wake
1114 * + nr_requeue, since it acquires the rt_mutex prior to
1115 * returning to userspace, so as to not leave the rt_mutex with
1116 * waiters and no owner. However, second and third wake-ups
1117 * cannot be predicted as they involve race conditions with the
1118 * first wake and a fault while looking up the pi_state. Both
1119 * pthread_cond_signal() and pthread_cond_broadcast() should
1120 * use nr_wake=1.
1121 */
1122 if (nr_wake != 1)
1123 return -EINVAL;
1124 }
1125
1126 retry:
1127 if (pi_state != NULL) {
1128 /*
1129 * We will have to lookup the pi_state again, so free this one
1130 * to keep the accounting correct.
1131 */
1132 free_pi_state(pi_state);
1133 pi_state = NULL;
1134 }
1135
1136 ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
1137 if (unlikely(ret != 0))
1138 goto out;
1139 ret = get_futex_key(uaddr2, fshared, &key2,
1140 requeue_pi ? VERIFY_WRITE : VERIFY_READ);
1141 if (unlikely(ret != 0))
1142 goto out_put_key1;
1143
1144 hb1 = hash_futex(&key1);
1145 hb2 = hash_futex(&key2);
1146
1147 retry_private:
1148 double_lock_hb(hb1, hb2);
1149
1150 if (likely(cmpval != NULL)) {
1151 u32 curval;
1152
1153 ret = get_futex_value_locked(&curval, uaddr1);
1154
1155 if (unlikely(ret)) {
1156 double_unlock_hb(hb1, hb2);
1157
1158 ret = get_user(curval, uaddr1);
1159 if (ret)
1160 goto out_put_keys;
1161
1162 if (!fshared)
1163 goto retry_private;
1164
1165 put_futex_key(fshared, &key2);
1166 put_futex_key(fshared, &key1);
1167 goto retry;
1168 }
1169 if (curval != *cmpval) {
1170 ret = -EAGAIN;
1171 goto out_unlock;
1172 }
1173 }
1174
1175 if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
1176 /*
1177 * Attempt to acquire uaddr2 and wake the top waiter. If we
1178 * intend to requeue waiters, force setting the FUTEX_WAITERS
1179 * bit. We force this here where we are able to easily handle
1180 * faults rather in the requeue loop below.
1181 */
1182 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
1183 &key2, &pi_state, nr_requeue);
1184
1185 /*
1186 * At this point the top_waiter has either taken uaddr2 or is
1187 * waiting on it. If the former, then the pi_state will not
1188 * exist yet, look it up one more time to ensure we have a
1189 * reference to it.
1190 */
1191 if (ret == 1) {
1192 WARN_ON(pi_state);
1193 task_count++;
1194 ret = get_futex_value_locked(&curval2, uaddr2);
1195 if (!ret)
1196 ret = lookup_pi_state(curval2, hb2, &key2,
1197 &pi_state);
1198 }
1199
1200 switch (ret) {
1201 case 0:
1202 break;
1203 case -EFAULT:
1204 double_unlock_hb(hb1, hb2);
1205 put_futex_key(fshared, &key2);
1206 put_futex_key(fshared, &key1);
1207 ret = get_user(curval2, uaddr2);
1208 if (!ret)
1209 goto retry;
1210 goto out;
1211 case -EAGAIN:
1212 /* The owner was exiting, try again. */
1213 double_unlock_hb(hb1, hb2);
1214 put_futex_key(fshared, &key2);
1215 put_futex_key(fshared, &key1);
1216 cond_resched();
1217 goto retry;
1218 default:
1219 goto out_unlock;
1220 }
1221 }
1222
1223 head1 = &hb1->chain;
1224 plist_for_each_entry_safe(this, next, head1, list) {
1225 if (task_count - nr_wake >= nr_requeue)
1226 break;
1227
1228 if (!match_futex(&this->key, &key1))
1229 continue;
1230
1231 WARN_ON(!requeue_pi && this->rt_waiter);
1232 WARN_ON(requeue_pi && !this->rt_waiter);
1233
1234 /*
1235 * Wake nr_wake waiters. For requeue_pi, if we acquired the
1236 * lock, we already woke the top_waiter. If not, it will be
1237 * woken by futex_unlock_pi().
1238 */
1239 if (++task_count <= nr_wake && !requeue_pi) {
1240 wake_futex(this);
1241 continue;
1242 }
1243
1244 /*
1245 * Requeue nr_requeue waiters and possibly one more in the case
1246 * of requeue_pi if we couldn't acquire the lock atomically.
1247 */
1248 if (requeue_pi) {
1249 /* Prepare the waiter to take the rt_mutex. */
1250 atomic_inc(&pi_state->refcount);
1251 this->pi_state = pi_state;
1252 ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
1253 this->rt_waiter,
1254 this->task, 1);
1255 if (ret == 1) {
1256 /* We got the lock. */
1257 requeue_pi_wake_futex(this, &key2);
1258 continue;
1259 } else if (ret) {
1260 /* -EDEADLK */
1261 this->pi_state = NULL;
1262 free_pi_state(pi_state);
1263 goto out_unlock;
1264 }
1265 }
1266 requeue_futex(this, hb1, hb2, &key2);
1267 drop_count++;
1268 }
1269
1270 out_unlock:
1271 double_unlock_hb(hb1, hb2);
1272
1273 /*
1274 * drop_futex_key_refs() must be called outside the spinlocks. During
1275 * the requeue we moved futex_q's from the hash bucket at key1 to the
1276 * one at key2 and updated their key pointer. We no longer need to
1277 * hold the references to key1.
1278 */
1279 while (--drop_count >= 0)
1280 drop_futex_key_refs(&key1);
1281
1282 out_put_keys:
1283 put_futex_key(fshared, &key2);
1284 out_put_key1:
1285 put_futex_key(fshared, &key1);
1286 out:
1287 if (pi_state != NULL)
1288 free_pi_state(pi_state);
1289 return ret ? ret : task_count;
1290 }
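
/*
 * A sketch (assumption, for illustration only) of the classic use of
 * the requeue path above: a condvar broadcast wakes one waiter and
 * requeues the rest onto the mutex word instead of creating a
 * thundering herd; cmpval guards against a racing change of the
 * condvar word. All names here are example placeholders.
 */
#if 0
syscall(SYS_futex, &cond_word, FUTEX_CMP_REQUEUE, 1 /* nr_wake */,
	(void *)(long)INT_MAX /* nr_requeue */, &mutex_word, cond_val);
#endif
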
1291
1292 /* The key must be already stored in q->key. */
1293 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1294 {
1295 struct futex_hash_bucket *hb;
1296
1297 get_futex_key_refs(&q->key);
1298 hb = hash_futex(&q->key);
1299 q->lock_ptr = &hb->lock;
1300
1301 spin_lock(&hb->lock);
1302 return hb;
1303 }
1304
1305 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1306 {
1307 int prio;
1308
1309 /*
1310 * The priority used to register this element is
1311 * - either the real thread-priority for the real-time threads
1312 * (i.e. threads with a priority lower than MAX_RT_PRIO)
1313 * - or MAX_RT_PRIO for non-RT threads.
1314 * Thus, all RT-threads are woken first in priority order, and
1315 * the others are woken last, in FIFO order.
1316 */
1317 prio = min(current->normal_prio, MAX_RT_PRIO);
1318
1319 plist_node_init(&q->list, prio);
1320 #ifdef CONFIG_DEBUG_PI_LIST
1321 q->list.plist.lock = &hb->lock;
1322 #endif
1323 plist_add(&q->list, &hb->chain);
1324 q->task = current;
1325 spin_unlock(&hb->lock);
1326 }
1327
1328 static inline void
1329 queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
1330 {
1331 spin_unlock(&hb->lock);
1332 drop_futex_key_refs(&q->key);
1333 }
1334
1335 /*
1336 * queue_me and unqueue_me must be called as a pair, each
1337 * exactly once. They are called with the hashed spinlock held.
1338 */
1339
1340 /* Return 1 if we were still queued (i.e. 0 means we were woken) */
1341 static int unqueue_me(struct futex_q *q)
1342 {
1343 spinlock_t *lock_ptr;
1344 int ret = 0;
1345
1346 /* In the common case we don't take the spinlock, which is nice. */
1347 retry:
1348 lock_ptr = q->lock_ptr;
1349 barrier();
1350 if (lock_ptr != NULL) {
1351 spin_lock(lock_ptr);
1352 /*
1353 * q->lock_ptr can change between reading it and
1354 * spin_lock(), causing us to take the wrong lock. This
1355 * corrects the race condition.
1356 *
1357 * Reasoning goes like this: if we have the wrong lock,
1358 * q->lock_ptr must have changed (maybe several times)
1359 * between reading it and the spin_lock(). It can
1360 * change again after the spin_lock() but only if it was
1361 * already changed before the spin_lock(). It cannot,
1362 * however, change back to the original value. Therefore
1363 * we can detect whether we acquired the correct lock.
1364 */
1365 if (unlikely(lock_ptr != q->lock_ptr)) {
1366 spin_unlock(lock_ptr);
1367 goto retry;
1368 }
1369 WARN_ON(plist_node_empty(&q->list));
1370 plist_del(&q->list, &q->list.plist);
1371
1372 BUG_ON(q->pi_state);
1373
1374 spin_unlock(lock_ptr);
1375 ret = 1;
1376 }
1377
1378 drop_futex_key_refs(&q->key);
1379 return ret;
1380 }
1381
1382 /*
1383 * PI futexes cannot be requeued and must remove themselves from the
1384 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
1385 * and dropped here.
1386 */
1387 static void unqueue_me_pi(struct futex_q *q)
1388 {
1389 WARN_ON(plist_node_empty(&q->list));
1390 plist_del(&q->list, &q->list.plist);
1391
1392 BUG_ON(!q->pi_state);
1393 free_pi_state(q->pi_state);
1394 q->pi_state = NULL;
1395
1396 spin_unlock(q->lock_ptr);
1397
1398 drop_futex_key_refs(&q->key);
1399 }
1400
1401 /*
1402 * Fixup the pi_state owner with the new owner.
1403 *
1404 * Must be called with hash bucket lock held and mm->sem held for non
1405 * private futexes.
1406 */
1407 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1408 struct task_struct *newowner, int fshared)
1409 {
1410 u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
1411 struct futex_pi_state *pi_state = q->pi_state;
1412 struct task_struct *oldowner = pi_state->owner;
1413 u32 uval, curval, newval;
1414 int ret;
1415
1416 /* Owner died? */
1417 if (!pi_state->owner)
1418 newtid |= FUTEX_OWNER_DIED;
1419
1420 /*
1421 * We are here either because we stole the rtmutex from the
1422 * pending owner or we are the pending owner which failed to
1423 * get the rtmutex. We have to replace the pending owner TID
1424 * in the user space variable. This must be atomic as we have
1425 * to preserve the owner died bit here.
1426 *
1427 * Note: We write the user space value _before_ changing the pi_state
1428 * because we can fault here. Imagine swapped out pages or a fork
1429 * that marked all the anonymous memory readonly for cow.
1430 *
1431 * Modifying pi_state _before_ the user space value would
1432 * leave the pi_state in an inconsistent state when we fault
1433 * here, because we need to drop the hash bucket lock to
1434 * handle the fault. This might be observed in the PID check
1435 * in lookup_pi_state.
1436 */
1437 retry:
1438 if (get_futex_value_locked(&uval, uaddr))
1439 goto handle_fault;
1440
1441 while (1) {
1442 newval = (uval & FUTEX_OWNER_DIED) | newtid;
1443
1444 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
1445
1446 if (curval == -EFAULT)
1447 goto handle_fault;
1448 if (curval == uval)
1449 break;
1450 uval = curval;
1451 }
1452
1453 /*
1454 * We fixed up user space. Now we need to fix the pi_state
1455 * itself.
1456 */
1457 if (pi_state->owner != NULL) {
1458 spin_lock_irq(&pi_state->owner->pi_lock);
1459 WARN_ON(list_empty(&pi_state->list));
1460 list_del_init(&pi_state->list);
1461 spin_unlock_irq(&pi_state->owner->pi_lock);
1462 }
1463
1464 pi_state->owner = newowner;
1465
1466 spin_lock_irq(&newowner->pi_lock);
1467 WARN_ON(!list_empty(&pi_state->list));
1468 list_add(&pi_state->list, &newowner->pi_state_list);
1469 spin_unlock_irq(&newowner->pi_lock);
1470 return 0;
1471
1472 /*
1473 * To handle the page fault we need to drop the hash bucket
1474 * lock here. That gives the other task (either the pending
1475 * owner itself or the task which stole the rtmutex) the
1476 * chance to try the fixup of the pi_state. So once we are
1477 * back from handling the fault we need to check the pi_state
1478 * after reacquiring the hash bucket lock and before trying to
1479 * do another fixup. When the fixup has been done already we
1480 * simply return.
1481 */
1482 handle_fault:
1483 spin_unlock(q->lock_ptr);
1484
1485 ret = get_user(uval, uaddr);
1486
1487 spin_lock(q->lock_ptr);
1488
1489 /*
1490 * Check if someone else fixed it for us:
1491 */
1492 if (pi_state->owner != oldowner)
1493 return 0;
1494
1495 if (ret)
1496 return ret;
1497
1498 goto retry;
1499 }
1500
1501 /*
1502 * In case we must use restart_block to restart a futex_wait,
1503 * we encode the shared capability and other options in 'flags':
1504 */
1505 #define FLAGS_SHARED 0x01
1506 #define FLAGS_CLOCKRT 0x02
1507 #define FLAGS_HAS_TIMEOUT 0x04
1508
1509 static long futex_wait_restart(struct restart_block *restart);
1510
1511 /**
1512 * fixup_owner() - Post lock pi_state and corner case management
1513 * @uaddr: user address of the futex
1514 * @fshared: whether the futex is shared (1) or not (0)
1515 * @q: futex_q (contains pi_state and access to the rt_mutex)
1516 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
1517 *
1518 * After attempting to lock an rt_mutex, this function is called to cleanup
1519 * the pi_state owner as well as handle race conditions that may allow us to
1520 * acquire the lock. Must be called with the hb lock held.
1521 *
1522 * Returns:
1523 * 1 - success, lock taken
1524 * 0 - success, lock not taken
1525 * <0 - on error (-EFAULT)
1526 */
1527 static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q,
1528 int locked)
1529 {
1530 struct task_struct *owner;
1531 int ret = 0;
1532
1533 if (locked) {
1534 /*
1535 * Got the lock. We might not be the anticipated owner if we
1536 * did a lock-steal - fix up the PI-state in that case:
1537 */
1538 if (q->pi_state->owner != current)
1539 ret = fixup_pi_state_owner(uaddr, q, current, fshared);
1540 goto out;
1541 }
1542
1543 /*
1544 * Catch the rare case, where the lock was released when we were on the
1545 * way back before we locked the hash bucket.
1546 */
1547 if (q->pi_state->owner == current) {
1548 /*
1549 * Try to get the rt_mutex now. This might fail as some other
1550 * task acquired the rt_mutex after we removed ourself from the
1551 * rt_mutex waiters list.
1552 */
1553 if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
1554 locked = 1;
1555 goto out;
1556 }
1557
1558 /*
1559 * pi_state is incorrect, some other task did a lock steal and
1560 * we returned due to timeout or signal without taking the
1561 * rt_mutex. Too late. We can access the rt_mutex_owner without
1562 * locking, as the other task is now blocked on the hash bucket
1563 * lock. Fix the state up.
1564 */
1565 owner = rt_mutex_owner(&q->pi_state->pi_mutex);
1566 ret = fixup_pi_state_owner(uaddr, q, owner, fshared);
1567 goto out;
1568 }
1569
1570 /*
1571 * Paranoia check. If we did not take the lock, then we should not be
1572 * the owner, nor the pending owner, of the rt_mutex.
1573 */
1574 if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
1575 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
1576 "pi-state %p\n", ret,
1577 q->pi_state->pi_mutex.owner,
1578 q->pi_state->owner);
1579
1580 out:
1581 return ret ? ret : locked;
1582 }
1583
1584 /**
1585 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
1586 * @hb: the futex hash bucket, must be locked by the caller
1587 * @q: the futex_q to queue up on
1588 * @timeout: the prepared hrtimer_sleeper, or null for no timeout
1589 */
1590 static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
1591 struct hrtimer_sleeper *timeout)
1592 {
1593 queue_me(q, hb);
1594
1595 /*
1596 * There might have been scheduling since the queue_me(), as we
1597 * cannot hold a spinlock across the get_user() in case it
1598 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
1599 * queueing ourselves into the futex hash. This code thus has to
1600 * rely on the futex_wake() code removing us from hash when it
1601 * wakes us up.
1602 */
1603 set_current_state(TASK_INTERRUPTIBLE);
1604
1605 /* Arm the timer */
1606 if (timeout) {
1607 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
1608 if (!hrtimer_active(&timeout->timer))
1609 timeout->task = NULL;
1610 }
1611
1612 /*
1613 * !plist_node_empty() is safe here without any lock.
1614 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
1615 */
1616 if (likely(!plist_node_empty(&q->list))) {
1617 /*
1618 * If the timer has already expired, current will already be
1619 * flagged for rescheduling. Only call schedule if there
1620 * is no timeout, or if it has yet to expire.
1621 */
1622 if (!timeout || timeout->task)
1623 schedule();
1624 }
1625 __set_current_state(TASK_RUNNING);
1626 }
1627
1628 /**
1629 * futex_wait_setup() - Prepare to wait on a futex
1630 * @uaddr: the futex userspace address
1631 * @val: the expected value
1632 * @fshared: whether the futex is shared (1) or not (0)
1633 * @q: the associated futex_q
1634 * @hb: storage for hash_bucket pointer to be returned to caller
1635 *
1636 * Setup the futex_q and locate the hash_bucket. Get the futex value and
1637 * compare it with the expected value. Handle atomic faults internally.
1638 * Return with the hb lock held and a q.key reference on success, and unlocked
1639 * with no q.key reference on failure.
1640 *
1641 * Returns:
1642 * 0 - uaddr contains val and hb has been locked
1643 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
1644 */
1645 static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared,
1646 struct futex_q *q, struct futex_hash_bucket **hb)
1647 {
1648 u32 uval;
1649 int ret;
1650
1651 /*
1652 * Access the page AFTER the hash-bucket is locked.
1653 * Order is important:
1654 *
1655 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
1656 * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
1657 *
1658 * The basic logical guarantee of a futex is that it blocks ONLY
1659 * if cond(var) is known to be true at the time of blocking, for
1660 * any cond. If we queued after testing *uaddr, that would open
1661 * a race condition where we could block indefinitely with
1662 * cond(var) false, which would violate the guarantee.
1663 *
1664 * A consequence is that futex_wait() can return zero and absorb
1665 * a wakeup when *uaddr != val on entry to the syscall. This is
1666 * rare, but normal.
1667 */
1668 retry:
1669 q->key = FUTEX_KEY_INIT;
1670 ret = get_futex_key(uaddr, fshared, &q->key, VERIFY_READ);
1671 if (unlikely(ret != 0))
1672 return ret;
1673
1674 retry_private:
1675 *hb = queue_lock(q);
1676
1677 ret = get_futex_value_locked(&uval, uaddr);
1678
1679 if (ret) {
1680 queue_unlock(q, *hb);
1681
1682 ret = get_user(uval, uaddr);
1683 if (ret)
1684 goto out;
1685
1686 if (!fshared)
1687 goto retry_private;
1688
1689 put_futex_key(fshared, &q->key);
1690 goto retry;
1691 }
1692
1693 if (uval != val) {
1694 queue_unlock(q, *hb);
1695 ret = -EWOULDBLOCK;
1696 }
1697
1698 out:
1699 if (ret)
1700 put_futex_key(fshared, &q->key);
1701 return ret;
1702 }
1703
1704 static int futex_wait(u32 __user *uaddr, int fshared,
1705 u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
1706 {
1707 struct hrtimer_sleeper timeout, *to = NULL;
1708 struct restart_block *restart;
1709 struct futex_hash_bucket *hb;
1710 struct futex_q q;
1711 int ret;
1712
1713 if (!bitset)
1714 return -EINVAL;
1715
1716 q.pi_state = NULL;
1717 q.bitset = bitset;
1718 q.rt_waiter = NULL;
1719
1720 if (abs_time) {
1721 to = &timeout;
1722
1723 hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
1724 CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1725 hrtimer_init_sleeper(to, current);
1726 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
1727 current->timer_slack_ns);
1728 }
1729
1730 /* Prepare to wait on uaddr. */
1731 ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
1732 if (ret)
1733 goto out;
1734
1735 /* queue_me and wait for wakeup, timeout, or a signal. */
1736 futex_wait_queue_me(hb, &q, to);
1737
1738 /* If we were woken (and unqueued), we succeeded, whatever. */
1739 ret = 0;
1740 if (!unqueue_me(&q))
1741 goto out_put_key;
1742 ret = -ETIMEDOUT;
1743 if (to && !to->task)
1744 goto out_put_key;
1745
1746 /*
1747 * We expect signal_pending(current), but another thread may
1748 * have handled it for us already.
1749 */
1750 ret = -ERESTARTSYS;
1751 if (!abs_time)
1752 goto out_put_key;
1753
1754 restart = &current_thread_info()->restart_block;
1755 restart->fn = futex_wait_restart;
1756 restart->futex.uaddr = (u32 *)uaddr;
1757 restart->futex.val = val;
1758 restart->futex.time = abs_time->tv64;
1759 restart->futex.bitset = bitset;
1760 restart->futex.flags = FLAGS_HAS_TIMEOUT;
1761
1762 if (fshared)
1763 restart->futex.flags |= FLAGS_SHARED;
1764 if (clockrt)
1765 restart->futex.flags |= FLAGS_CLOCKRT;
1766
1767 ret = -ERESTART_RESTARTBLOCK;
1768
1769 out_put_key:
1770 put_futex_key(fshared, &q.key);
1771 out:
1772 if (to) {
1773 hrtimer_cancel(&to->timer);
1774 destroy_hrtimer_on_stack(&to->timer);
1775 }
1776 return ret;
1777 }
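
/*
 * A sketch (assumption, for illustration only) of how the return values
 * of the wait path above surface in userspace. EWOULDBLOCK merely means
 * the word no longer held the expected value, so a correct caller just
 * re-checks its condition. 'word', 'expected' and 'rel' are example
 * placeholders; plain FUTEX_WAIT takes a relative timeout.
 */
#if 0
#include <errno.h>

for (;;) {
	if (atomic_load(&word) != expected)
		break;		/* condition changed, nothing to wait for */
	if (syscall(SYS_futex, &word, FUTEX_WAIT, expected, &rel,
		    NULL, 0) == 0)
		continue;	/* woken: re-check the condition */
	if (errno == ETIMEDOUT)
		break;		/* timeout policy is the caller's business */
	/* EINTR and EAGAIN/EWOULDBLOCK: loop and re-check */
}
#endif
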
1778
1779
1780 static long futex_wait_restart(struct restart_block *restart)
1781 {
1782 u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
1783 int fshared = 0;
1784 ktime_t t, *tp = NULL;
1785
1786 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
1787 t.tv64 = restart->futex.time;
1788 tp = &t;
1789 }
1790 restart->fn = do_no_restart_syscall;
1791 if (restart->futex.flags & FLAGS_SHARED)
1792 fshared = 1;
1793 return (long)futex_wait(uaddr, fshared, restart->futex.val, tp,
1794 restart->futex.bitset,
1795 restart->futex.flags & FLAGS_CLOCKRT);
1796 }
1797
1798
1799 /*
1800 * Userspace tried a 0 -> TID atomic transition of the futex value
1801 * and failed. The kernel side here does the whole locking operation:
1802 * if there are waiters then it will block, it does PI, etc. (Due to
1803 * races the kernel might see a 0 value of the futex too.)
1804 */
1805 static int futex_lock_pi(u32 __user *uaddr, int fshared,
1806 int detect, ktime_t *time, int trylock)
1807 {
1808 struct hrtimer_sleeper timeout, *to = NULL;
1809 struct futex_hash_bucket *hb;
1810 u32 uval;
1811 struct futex_q q;
1812 int res, ret;
1813
1814 if (refill_pi_state_cache())
1815 return -ENOMEM;
1816
1817 if (time) {
1818 to = &timeout;
1819 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
1820 HRTIMER_MODE_ABS);
1821 hrtimer_init_sleeper(to, current);
1822 hrtimer_set_expires(&to->timer, *time);
1823 }
1824
1825 q.pi_state = NULL;
1826 q.rt_waiter = NULL;
1827 retry:
1828 q.key = FUTEX_KEY_INIT;
1829 ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
1830 if (unlikely(ret != 0))
1831 goto out;
1832
1833 retry_private:
1834 hb = queue_lock(&q);
1835
1836 ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
1837 if (unlikely(ret)) {
1838 switch (ret) {
1839 case 1:
1840 /* We got the lock. */
1841 ret = 0;
1842 goto out_unlock_put_key;
1843 case -EFAULT:
1844 goto uaddr_faulted;
1845 case -EAGAIN:
1846 /*
1847 * Task is exiting and we just wait for the
1848 * exit to complete.
1849 */
1850 queue_unlock(&q, hb);
1851 put_futex_key(fshared, &q.key);
1852 cond_resched();
1853 goto retry;
1854 default:
1855 goto out_unlock_put_key;
1856 }
1857 }
1858
1859 /*
1860 * Only actually queue now that the atomic ops are done:
1861 */
1862 queue_me(&q, hb);
1863
1864 WARN_ON(!q.pi_state);
1865 /*
1866 * Block on the PI mutex:
1867 */
1868 if (!trylock)
1869 ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
1870 else {
1871 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
1872 /* Fixup the trylock return value: */
1873 ret = ret ? 0 : -EWOULDBLOCK;
1874 }
1875
1876 spin_lock(q.lock_ptr);
1877 /*
1878 * Fixup the pi_state owner and possibly acquire the lock if we
1879 * haven't already.
1880 */
1881 res = fixup_owner(uaddr, fshared, &q, !ret);
1882 /*
1883 * If fixup_owner() returned an error, propagate that. If it acquired
1884 * the lock, clear our -ETIMEDOUT or -EINTR.
1885 */
1886 if (res)
1887 ret = (res < 0) ? res : 0;
1888
1889 /*
1890 * If fixup_owner() faulted and was unable to handle the fault, unlock
1891 * it and return the fault to userspace.
1892 */
1893 if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
1894 rt_mutex_unlock(&q.pi_state->pi_mutex);
1895
1896 /* Unqueue and drop the lock */
1897 unqueue_me_pi(&q);
1898
1899 goto out;
1900
1901 out_unlock_put_key:
1902 queue_unlock(&q, hb);
1903
1904 out_put_key:
1905 put_futex_key(fshared, &q.key);
1906 out:
1907 if (to)
1908 destroy_hrtimer_on_stack(&to->timer);
1909 return ret != -EINTR ? ret : -ERESTARTNOINTR;
1910
1911 uaddr_faulted:
1912 /*
1913 * We have to r/w *(int __user *)uaddr, and we have to modify it
1914 * atomically. Therefore, if we continue to fault after get_user()
1915 * below, we need to handle the fault ourselves. This can occur if
1916 * the uaddr is under contention, as we have to drop the hash bucket
1917 * lock in order to call get_user().
1918 */
1919 queue_unlock(&q, hb);
1920
1921 ret = get_user(uval, uaddr);
1922 if (ret)
1923 goto out_put_key;
1924
1925 if (!fshared)
1926 goto retry_private;
1927
1928 put_futex_key(fshared, &q.key);
1929 goto retry;
1930 }
1931
1932 /*
1933 * Userspace attempted a TID -> 0 atomic transition, and failed.
1934 * This is the in-kernel slowpath: we look up the PI state (if any),
1935 * and do the rt-mutex unlock.
1936 */
1937 static int futex_unlock_pi(u32 __user *uaddr, int fshared)
1938 {
1939 struct futex_hash_bucket *hb;
1940 struct futex_q *this, *next;
1941 u32 uval;
1942 struct plist_head *head;
1943 union futex_key key = FUTEX_KEY_INIT;
1944 int ret;
1945
1946 retry:
1947 if (get_user(uval, uaddr))
1948 return -EFAULT;
1949 /*
1950 * We release only a lock we actually own:
1951 */
1952 if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
1953 return -EPERM;
1954
1955 ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
1956 if (unlikely(ret != 0))
1957 goto out;
1958
1959 hb = hash_futex(&key);
1960 spin_lock(&hb->lock);
1961
1962 /*
1963 * To avoid races, try to do the TID -> 0 atomic transition
1964 * again. If it succeeds then we can return without waking
1965 * anyone else up:
1966 */
1967 if (!(uval & FUTEX_OWNER_DIED))
1968 uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);
1969
1970
1971 if (unlikely(uval == -EFAULT))
1972 goto pi_faulted;
1973 /*
1974 * Rare case: we managed to release the lock atomically,
1975 * no need to wake anyone else up:
1976 */
1977 if (unlikely(uval == task_pid_vnr(current)))
1978 goto out_unlock;
1979
1980 /*
1981 * Ok, other tasks may need to be woken up - check waiters
1982 * and do the wakeup if necessary:
1983 */
1984 head = &hb->chain;
1985
1986 plist_for_each_entry_safe(this, next, head, list) {
1987 if (!match_futex(&this->key, &key))
1988 continue;
1989 ret = wake_futex_pi(uaddr, uval, this);
1990 /*
1991 * The atomic access to the futex value
1992 * generated a pagefault, so retry the
1993 * user-access and the wakeup:
1994 */
1995 if (ret == -EFAULT)
1996 goto pi_faulted;
1997 goto out_unlock;
1998 }
1999 /*
2000 * No waiters - kernel unlocks the futex:
2001 */
2002 if (!(uval & FUTEX_OWNER_DIED)) {
2003 ret = unlock_futex_pi(uaddr, uval);
2004 if (ret == -EFAULT)
2005 goto pi_faulted;
2006 }
2007
2008 out_unlock:
2009 spin_unlock(&hb->lock);
2010 put_futex_key(fshared, &key);
2011
2012 out:
2013 return ret;
2014
2015 pi_faulted:
2016 /*
2017 * We have to r/w *(int __user *)uaddr, and we have to modify it
2018 * atomically. Therefore, if we continue to fault after get_user()
2019 * below, we need to handle the fault ourselves, while still holding
2020 * the mmap_sem. This can occur if the uaddr is under contention as
2021 * we have to drop the mmap_sem in order to call get_user().
2022 */
2023 spin_unlock(&hb->lock);
2024 put_futex_key(fshared, &key);
2025
2026 ret = get_user(uval, uaddr);
2027 if (!ret)
2028 goto retry;
2029
2030 return ret;
2031 }
2032
2033 /**
2034 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2035 * @hb: the hash_bucket futex_q was originally enqueued on
2036 * @q: the futex_q woken while waiting to be requeued
2037 * @key2: the futex_key of the requeue target futex
2038 * @timeout: the timeout associated with the wait (NULL if none)
2039 *
2040 * Detect if the task was woken on the initial futex as opposed to the requeue
2041 * target futex. If so, determine if it was a timeout or a signal that caused
2042 * the wakeup and return the appropriate error code to the caller. Must be
2043 * called with the hb lock held.
2044 *
2045 * Returns:
2046 * 0 - no early wakeup detected
2047 * <0 - -ETIMEDOUT or -ERESTARTNOINTR
2048 */
2049 static inline
2050 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2051 struct futex_q *q, union futex_key *key2,
2052 struct hrtimer_sleeper *timeout)
2053 {
2054 int ret = 0;
2055
2056 /*
2057 * With the hb lock held, we avoid races while we process the wakeup.
2058 * We only need to hold hb (and not hb2) to ensure atomicity as the
2059 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2060 * It can't be requeued from uaddr2 to something else since we don't
2061 * support a PI aware source futex for requeue.
2062 */
2063 if (!match_futex(&q->key, key2)) {
2064 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2065 /*
2066 * We were woken prior to requeue by a timeout or a signal.
2067 * Unqueue the futex_q and determine which it was.
2068 */
2069 plist_del(&q->list, &q->list.plist);
2070 drop_futex_key_refs(&q->key);
2071
2072 if (timeout && !timeout->task)
2073 ret = -ETIMEDOUT;
2074 else
2075 ret = -ERESTARTNOINTR;
2076 }
2077 return ret;
2078 }
2079
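/*
 * Illustrative userspace sketch (not part of this file) of the
 * condvar-style pairing that futex_wait_requeue_pi() below was designed
 * for, per futex(2). The names cond, mutex and seq are hypothetical;
 * seq is the sequence value the waiter expects to find in cond.
 *
 *	// Waiter: sleep on &cond; wake up owning, or queued on, &mutex.
 *	syscall(SYS_futex, &cond, FUTEX_WAIT_REQUEUE_PI, seq,
 *		NULL, &mutex, 0);
 *
 *	// Waker: requeue up to INT_MAX waiters from &cond onto the
 *	// PI futex &mutex, waking at most one (val == 1 is required).
 *	syscall(SYS_futex, &cond, FUTEX_CMP_REQUEUE_PI, 1,
 *		(void *)(long)INT_MAX, &mutex, seq);
 */
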
2080 /**
2081 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2082 * @uaddr: the futex we initially wait on (non-pi)
2083 * @fshared: whether the futexes are shared (1) or not (0). They must be
2084 * the same type, no requeueing from private to shared, etc.
2085 * @val: the expected value of uaddr
2086 * @abs_time: absolute timeout
2087 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all.
2088 * @clockrt: whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0)
2089 * @uaddr2: the pi futex we will take prior to returning to user-space
2090 *
2091 * The caller will wait on uaddr and will be requeued by futex_requeue() to
2092 * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and
2093 * complete the acquisition of the rt_mutex prior to returning to userspace.
2094 * This ensures the rt_mutex maintains an owner when it has waiters; without
2095 * one, the pi logic wouldn't know which task to boost/deboost, if there was a
2096 * need to.
2097 *
2098 * We call schedule() in futex_wait_queue_me() when we enqueue and return there
2099 * via the following:
2100 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2101 * 2) wakeup on uaddr2 after a requeue and subsequent unlock
2102 * 3) signal (before or after requeue)
2103 * 4) timeout (before or after requeue)
2104 *
2105 * If 3, we set up a restart_block with futex_wait_requeue_pi() as the function.
2106 *
2107 * If 2, we may then block on trying to take the rt_mutex and return via:
2108 * 5) successful lock
2109 * 6) signal
2110 * 7) timeout
2111 * 8) other lock acquisition failure
2112 *
2113 * If 6, we set up a restart_block with futex_lock_pi() as the function.
2114 *
2115 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2116 *
2117 * Returns:
2118 * 0 - On success
2119 * <0 - On error
2120 */
2121 static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
2122 u32 val, ktime_t *abs_time, u32 bitset,
2123 int clockrt, u32 __user *uaddr2)
2124 {
2125 struct hrtimer_sleeper timeout, *to = NULL;
2126 struct rt_mutex_waiter rt_waiter;
2127 struct rt_mutex *pi_mutex = NULL;
2128 struct futex_hash_bucket *hb;
2129 union futex_key key2;
2130 struct futex_q q;
2131 int res, ret;
2132
2133 if (!bitset)
2134 return -EINVAL;
2135
2136 if (abs_time) {
2137 to = &timeout;
2138 hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
2139 CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2140 hrtimer_init_sleeper(to, current);
2141 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2142 current->timer_slack_ns);
2143 }
2144
2145 /*
2146 * The waiter is allocated on our stack, manipulated by the requeue
2147 * code while we sleep on uaddr.
2148 */
2149 debug_rt_mutex_init_waiter(&rt_waiter);
2150 rt_waiter.task = NULL;
2151
2152 q.pi_state = NULL;
2153 q.bitset = bitset;
2154 q.rt_waiter = &rt_waiter;
2155
2156 key2 = FUTEX_KEY_INIT;
2157 ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
2158 if (unlikely(ret != 0))
2159 goto out;
2160
2161 /* Prepare to wait on uaddr. */
2162 ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
2163 if (ret)
2164 goto out_key2;
2165
2166 /* Queue the futex_q, drop the hb lock, wait for wakeup. */
2167 futex_wait_queue_me(hb, &q, to);
2168
2169 spin_lock(&hb->lock);
2170 ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2171 spin_unlock(&hb->lock);
2172 if (ret)
2173 goto out_put_keys;
2174
2175 /*
2176 * In order for us to be here, we know our q.key == key2, and since
2177 * we took the hb->lock above, we also know that futex_requeue() has
2178 * completed and we no longer have to concern ourselves with a wakeup
2179 * race with the atomic proxy lock acquisition by the requeue code.
2180 */
2181
2182 /* Check if the requeue code acquired the second futex for us. */
2183 if (!q.rt_waiter) {
2184 /*
2185 * Got the lock. We might not be the anticipated owner if we
2186 * did a lock-steal - fix up the PI-state in that case.
2187 */
2188 if (q.pi_state && (q.pi_state->owner != current)) {
2189 spin_lock(q.lock_ptr);
2190 ret = fixup_pi_state_owner(uaddr2, &q, current,
2191 fshared);
2192 spin_unlock(q.lock_ptr);
2193 }
2194 } else {
2195 /*
2196 * We have been woken up by futex_unlock_pi(), a timeout, or a
2197 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
2198 * the pi_state.
2199 */
2200 WARN_ON(!q.pi_state);
2201 pi_mutex = &q.pi_state->pi_mutex;
2202 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
2203 debug_rt_mutex_free_waiter(&rt_waiter);
2204
2205 spin_lock(q.lock_ptr);
2206 /*
2207 * Fixup the pi_state owner and possibly acquire the lock if we
2208 * haven't already.
2209 */
2210 res = fixup_owner(uaddr2, fshared, &q, !ret);
2211 /*
2212 * If fixup_owner() returned an error, propagate that. If it
2213 * acquired the lock, clear our -ETIMEDOUT or -EINTR.
2214 */
2215 if (res)
2216 ret = (res < 0) ? res : 0;
2217
2218 /* Unqueue and drop the lock. */
2219 unqueue_me_pi(&q);
2220 }
2221
2222 /*
2223 * If fixup_pi_state_owner() faulted and was unable to handle the
2224 * fault, unlock the rt_mutex and return the fault to userspace.
2225 */
2226 if (ret == -EFAULT) {
2227 if (rt_mutex_owner(pi_mutex) == current)
2228 rt_mutex_unlock(pi_mutex);
2229 } else if (ret == -EINTR) {
2230 /*
2231 * We've already been requeued, but we have no way to
2232 * restart by calling futex_lock_pi() directly. We
2233 * could restart the syscall, but that will look at
2234 * the user space value and return right away. So we
2235 * drop back with EWOULDBLOCK to tell user space that
2236 * "val" has been changed. That's the same what the
2237 * restart of the syscall would do in
2238 * futex_wait_setup().
2239 */
2240 ret = -EWOULDBLOCK;
2241 }
2242
2243 out_put_keys:
2244 put_futex_key(fshared, &q.key);
2245 out_key2:
2246 put_futex_key(fshared, &key2);
2247
2248 out:
2249 if (to) {
2250 hrtimer_cancel(&to->timer);
2251 destroy_hrtimer_on_stack(&to->timer);
2252 }
2253 return ret;
2254 }
2255
2256 /*
2257 * Support for robust futexes: the kernel cleans up held futexes at
2258 * thread exit time.
2259 *
2260 * Implementation: user-space maintains a per-thread list of locks it
2261 * is holding. Upon do_exit(), the kernel carefully walks this list,
2262 * and marks all locks that are owned by this thread with the
2263 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2264 * always manipulated with the lock held, so the list is private and
2265 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2266 * field, to allow the kernel to clean up if the thread dies after
2267 * acquiring the lock, but just before it could have added itself to
2268 * the list. There can only be one such pending lock.
2269 */
2270
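/*
 * Illustrative userspace sketch (not part of this file): registering
 * the per-thread list described above, roughly as the C library does at
 * thread start. The structures come from <linux/futex.h>; the raw
 * syscall wrapper is hypothetical, as libc keeps its own list head.
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static struct robust_list_head head;
 *
 *	static long register_robust_list(void)
 *	{
 *		head.list.next = &head.list;	// empty circular list
 *		head.futex_offset = 0;		// lock word offset in an entry
 *		head.list_op_pending = NULL;	// no lock-add in flight
 *		// One head per thread; walked by exit_robust_list() at exit.
 *		return syscall(SYS_set_robust_list, &head, sizeof(head));
 *	}
 */
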
2271 /**
2272 * sys_set_robust_list - set the robust-futex list head of a task
2273 * @head: pointer to the list-head
2274 * @len: length of the list-head, as userspace expects
2275 */
2276 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2277 size_t, len)
2278 {
2279 if (!futex_cmpxchg_enabled)
2280 return -ENOSYS;
2281 /*
2282 * The kernel knows only one size for now:
2283 */
2284 if (unlikely(len != sizeof(*head)))
2285 return -EINVAL;
2286
2287 current->robust_list = head;
2288
2289 return 0;
2290 }
2291
2292 /**
2293 * sys_get_robust_list - get the robust-futex list head of a task
2294 * @pid: pid of the process [zero for current task]
2295 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
2296 * @len_ptr: pointer to a length field, the kernel fills in the header size
2297 */
2298 SYSCALL_DEFINE3(get_robust_list, int, pid,
2299 struct robust_list_head __user * __user *, head_ptr,
2300 size_t __user *, len_ptr)
2301 {
2302 struct robust_list_head __user *head;
2303 unsigned long ret;
2304 const struct cred *cred = current_cred(), *pcred;
2305
2306 if (!futex_cmpxchg_enabled)
2307 return -ENOSYS;
2308
2309 if (!pid)
2310 head = current->robust_list;
2311 else {
2312 struct task_struct *p;
2313
2314 ret = -ESRCH;
2315 rcu_read_lock();
2316 p = find_task_by_vpid(pid);
2317 if (!p)
2318 goto err_unlock;
2319 ret = -EPERM;
2320 pcred = __task_cred(p);
2321 if (cred->euid != pcred->euid &&
2322 cred->euid != pcred->uid &&
2323 !capable(CAP_SYS_PTRACE))
2324 goto err_unlock;
2325 head = p->robust_list;
2326 rcu_read_unlock();
2327 }
2328
2329 if (put_user(sizeof(*head), len_ptr))
2330 return -EFAULT;
2331 return put_user(head, head_ptr);
2332
2333 err_unlock:
2334 rcu_read_unlock();
2335
2336 return ret;
2337 }
2338
2339 /*
2340 * Process a futex-list entry, check whether it's owned by the
2341 * dying task, and do notification if so:
2342 */
2343 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
2344 {
2345 u32 uval, nval, mval;
2346
2347 retry:
2348 if (get_user(uval, uaddr))
2349 return -1;
2350
2351 if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
2352 /*
2353 * Ok, this dying thread is truly holding a futex
2354 * of interest. Set the OWNER_DIED bit atomically
2355 * via cmpxchg, and if the value had FUTEX_WAITERS
2356 * set, wake up a waiter (if any). (We have to do a
2357 * futex_wake() even if OWNER_DIED is already set -
2358 * to handle the rare but possible case of recursive
2359 * thread-death.) The rest of the cleanup is done in
2360 * userspace.
2361 */
2362 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
2363 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
2364
2365 if (nval == -EFAULT)
2366 return -1;
2367
2368 if (nval != uval)
2369 goto retry;
2370
2371 /*
2372 * Wake robust non-PI futexes here. The wakeup of
2373 * PI futexes happens in exit_pi_state():
2374 */
2375 if (!pi && (uval & FUTEX_WAITERS))
2376 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
2377 }
2378 return 0;
2379 }
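
/*
 * Illustrative userspace sketch (not part of this file): how the
 * FUTEX_OWNER_DIED bit set above surfaces through POSIX robust mutexes
 * (pthread_mutexattr_setrobust(), or the older *_np variants on old
 * glibc). Error handling is elided.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
 *	pthread_mutex_init(&mutex, &attr);
 *
 *	if (pthread_mutex_lock(&mutex) == EOWNERDEAD) {
 *		// The previous owner died holding the lock: repair the
 *		// protected state, then mark the mutex usable again.
 *		pthread_mutex_consistent(&mutex);
 *	}
 *	// ... critical section ...
 *	pthread_mutex_unlock(&mutex);
 */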
2380
2381 /*
2382 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
2383 */
2384 static inline int fetch_robust_entry(struct robust_list __user **entry,
2385 struct robust_list __user * __user *head,
2386 int *pi)
2387 {
2388 unsigned long uentry;
2389
2390 if (get_user(uentry, (unsigned long __user *)head))
2391 return -EFAULT;
2392
2393 *entry = (void __user *)(uentry & ~1UL);
2394 *pi = uentry & 1;
2395
2396 return 0;
2397 }
2398
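/*
 * Illustrative sketch (not part of this file) of the userspace side of
 * the bit-0 convention decoded above: a hypothetical enqueue of a PI
 * lock tags the entry pointer, relying on word alignment.
 *
 *	// Entries are word-aligned, so bit 0 is free to mean "PI futex".
 *	new_entry->next = head.list.next;
 *	head.list.next = (struct robust_list *)
 *			 ((unsigned long)new_entry | 1UL);
 */
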
2399 /*
2400 * Walk curr->robust_list (very carefully, it's a userspace list!)
2401 * and mark any locks found there dead, and notify any waiters.
2402 *
2403 * We silently return on any sign of list-walking problem.
2404 */
2405 void exit_robust_list(struct task_struct *curr)
2406 {
2407 struct robust_list_head __user *head = curr->robust_list;
2408 struct robust_list __user *entry, *next_entry, *pending;
2409 unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
2410 unsigned long futex_offset;
2411 int rc;
2412
2413 if (!futex_cmpxchg_enabled)
2414 return;
2415
2416 /*
2417 * Fetch the list head (which was registered earlier, via
2418 * sys_set_robust_list()):
2419 */
2420 if (fetch_robust_entry(&entry, &head->list.next, &pi))
2421 return;
2422 /*
2423 * Fetch the relative futex offset:
2424 */
2425 if (get_user(futex_offset, &head->futex_offset))
2426 return;
2427 /*
2428 * Fetch any possibly pending lock-add first, and handle it
2429 * if it exists:
2430 */
2431 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
2432 return;
2433
2434 next_entry = NULL; /* avoid warning with gcc */
2435 while (entry != &head->list) {
2436 /*
2437 * Fetch the next entry in the list before calling
2438 * handle_futex_death:
2439 */
2440 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
2441 /*
2442 * A pending lock might already be on the list, so
2443 * don't process it twice:
2444 */
2445 if (entry != pending)
2446 if (handle_futex_death((void __user *)entry + futex_offset,
2447 curr, pi))
2448 return;
2449 if (rc)
2450 return;
2451 entry = next_entry;
2452 pi = next_pi;
2453 /*
2454 * Avoid excessively long or circular lists:
2455 */
2456 if (!--limit)
2457 break;
2458
2459 cond_resched();
2460 }
2461
2462 if (pending)
2463 handle_futex_death((void __user *)pending + futex_offset,
2464 curr, pip);
2465 }
2466
2467 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2468 u32 __user *uaddr2, u32 val2, u32 val3)
2469 {
2470 int clockrt, ret = -ENOSYS;
2471 int cmd = op & FUTEX_CMD_MASK;
2472 int fshared = 0;
2473
2474 if (!(op & FUTEX_PRIVATE_FLAG))
2475 fshared = 1;
2476
2477 clockrt = op & FUTEX_CLOCK_REALTIME;
2478 if (clockrt && cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
2479 return -ENOSYS;
2480
2481 switch (cmd) {
2482 case FUTEX_WAIT:
2483 val3 = FUTEX_BITSET_MATCH_ANY; /* fall through */
2484 case FUTEX_WAIT_BITSET:
2485 ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
2486 break;
2487 case FUTEX_WAKE:
2488 val3 = FUTEX_BITSET_MATCH_ANY; /* fall through */
2489 case FUTEX_WAKE_BITSET:
2490 ret = futex_wake(uaddr, fshared, val, val3);
2491 break;
2492 case FUTEX_REQUEUE:
2493 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL, 0);
2494 break;
2495 case FUTEX_CMP_REQUEUE:
2496 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
2497 0);
2498 break;
2499 case FUTEX_WAKE_OP:
2500 ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
2501 break;
2502 case FUTEX_LOCK_PI:
2503 if (futex_cmpxchg_enabled)
2504 ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
2505 break;
2506 case FUTEX_UNLOCK_PI:
2507 if (futex_cmpxchg_enabled)
2508 ret = futex_unlock_pi(uaddr, fshared);
2509 break;
2510 case FUTEX_TRYLOCK_PI:
2511 if (futex_cmpxchg_enabled)
2512 ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
2513 break;
2514 case FUTEX_WAIT_REQUEUE_PI:
2515 val3 = FUTEX_BITSET_MATCH_ANY;
2516 ret = futex_wait_requeue_pi(uaddr, fshared, val, timeout, val3,
2517 clockrt, uaddr2);
2518 break;
2519 case FUTEX_CMP_REQUEUE_PI:
2520 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
2521 1);
2522 break;
2523 default:
2524 ret = -ENOSYS;
2525 }
2526 return ret;
2527 }
2528
2529
2530 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
2531 struct timespec __user *, utime, u32 __user *, uaddr2,
2532 u32, val3)
2533 {
2534 struct timespec ts;
2535 ktime_t t, *tp = NULL;
2536 u32 val2 = 0;
2537 int cmd = op & FUTEX_CMD_MASK;
2538
2539 if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
2540 cmd == FUTEX_WAIT_BITSET ||
2541 cmd == FUTEX_WAIT_REQUEUE_PI)) {
2542 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
2543 return -EFAULT;
2544 if (!timespec_valid(&ts))
2545 return -EINVAL;
2546
2547 t = timespec_to_ktime(ts);
2548 if (cmd == FUTEX_WAIT)
2549 t = ktime_add_safe(ktime_get(), t);
2550 tp = &t;
2551 }
2552 /*
2553 * requeue parameter in 'utime' if cmd is one of the requeue ops.
2554 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
2555 */
2556 if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
2557 cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
2558 val2 = (u32) (unsigned long) utime;
2559
2560 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
2561 }
2562
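/*
 * Illustrative userspace sketch (not part of this file): a minimal
 * wait/wake pair through the entry point above. Note that FUTEX_WAIT
 * takes a relative timeout (made absolute by ktime_add_safe() above),
 * unlike FUTEX_WAIT_BITSET, which takes an absolute one.
 *
 *	uint32_t word = 0;
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	// Sleeps only while word is still 0; returns 0 when woken,
 *	// or fails with ETIMEDOUT after roughly one second.
 *	syscall(SYS_futex, &word, FUTEX_WAIT, 0, &ts, NULL, 0);
 *
 *	// Elsewhere: wake at most one waiter sleeping on &word.
 *	syscall(SYS_futex, &word, FUTEX_WAKE, 1, NULL, NULL, 0);
 */
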
2563 static int __init futex_init(void)
2564 {
2565 u32 curval;
2566 int i;
2567
2568 /*
2569 * This call will fail, and that is intentional. Some arch
2570 * implementations do runtime detection of the
2571 * futex_atomic_cmpxchg_inatomic() functionality. We want to know
2572 * that before we call into any of the complex code paths, and we
2573 * also want to prevent registration of robust lists in that
2574 * case. NULL is guaranteed to fault: a functional implementation
2575 * returns -EFAULT, while the non-functional ones return
2576 * -ENOSYS.
2577 */
2578 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
2579 if (curval == -EFAULT)
2580 futex_cmpxchg_enabled = 1;
2581
2582 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2583 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
2584 spin_lock_init(&futex_queues[i].lock);
2585 }
2586
2587 return 0;
2588 }
2589 __initcall(futex_init);