sched: Move wait.c into kernel/sched/
author Peter Zijlstra <peterz@infradead.org>
Thu, 31 Oct 2013 17:07:08 +0000 (18:07 +0100)
committer Ingo Molnar <mingo@kernel.org>
Wed, 6 Nov 2013 06:49:16 +0000 (07:49 +0100)
Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-5q5yqvdaen0rmapwloeaotx3@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/Makefile
kernel/sched/Makefile
kernel/sched/wait.c [new file with mode: 0644]
kernel/wait.c [deleted file]

index 1ce47553fb020e97e2a930309242c2c1e5161437..b3d51e229356db26978e2244053ba93ebf686c3c 100644 (file)
@@ -7,7 +7,7 @@ obj-y     = fork.o exec_domain.o panic.o \
            sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
            signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
            rcupdate.o extable.o params.o posix-timers.o \
-           kthread.o wait.o sys_ni.o posix-cpu-timers.o mutex.o \
+           kthread.o sys_ni.o posix-cpu-timers.o mutex.o \
            hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
            notifier.o ksysfs.o cred.o reboot.o \
            async.o range.o groups.o lglock.o smpboot.o
index 54adcf35f49526ef29bde1bfa79fbc97d37738c5..f8d3f4baa1a188c10b287bd4698fc85d787c9546 100644 (file)
@@ -12,6 +12,7 @@ CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
 endif
 
 obj-y += core.o proc.o clock.o cputime.o idle_task.o fair.o rt.o stop_task.o
+obj-y += wait.o
 obj-$(CONFIG_SMP) += cpupri.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
new file mode 100644 (file)
index 0000000..de21c63
--- /dev/null
@@ -0,0 +1,401 @@
+/*
+ * Generic waiting primitives.
+ *
+ * (C) 2004 Nadia Yvette Chambers, Oracle
+ */
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/wait.h>
+#include <linux/hash.h>
+
+void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
+{
+       spin_lock_init(&q->lock);
+       lockdep_set_class_and_name(&q->lock, key, name);
+       INIT_LIST_HEAD(&q->task_list);
+}
+
+EXPORT_SYMBOL(__init_waitqueue_head);
+
+void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+{
+       unsigned long flags;
+
+       wait->flags &= ~WQ_FLAG_EXCLUSIVE;
+       spin_lock_irqsave(&q->lock, flags);
+       __add_wait_queue(q, wait);
+       spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(add_wait_queue);
+
+void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
+{
+       unsigned long flags;
+
+       wait->flags |= WQ_FLAG_EXCLUSIVE;
+       spin_lock_irqsave(&q->lock, flags);
+       __add_wait_queue_tail(q, wait);
+       spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(add_wait_queue_exclusive);
+
+void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&q->lock, flags);
+       __remove_wait_queue(q, wait);
+       spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(remove_wait_queue);
+
+
+/*
+ * Note: we use "set_current_state()" _after_ the wait-queue add,
+ * because we need a memory barrier there on SMP, so that any
+ * wake-function that tests for the wait-queue being active
+ * will be guaranteed to see waitqueue addition _or_ subsequent
+ * tests in this thread will see the wakeup having taken place.
+ *
+ * The spin_unlock() itself is semi-permeable and only protects
+ * one way (it only protects stuff inside the critical region and
+ * stops them from bleeding out - it would still allow subsequent
+ * loads to move into the critical region).
+ */
+void
+prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
+{
+       unsigned long flags;
+
+       wait->flags &= ~WQ_FLAG_EXCLUSIVE;
+       spin_lock_irqsave(&q->lock, flags);
+       if (list_empty(&wait->task_list))
+               __add_wait_queue(q, wait);
+       set_current_state(state);
+       spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(prepare_to_wait);
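
The ordering rule above is what makes the classic open-coded wait loop safe. A minimal usage sketch (not part of the committed file), assuming a hypothetical wait_queue_head_t my_waitqueue and a condition flag that the waker sets before waking:

        /* sketch: my_waitqueue and condition are hypothetical caller names */
        DEFINE_WAIT(wait);              /* on-stack entry using autoremove_wake_function */

        for (;;) {
                /* queue first, then set state: the waker either sees this
                 * entry on the list or this thread sees condition set */
                prepare_to_wait(&my_waitqueue, &wait, TASK_INTERRUPTIBLE);
                if (condition)
                        break;
                if (signal_pending(current))
                        break;
                schedule();
        }
        finish_wait(&my_waitqueue, &wait);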
+
+void
+prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
+{
+       unsigned long flags;
+
+       wait->flags |= WQ_FLAG_EXCLUSIVE;
+       spin_lock_irqsave(&q->lock, flags);
+       if (list_empty(&wait->task_list))
+               __add_wait_queue_tail(q, wait);
+       set_current_state(state);
+       spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(prepare_to_wait_exclusive);
+
+long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
+{
+       unsigned long flags;
+
+       if (signal_pending_state(state, current))
+               return -ERESTARTSYS;
+
+       wait->private = current;
+       wait->func = autoremove_wake_function;
+
+       spin_lock_irqsave(&q->lock, flags);
+       if (list_empty(&wait->task_list)) {
+               if (wait->flags & WQ_FLAG_EXCLUSIVE)
+                       __add_wait_queue_tail(q, wait);
+               else
+                       __add_wait_queue(q, wait);
+       }
+       set_current_state(state);
+       spin_unlock_irqrestore(&q->lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL(prepare_to_wait_event);
+
+/**
+ * finish_wait - clean up after waiting in a queue
+ * @q: waitqueue waited on
+ * @wait: wait descriptor
+ *
+ * Sets current thread back to running state and removes
+ * the wait descriptor from the given waitqueue if still
+ * queued.
+ */
+void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
+{
+       unsigned long flags;
+
+       __set_current_state(TASK_RUNNING);
+       /*
+        * We can check for list emptiness outside the lock
+        * IFF:
+        *  - we use the "careful" check that verifies both
+        *    the next and prev pointers, so that there cannot
+        *    be any half-pending updates in progress on other
+        *    CPU's that we haven't seen yet (and that might
+        *    still change the stack area).
+        * and
+        *  - all other users take the lock (ie we can only
+        *    have _one_ other CPU that looks at or modifies
+        *    the list).
+        */
+       if (!list_empty_careful(&wait->task_list)) {
+               spin_lock_irqsave(&q->lock, flags);
+               list_del_init(&wait->task_list);
+               spin_unlock_irqrestore(&q->lock, flags);
+       }
+}
+EXPORT_SYMBOL(finish_wait);
+
+/**
+ * abort_exclusive_wait - abort exclusive waiting in a queue
+ * @q: waitqueue waited on
+ * @wait: wait descriptor
+ * @mode: runstate of the waiter to be woken
+ * @key: key to identify a wait bit queue or %NULL
+ *
+ * Sets current thread back to running state and removes
+ * the wait descriptor from the given waitqueue if still
+ * queued.
+ *
+ * Wakes up the next waiter if the caller is concurrently
+ * woken up through the queue.
+ *
+ * This prevents waiter starvation where an exclusive waiter
+ * aborts and is woken up concurrently and no one wakes up
+ * the next waiter.
+ */
+void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
+                       unsigned int mode, void *key)
+{
+       unsigned long flags;
+
+       __set_current_state(TASK_RUNNING);
+       spin_lock_irqsave(&q->lock, flags);
+       if (!list_empty(&wait->task_list))
+               list_del_init(&wait->task_list);
+       else if (waitqueue_active(q))
+               __wake_up_locked_key(q, mode, key);
+       spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(abort_exclusive_wait);
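
This wakeup forwarding is what an interruptible exclusive wait relies on when it bails out on a signal: the waiter may already have consumed the one wakeup that was aimed at it. A minimal sketch of such a bail-out path (hypothetical my_waitqueue and condition again), along the lines of the __wait_event_interruptible_exclusive() pattern in <linux/wait.h>:

        DEFINE_WAIT(wait);
        int err = 0;

        for (;;) {
                prepare_to_wait_exclusive(&my_waitqueue, &wait, TASK_INTERRUPTIBLE);
                if (condition)
                        break;
                if (signal_pending(current)) {
                        /* pass a possibly-consumed wakeup on to the next
                         * exclusive waiter instead of dropping it */
                        err = -ERESTARTSYS;
                        abort_exclusive_wait(&my_waitqueue, &wait,
                                             TASK_INTERRUPTIBLE, NULL);
                        goto out;
                }
                schedule();
        }
        finish_wait(&my_waitqueue, &wait);
out:
        return err;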
+
+int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+       int ret = default_wake_function(wait, mode, sync, key);
+
+       if (ret)
+               list_del_init(&wait->task_list);
+       return ret;
+}
+EXPORT_SYMBOL(autoremove_wake_function);
+
+int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
+{
+       struct wait_bit_key *key = arg;
+       struct wait_bit_queue *wait_bit
+               = container_of(wait, struct wait_bit_queue, wait);
+
+       if (wait_bit->key.flags != key->flags ||
+                       wait_bit->key.bit_nr != key->bit_nr ||
+                       test_bit(key->bit_nr, key->flags))
+               return 0;
+       else
+               return autoremove_wake_function(wait, mode, sync, key);
+}
+EXPORT_SYMBOL(wake_bit_function);
+
+/*
+ * To allow interruptible waiting and asynchronous (i.e. nonblocking)
+ * waiting, the actions of __wait_on_bit() and __wait_on_bit_lock() are
+ * permitted return codes. Nonzero return codes halt waiting and return.
+ */
+int __sched
+__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
+                       int (*action)(void *), unsigned mode)
+{
+       int ret = 0;
+
+       do {
+               prepare_to_wait(wq, &q->wait, mode);
+               if (test_bit(q->key.bit_nr, q->key.flags))
+                       ret = (*action)(q->key.flags);
+       } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
+       finish_wait(wq, &q->wait);
+       return ret;
+}
+EXPORT_SYMBOL(__wait_on_bit);
+
+int __sched out_of_line_wait_on_bit(void *word, int bit,
+                                       int (*action)(void *), unsigned mode)
+{
+       wait_queue_head_t *wq = bit_waitqueue(word, bit);
+       DEFINE_WAIT_BIT(wait, word, bit);
+
+       return __wait_on_bit(wq, &wait, action, mode);
+}
+EXPORT_SYMBOL(out_of_line_wait_on_bit);
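
The action callback decides how the waiter actually sleeps: __wait_on_bit() keeps looping while the bit stays set and the action returns zero, and any nonzero return ends the wait and is handed back to the caller. A minimal sketch of an interruptible action used through wait_on_bit(), where my_flags (an unsigned long of flag bits) and MY_BIT_PENDING are hypothetical caller-side names:

static int my_bit_wait(void *word)
{
        if (signal_pending(current))
                return -ERESTARTSYS;    /* nonzero halts the wait loop */
        schedule();                     /* state was already set by prepare_to_wait() */
        return 0;
}

        /* caller: sleep until the waker clears MY_BIT_PENDING in my_flags */
        int err = wait_on_bit(&my_flags, MY_BIT_PENDING, my_bit_wait, TASK_INTERRUPTIBLE);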
+
+int __sched
+__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
+                       int (*action)(void *), unsigned mode)
+{
+       do {
+               int ret;
+
+               prepare_to_wait_exclusive(wq, &q->wait, mode);
+               if (!test_bit(q->key.bit_nr, q->key.flags))
+                       continue;
+               ret = action(q->key.flags);
+               if (!ret)
+                       continue;
+               abort_exclusive_wait(wq, &q->wait, mode, &q->key);
+               return ret;
+       } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
+       finish_wait(wq, &q->wait);
+       return 0;
+}
+EXPORT_SYMBOL(__wait_on_bit_lock);
+
+int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
+                                       int (*action)(void *), unsigned mode)
+{
+       wait_queue_head_t *wq = bit_waitqueue(word, bit);
+       DEFINE_WAIT_BIT(wait, word, bit);
+
+       return __wait_on_bit_lock(wq, &wait, action, mode);
+}
+EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
+
+void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
+{
+       struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
+       if (waitqueue_active(wq))
+               __wake_up(wq, TASK_NORMAL, 1, &key);
+}
+EXPORT_SYMBOL(__wake_up_bit);
+
+/**
+ * wake_up_bit - wake up a waiter on a bit
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ *
+ * There is a standard hashed waitqueue table for generic use. This
+ * is the part of the hashtable's accessor API that wakes up waiters
+ * on a bit. For instance, if one were to have waiters on a bitflag,
+ * one would call wake_up_bit() after clearing the bit.
+ *
+ * In order for this to function properly, as it uses waitqueue_active()
+ * internally, some kind of memory barrier must be done prior to calling
+ * this. Typically, this will be smp_mb__after_clear_bit(), but in some
+ * cases where bitflags are manipulated non-atomically under a lock, one
+ * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
+ * because spin_unlock() does not guarantee a memory barrier.
+ */
+void wake_up_bit(void *word, int bit)
+{
+       __wake_up_bit(bit_waitqueue(word, bit), word, bit);
+}
+EXPORT_SYMBOL(wake_up_bit);
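
On the waker's side the documented pairing is: clear the bit, issue the barrier, then call wake_up_bit(). Without the barrier the waitqueue_active() check inside __wake_up_bit() can be reordered against the store that clears the bit and the wakeup can be missed. A minimal sketch, reusing the hypothetical my_flags / MY_BIT_PENDING names from above:

        clear_bit(MY_BIT_PENDING, &my_flags);
        smp_mb__after_clear_bit();      /* order the clear before the waitqueue_active() test */
        wake_up_bit(&my_flags, MY_BIT_PENDING);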
+
+wait_queue_head_t *bit_waitqueue(void *word, int bit)
+{
+       const int shift = BITS_PER_LONG == 32 ? 5 : 6;
+       const struct zone *zone = page_zone(virt_to_page(word));
+       unsigned long val = (unsigned long)word << shift | bit;
+
+       return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
+}
+EXPORT_SYMBOL(bit_waitqueue);
+
+/*
+ * Manipulate the atomic_t address to produce a better bit waitqueue table hash
+ * index (we're keying off bit -1, but that would produce a horrible hash
+ * value).
+ */
+static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
+{
+       if (BITS_PER_LONG == 64) {
+               unsigned long q = (unsigned long)p;
+               return bit_waitqueue((void *)(q & ~1), q & 1);
+       }
+       return bit_waitqueue(p, 0);
+}
+
+static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync,
+                                 void *arg)
+{
+       struct wait_bit_key *key = arg;
+       struct wait_bit_queue *wait_bit
+               = container_of(wait, struct wait_bit_queue, wait);
+       atomic_t *val = key->flags;
+
+       if (wait_bit->key.flags != key->flags ||
+           wait_bit->key.bit_nr != key->bit_nr ||
+           atomic_read(val) != 0)
+               return 0;
+       return autoremove_wake_function(wait, mode, sync, key);
+}
+
+/*
+ * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
+ * the actions of __wait_on_atomic_t() are permitted return codes.  Nonzero
+ * return codes halt waiting and return.
+ */
+static __sched
+int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
+                      int (*action)(atomic_t *), unsigned mode)
+{
+       atomic_t *val;
+       int ret = 0;
+
+       do {
+               prepare_to_wait(wq, &q->wait, mode);
+               val = q->key.flags;
+               if (atomic_read(val) == 0)
+                       break;
+               ret = (*action)(val);
+       } while (!ret && atomic_read(val) != 0);
+       finish_wait(wq, &q->wait);
+       return ret;
+}
+
+#define DEFINE_WAIT_ATOMIC_T(name, p)                                  \
+       struct wait_bit_queue name = {                                  \
+               .key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),              \
+               .wait   = {                                             \
+                       .private        = current,                      \
+                       .func           = wake_atomic_t_function,       \
+                       .task_list      =                               \
+                               LIST_HEAD_INIT((name).wait.task_list),  \
+               },                                                      \
+       }
+
+__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
+                                        unsigned mode)
+{
+       wait_queue_head_t *wq = atomic_t_waitqueue(p);
+       DEFINE_WAIT_ATOMIC_T(wait, p);
+
+       return __wait_on_atomic_t(wq, &wait, action, mode);
+}
+EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
+
+/**
+ * wake_up_atomic_t - Wake up a waiter on an atomic_t
+ * @p: The atomic_t being waited on, a kernel virtual address
+ *
+ * Wake up anyone waiting for the atomic_t to go to zero.
+ *
+ * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
+ * check is done by the waiter's wake function, not by the waker itself).
+ */
+void wake_up_atomic_t(atomic_t *p)
+{
+       __wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
+}
+EXPORT_SYMBOL(wake_up_atomic_t);
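
Taken together, the atomic_t helpers above give a "sleep until this counter hits zero" primitive: wait_on_atomic_t() on the waiting side, and the final decrement followed by wake_up_atomic_t() on the dropping side. A minimal sketch, where my_refs and my_atomic_wait() are hypothetical caller-side names:

static int my_atomic_wait(atomic_t *p)
{
        schedule();     /* uninterruptible sleep; woken by wake_up_atomic_t() */
        return 0;
}

        /* waiter: block until my_refs reaches zero */
        wait_on_atomic_t(&my_refs, my_atomic_wait, TASK_UNINTERRUPTIBLE);

        /* last put: wake anyone sleeping in wait_on_atomic_t() */
        if (atomic_dec_and_test(&my_refs))
                wake_up_atomic_t(&my_refs);
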
diff --git a/kernel/wait.c b/kernel/wait.c
deleted file mode 100644 (file)
index de21c63..0000000
+++ /dev/null
@@ -1,401 +0,0 @@
-/*
- * Generic waiting primitives.
- *
- * (C) 2004 Nadia Yvette Chambers, Oracle
- */
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/wait.h>
-#include <linux/hash.h>
-
-void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
-{
-       spin_lock_init(&q->lock);
-       lockdep_set_class_and_name(&q->lock, key, name);
-       INIT_LIST_HEAD(&q->task_list);
-}
-
-EXPORT_SYMBOL(__init_waitqueue_head);
-
-void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
-{
-       unsigned long flags;
-
-       wait->flags &= ~WQ_FLAG_EXCLUSIVE;
-       spin_lock_irqsave(&q->lock, flags);
-       __add_wait_queue(q, wait);
-       spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL(add_wait_queue);
-
-void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
-{
-       unsigned long flags;
-
-       wait->flags |= WQ_FLAG_EXCLUSIVE;
-       spin_lock_irqsave(&q->lock, flags);
-       __add_wait_queue_tail(q, wait);
-       spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL(add_wait_queue_exclusive);
-
-void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&q->lock, flags);
-       __remove_wait_queue(q, wait);
-       spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL(remove_wait_queue);
-
-
-/*
- * Note: we use "set_current_state()" _after_ the wait-queue add,
- * because we need a memory barrier there on SMP, so that any
- * wake-function that tests for the wait-queue being active
- * will be guaranteed to see waitqueue addition _or_ subsequent
- * tests in this thread will see the wakeup having taken place.
- *
- * The spin_unlock() itself is semi-permeable and only protects
- * one way (it only protects stuff inside the critical region and
- * stops them from bleeding out - it would still allow subsequent
- * loads to move into the critical region).
- */
-void
-prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
-{
-       unsigned long flags;
-
-       wait->flags &= ~WQ_FLAG_EXCLUSIVE;
-       spin_lock_irqsave(&q->lock, flags);
-       if (list_empty(&wait->task_list))
-               __add_wait_queue(q, wait);
-       set_current_state(state);
-       spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL(prepare_to_wait);
-
-void
-prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
-{
-       unsigned long flags;
-
-       wait->flags |= WQ_FLAG_EXCLUSIVE;
-       spin_lock_irqsave(&q->lock, flags);
-       if (list_empty(&wait->task_list))
-               __add_wait_queue_tail(q, wait);
-       set_current_state(state);
-       spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL(prepare_to_wait_exclusive);
-
-long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
-{
-       unsigned long flags;
-
-       if (signal_pending_state(state, current))
-               return -ERESTARTSYS;
-
-       wait->private = current;
-       wait->func = autoremove_wake_function;
-
-       spin_lock_irqsave(&q->lock, flags);
-       if (list_empty(&wait->task_list)) {
-               if (wait->flags & WQ_FLAG_EXCLUSIVE)
-                       __add_wait_queue_tail(q, wait);
-               else
-                       __add_wait_queue(q, wait);
-       }
-       set_current_state(state);
-       spin_unlock_irqrestore(&q->lock, flags);
-
-       return 0;
-}
-EXPORT_SYMBOL(prepare_to_wait_event);
-
-/**
- * finish_wait - clean up after waiting in a queue
- * @q: waitqueue waited on
- * @wait: wait descriptor
- *
- * Sets current thread back to running state and removes
- * the wait descriptor from the given waitqueue if still
- * queued.
- */
-void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
-{
-       unsigned long flags;
-
-       __set_current_state(TASK_RUNNING);
-       /*
-        * We can check for list emptiness outside the lock
-        * IFF:
-        *  - we use the "careful" check that verifies both
-        *    the next and prev pointers, so that there cannot
-        *    be any half-pending updates in progress on other
-        *    CPU's that we haven't seen yet (and that might
-        *    still change the stack area).
-        * and
-        *  - all other users take the lock (ie we can only
-        *    have _one_ other CPU that looks at or modifies
-        *    the list).
-        */
-       if (!list_empty_careful(&wait->task_list)) {
-               spin_lock_irqsave(&q->lock, flags);
-               list_del_init(&wait->task_list);
-               spin_unlock_irqrestore(&q->lock, flags);
-       }
-}
-EXPORT_SYMBOL(finish_wait);
-
-/**
- * abort_exclusive_wait - abort exclusive waiting in a queue
- * @q: waitqueue waited on
- * @wait: wait descriptor
- * @mode: runstate of the waiter to be woken
- * @key: key to identify a wait bit queue or %NULL
- *
- * Sets current thread back to running state and removes
- * the wait descriptor from the given waitqueue if still
- * queued.
- *
- * Wakes up the next waiter if the caller is concurrently
- * woken up through the queue.
- *
- * This prevents waiter starvation where an exclusive waiter
- * aborts and is woken up concurrently and no one wakes up
- * the next waiter.
- */
-void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
-                       unsigned int mode, void *key)
-{
-       unsigned long flags;
-
-       __set_current_state(TASK_RUNNING);
-       spin_lock_irqsave(&q->lock, flags);
-       if (!list_empty(&wait->task_list))
-               list_del_init(&wait->task_list);
-       else if (waitqueue_active(q))
-               __wake_up_locked_key(q, mode, key);
-       spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL(abort_exclusive_wait);
-
-int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
-{
-       int ret = default_wake_function(wait, mode, sync, key);
-
-       if (ret)
-               list_del_init(&wait->task_list);
-       return ret;
-}
-EXPORT_SYMBOL(autoremove_wake_function);
-
-int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
-{
-       struct wait_bit_key *key = arg;
-       struct wait_bit_queue *wait_bit
-               = container_of(wait, struct wait_bit_queue, wait);
-
-       if (wait_bit->key.flags != key->flags ||
-                       wait_bit->key.bit_nr != key->bit_nr ||
-                       test_bit(key->bit_nr, key->flags))
-               return 0;
-       else
-               return autoremove_wake_function(wait, mode, sync, key);
-}
-EXPORT_SYMBOL(wake_bit_function);
-
-/*
- * To allow interruptible waiting and asynchronous (i.e. nonblocking)
- * waiting, the actions of __wait_on_bit() and __wait_on_bit_lock() are
- * permitted return codes. Nonzero return codes halt waiting and return.
- */
-int __sched
-__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
-                       int (*action)(void *), unsigned mode)
-{
-       int ret = 0;
-
-       do {
-               prepare_to_wait(wq, &q->wait, mode);
-               if (test_bit(q->key.bit_nr, q->key.flags))
-                       ret = (*action)(q->key.flags);
-       } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
-       finish_wait(wq, &q->wait);
-       return ret;
-}
-EXPORT_SYMBOL(__wait_on_bit);
-
-int __sched out_of_line_wait_on_bit(void *word, int bit,
-                                       int (*action)(void *), unsigned mode)
-{
-       wait_queue_head_t *wq = bit_waitqueue(word, bit);
-       DEFINE_WAIT_BIT(wait, word, bit);
-
-       return __wait_on_bit(wq, &wait, action, mode);
-}
-EXPORT_SYMBOL(out_of_line_wait_on_bit);
-
-int __sched
-__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
-                       int (*action)(void *), unsigned mode)
-{
-       do {
-               int ret;
-
-               prepare_to_wait_exclusive(wq, &q->wait, mode);
-               if (!test_bit(q->key.bit_nr, q->key.flags))
-                       continue;
-               ret = action(q->key.flags);
-               if (!ret)
-                       continue;
-               abort_exclusive_wait(wq, &q->wait, mode, &q->key);
-               return ret;
-       } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
-       finish_wait(wq, &q->wait);
-       return 0;
-}
-EXPORT_SYMBOL(__wait_on_bit_lock);
-
-int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
-                                       int (*action)(void *), unsigned mode)
-{
-       wait_queue_head_t *wq = bit_waitqueue(word, bit);
-       DEFINE_WAIT_BIT(wait, word, bit);
-
-       return __wait_on_bit_lock(wq, &wait, action, mode);
-}
-EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
-
-void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
-{
-       struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
-       if (waitqueue_active(wq))
-               __wake_up(wq, TASK_NORMAL, 1, &key);
-}
-EXPORT_SYMBOL(__wake_up_bit);
-
-/**
- * wake_up_bit - wake up a waiter on a bit
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- *
- * There is a standard hashed waitqueue table for generic use. This
- * is the part of the hashtable's accessor API that wakes up waiters
- * on a bit. For instance, if one were to have waiters on a bitflag,
- * one would call wake_up_bit() after clearing the bit.
- *
- * In order for this to function properly, as it uses waitqueue_active()
- * internally, some kind of memory barrier must be done prior to calling
- * this. Typically, this will be smp_mb__after_clear_bit(), but in some
- * cases where bitflags are manipulated non-atomically under a lock, one
- * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
- * because spin_unlock() does not guarantee a memory barrier.
- */
-void wake_up_bit(void *word, int bit)
-{
-       __wake_up_bit(bit_waitqueue(word, bit), word, bit);
-}
-EXPORT_SYMBOL(wake_up_bit);
-
-wait_queue_head_t *bit_waitqueue(void *word, int bit)
-{
-       const int shift = BITS_PER_LONG == 32 ? 5 : 6;
-       const struct zone *zone = page_zone(virt_to_page(word));
-       unsigned long val = (unsigned long)word << shift | bit;
-
-       return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
-}
-EXPORT_SYMBOL(bit_waitqueue);
-
-/*
- * Manipulate the atomic_t address to produce a better bit waitqueue table hash
- * index (we're keying off bit -1, but that would produce a horrible hash
- * value).
- */
-static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
-{
-       if (BITS_PER_LONG == 64) {
-               unsigned long q = (unsigned long)p;
-               return bit_waitqueue((void *)(q & ~1), q & 1);
-       }
-       return bit_waitqueue(p, 0);
-}
-
-static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync,
-                                 void *arg)
-{
-       struct wait_bit_key *key = arg;
-       struct wait_bit_queue *wait_bit
-               = container_of(wait, struct wait_bit_queue, wait);
-       atomic_t *val = key->flags;
-
-       if (wait_bit->key.flags != key->flags ||
-           wait_bit->key.bit_nr != key->bit_nr ||
-           atomic_read(val) != 0)
-               return 0;
-       return autoremove_wake_function(wait, mode, sync, key);
-}
-
-/*
- * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
- * the actions of __wait_on_atomic_t() are permitted return codes.  Nonzero
- * return codes halt waiting and return.
- */
-static __sched
-int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
-                      int (*action)(atomic_t *), unsigned mode)
-{
-       atomic_t *val;
-       int ret = 0;
-
-       do {
-               prepare_to_wait(wq, &q->wait, mode);
-               val = q->key.flags;
-               if (atomic_read(val) == 0)
-                       break;
-               ret = (*action)(val);
-       } while (!ret && atomic_read(val) != 0);
-       finish_wait(wq, &q->wait);
-       return ret;
-}
-
-#define DEFINE_WAIT_ATOMIC_T(name, p)                                  \
-       struct wait_bit_queue name = {                                  \
-               .key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),              \
-               .wait   = {                                             \
-                       .private        = current,                      \
-                       .func           = wake_atomic_t_function,       \
-                       .task_list      =                               \
-                               LIST_HEAD_INIT((name).wait.task_list),  \
-               },                                                      \
-       }
-
-__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
-                                        unsigned mode)
-{
-       wait_queue_head_t *wq = atomic_t_waitqueue(p);
-       DEFINE_WAIT_ATOMIC_T(wait, p);
-
-       return __wait_on_atomic_t(wq, &wait, action, mode);
-}
-EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
-
-/**
- * wake_up_atomic_t - Wake up a waiter on an atomic_t
- * @p: The atomic_t being waited on, a kernel virtual address
- *
- * Wake up anyone waiting for the atomic_t to go to zero.
- *
- * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
- * check is done by the waiter's wake function, not by the waker itself).
- */
-void wake_up_atomic_t(atomic_t *p)
-{
-       __wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
-}
-EXPORT_SYMBOL(wake_up_atomic_t);