git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/commitdiff
[BLOCK] Move all core block layer code to new block/ directory
author Jens Axboe <axboe@suse.de>
Fri, 4 Nov 2005 07:43:35 +0000 (08:43 +0100)
committer Jens Axboe <axboe@suse.de>
Fri, 4 Nov 2005 07:43:35 +0000 (08:43 +0100)
drivers/block/ is right now a mix of core and driver parts. Let's move
the core parts to a new top level directory. Al will move the fs/
related block parts to block/ next.

Signed-off-by: Jens Axboe <axboe@suse.de>
26 files changed:
Makefile
block/Kconfig [new file with mode: 0644]
block/Kconfig.iosched [new file with mode: 0644]
block/Makefile [new file with mode: 0644]
block/as-iosched.c [new file with mode: 0644]
block/cfq-iosched.c [new file with mode: 0644]
block/deadline-iosched.c [new file with mode: 0644]
block/elevator.c [new file with mode: 0644]
block/genhd.c [new file with mode: 0644]
block/ioctl.c [new file with mode: 0644]
block/ll_rw_blk.c [new file with mode: 0644]
block/noop-iosched.c [new file with mode: 0644]
block/scsi_ioctl.c [new file with mode: 0644]
drivers/block/Kconfig
drivers/block/Kconfig.iosched [deleted file]
drivers/block/Makefile
drivers/block/as-iosched.c [deleted file]
drivers/block/cfq-iosched.c [deleted file]
drivers/block/deadline-iosched.c [deleted file]
drivers/block/elevator.c [deleted file]
drivers/block/genhd.c [deleted file]
drivers/block/ioctl.c [deleted file]
drivers/block/ll_rw_blk.c [deleted file]
drivers/block/noop-iosched.c [deleted file]
drivers/block/scsi_ioctl.c [deleted file]
init/Kconfig

index 79601320ac3e371666593abcca5820efdbae5caf..a0270c5c3f333b80801903cb007b9edf86670b10 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -582,7 +582,7 @@ export MODLIB
 
 
 ifeq ($(KBUILD_EXTMOD),)
-core-y         += kernel/ mm/ fs/ ipc/ security/ crypto/
+core-y         += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
 
 vmlinux-dirs   := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
                     $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
diff --git a/block/Kconfig b/block/Kconfig
new file mode 100644 (file)
index 0000000..eb48edb
--- /dev/null
@@ -0,0 +1,14 @@
+#
+# Block layer core configuration
+#
+#XXX - it makes sense to enable this only for 32-bit subarchs, not for x86_64
+#for instance.
+config LBD
+       bool "Support for Large Block Devices"
+       depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML
+       help
+         Say Y here if you want to attach large (bigger than 2TB) discs to
+         your machine, or if you want to have a raid or loopback device
+         bigger than 2TB.  Otherwise say N.
+
+source block/Kconfig.iosched
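
For context, a rough sketch (not part of this commit) of what CONFIG_LBD
typically controls: the width of sector_t on 32-bit architectures. With a
32-bit sector count and 512-byte sectors, a block device tops out at
2^32 * 512 bytes = 2 TiB, which is the "2TB" figure in the help text above.

	/*
	 * Illustrative only: roughly how <linux/types.h> of this era picks
	 * the sector_t width based on CONFIG_LBD.
	 */
	#ifdef CONFIG_LBD
	typedef u64 sector_t;		/* 64-bit: devices beyond 2 TiB work */
	#else
	typedef unsigned long sector_t;	/* 32 bits on 32-bit arches: 2 TiB cap */
	#endif
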
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
new file mode 100644 (file)
index 0000000..5b90d2f
--- /dev/null
@@ -0,0 +1,69 @@
+
+menu "IO Schedulers"
+
+config IOSCHED_NOOP
+       bool
+       default y
+       ---help---
+         The no-op I/O scheduler is a minimal scheduler that does basic merging
+         and sorting. Its main uses include non-disk based block devices like
+         memory devices, and specialised software or hardware environments
+         that do their own scheduling and require only minimal assistance from
+         the kernel.
+
+config IOSCHED_AS
+       tristate "Anticipatory I/O scheduler"
+       default y
+       ---help---
+         The anticipatory I/O scheduler is the default disk scheduler. It is
+         generally a good choice for most environments, but is quite large and
+         complex when compared to the deadline I/O scheduler; it can also be
+         slower in some cases, especially under some database loads.
+
+config IOSCHED_DEADLINE
+       tristate "Deadline I/O scheduler"
+       default y
+       ---help---
+         The deadline I/O scheduler is simple and compact, and is often as
+         good as the anticipatory I/O scheduler, and in some database
+         workloads, better. In the case of a single process performing I/O to
+         a disk at any one time, its behaviour is almost identical to the
+         anticipatory I/O scheduler and so is a good choice.
+
+config IOSCHED_CFQ
+       tristate "CFQ I/O scheduler"
+       default y
+       ---help---
+         The CFQ I/O scheduler tries to distribute bandwidth equally
+         among all processes in the system. It should provide a fair
+         working environment, suitable for desktop systems.
+
+choice
+       prompt "Default I/O scheduler"
+       default DEFAULT_AS
+       help
+         Select the I/O scheduler which will be used by default for all
+         block devices.
+
+       config DEFAULT_AS
+               bool "Anticipatory" if IOSCHED_AS
+
+       config DEFAULT_DEADLINE
+               bool "Deadline" if IOSCHED_DEADLINE
+
+       config DEFAULT_CFQ
+               bool "CFQ" if IOSCHED_CFQ
+
+       config DEFAULT_NOOP
+               bool "No-op"
+
+endchoice
+
+config DEFAULT_IOSCHED
+       string
+       default "anticipatory" if DEFAULT_AS
+       default "deadline" if DEFAULT_DEADLINE
+       default "cfq" if DEFAULT_CFQ
+       default "noop" if DEFAULT_NOOP
+
+endmenu
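
The CONFIG_DEFAULT_IOSCHED string selected above is what the elevator core
falls back on when nothing else names a scheduler. The sketch below shows the
general idea; the helper and variable names are illustrative (the real logic
lives in block/elevator.c), and an "elevator=" boot parameter is assumed to
take precedence over the compiled-in default.

	/* Illustrative sketch, not code from this commit. */
	static char chosen_elevator[16];	/* filled from "elevator=<name>" */

	static const char *pick_default_iosched(void)
	{
		if (chosen_elevator[0])
			return chosen_elevator;		/* boot-time override */
		/* string Kconfig symbol, expands to e.g. "anticipatory" */
		return CONFIG_DEFAULT_IOSCHED;
	}
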
diff --git a/block/Makefile b/block/Makefile
new file mode 100644 (file)
index 0000000..7e4f93e
--- /dev/null
@@ -0,0 +1,10 @@
+#
+# Makefile for the kernel block layer
+#
+
+obj-y  := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o
+
+obj-$(CONFIG_IOSCHED_NOOP)     += noop-iosched.o
+obj-$(CONFIG_IOSCHED_AS)       += as-iosched.o
+obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
+obj-$(CONFIG_IOSCHED_CFQ)      += cfq-iosched.o
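
Each tristate iosched object listed above hooks into the elevator core by
registering an elevator_type at init time. A minimal sketch of that pattern
follows, assuming this era's elv_register()/elv_unregister() API; the name
and the omitted ops are placeholders, not code from this commit.

	/* Illustrative sketch of an I/O scheduler module's registration. */
	#include <linux/module.h>
	#include <linux/elevator.h>

	static struct elevator_type example_iosched = {
		.elevator_name	= "example",
		.elevator_owner	= THIS_MODULE,
		/* .ops = { merge, dispatch, add_req, ... hooks go here }, */
	};

	static int __init example_init(void)
	{
		elv_register(&example_iosched);
		return 0;
	}

	static void __exit example_exit(void)
	{
		elv_unregister(&example_iosched);
	}

	module_init(example_init);
	module_exit(example_exit);
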
diff --git a/block/as-iosched.c b/block/as-iosched.c
new file mode 100644 (file)
index 0000000..c6744ff
--- /dev/null
@@ -0,0 +1,1985 @@
+/*
+ *  linux/drivers/block/as-iosched.c
+ *
+ *  Anticipatory & deadline i/o scheduler.
+ *
+ *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
+ *                     Nick Piggin <piggin@cyberone.com.au>
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/hash.h>
+#include <linux/rbtree.h>
+#include <linux/interrupt.h>
+
+#define REQ_SYNC       1
+#define REQ_ASYNC      0
+
+/*
+ * See Documentation/block/as-iosched.txt
+ */
+
+/*
+ * max time before a read is submitted.
+ */
+#define default_read_expire (HZ / 8)
+
+/*
+ * ditto for writes, these limits are not hard, even
+ * if the disk is capable of satisfying them.
+ */
+#define default_write_expire (HZ / 4)
+
+/*
+ * read_batch_expire describes how long we will allow a stream of reads to
+ * persist before looking to see whether it is time to switch over to writes.
+ */
+#define default_read_batch_expire (HZ / 2)
+
+/*
+ * write_batch_expire describes how long we want a stream of writes to run for.
+ * This is not a hard limit, but a target we set for the auto-tuning thingy.
+ * See, the problem is: we can send a lot of writes to disk cache / TCQ in
+ * a short amount of time...
+ */
+#define default_write_batch_expire (HZ / 8)
+
+/*
+ * max time we may wait to anticipate a read (default around 6ms)
+ */
+#define default_antic_expire ((HZ / 150) ? HZ / 150 : 1)
+
+/*
+ * Keep track of up to 20ms thinktimes. We can go as big as we like here,
+ * however huge values tend to interfere and not decay fast enough. A program
+ * might be in a non-io phase of operation. Waiting on user input for example,
+ * or doing a lengthy computation. A small penalty can be justified there, and
+ * will still catch out those processes that constantly have large thinktimes.
+ */
+#define MAX_THINKTIME (HZ/50UL)
+
+/* Bits in as_io_context.state */
+enum as_io_states {
+       AS_TASK_RUNNING=0,      /* Process has not exited */
+       AS_TASK_IOSTARTED,      /* Process has started some IO */
+       AS_TASK_IORUNNING,      /* Process has completed some IO */
+};
+
+enum anticipation_status {
+       ANTIC_OFF=0,            /* Not anticipating (normal operation)  */
+       ANTIC_WAIT_REQ,         /* The last read has not yet completed  */
+       ANTIC_WAIT_NEXT,        /* Currently anticipating a request vs
+                                  last read (which has completed) */
+       ANTIC_FINISHED,         /* Anticipating but have found a candidate
+                                * or timed out */
+};
+
+struct as_data {
+       /*
+        * run time data
+        */
+
+       struct request_queue *q;        /* the "owner" queue */
+
+       /*
+        * requests (as_rq s) are present on both sort_list and fifo_list
+        */
+       struct rb_root sort_list[2];
+       struct list_head fifo_list[2];
+
+       struct as_rq *next_arq[2];      /* next in sort order */
+       sector_t last_sector[2];        /* last REQ_SYNC & REQ_ASYNC sectors */
+       struct list_head *hash;         /* request hash */
+
+       unsigned long exit_prob;        /* probability a task will exit while
+                                          being waited on */
+       unsigned long new_ttime_total;  /* mean thinktime on new proc */
+       unsigned long new_ttime_mean;
+       u64 new_seek_total;             /* mean seek on new proc */
+       sector_t new_seek_mean;
+
+       unsigned long current_batch_expires;
+       unsigned long last_check_fifo[2];
+       int changed_batch;              /* 1: waiting for old batch to end */
+       int new_batch;                  /* 1: waiting on first read complete */
+       int batch_data_dir;             /* current batch REQ_SYNC / REQ_ASYNC */
+       int write_batch_count;          /* max # of reqs in a write batch */
+       int current_write_count;        /* how many requests left this batch */
+       int write_batch_idled;          /* has the write batch gone idle? */
+       mempool_t *arq_pool;
+
+       enum anticipation_status antic_status;
+       unsigned long antic_start;      /* jiffies: when it started */
+       struct timer_list antic_timer;  /* anticipatory scheduling timer */
+       struct work_struct antic_work;  /* Deferred unplugging */
+       struct io_context *io_context;  /* Identify the expected process */
+       int ioc_finished; /* IO associated with io_context is finished */
+       int nr_dispatched;
+
+       /*
+        * settings that change how the i/o scheduler behaves
+        */
+       unsigned long fifo_expire[2];
+       unsigned long batch_expire[2];
+       unsigned long antic_expire;
+};
+
+#define list_entry_fifo(ptr)   list_entry((ptr), struct as_rq, fifo)
+
+/*
+ * per-request data.
+ */
+enum arq_state {
+       AS_RQ_NEW=0,            /* New - not referenced and not on any lists */
+       AS_RQ_QUEUED,           /* In the request queue. It belongs to the
+                                  scheduler */
+       AS_RQ_DISPATCHED,       /* On the dispatch list. It belongs to the
+                                  driver now */
+       AS_RQ_PRESCHED,         /* Debug poisoning for requests being used */
+       AS_RQ_REMOVED,
+       AS_RQ_MERGED,
+       AS_RQ_POSTSCHED,        /* when they shouldn't be */
+};
+
+struct as_rq {
+       /*
+        * rbtree index, key is the starting offset
+        */
+       struct rb_node rb_node;
+       sector_t rb_key;
+
+       struct request *request;
+
+       struct io_context *io_context;  /* The submitting task */
+
+       /*
+        * request hash, key is the ending offset (for back merge lookup)
+        */
+       struct list_head hash;
+       unsigned int on_hash;
+
+       /*
+        * expire fifo
+        */
+       struct list_head fifo;
+       unsigned long expires;
+
+       unsigned int is_sync;
+       enum arq_state state;
+};
+
+#define RQ_DATA(rq)    ((struct as_rq *) (rq)->elevator_private)
+
+static kmem_cache_t *arq_pool;
+
+/*
+ * IO Context helper functions
+ */
+
+/* Called to deallocate the as_io_context */
+static void free_as_io_context(struct as_io_context *aic)
+{
+       kfree(aic);
+}
+
+/* Called when the task exits */
+static void exit_as_io_context(struct as_io_context *aic)
+{
+       WARN_ON(!test_bit(AS_TASK_RUNNING, &aic->state));
+       clear_bit(AS_TASK_RUNNING, &aic->state);
+}
+
+static struct as_io_context *alloc_as_io_context(void)
+{
+       struct as_io_context *ret;
+
+       ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
+       if (ret) {
+               ret->dtor = free_as_io_context;
+               ret->exit = exit_as_io_context;
+               ret->state = 1 << AS_TASK_RUNNING;
+               atomic_set(&ret->nr_queued, 0);
+               atomic_set(&ret->nr_dispatched, 0);
+               spin_lock_init(&ret->lock);
+               ret->ttime_total = 0;
+               ret->ttime_samples = 0;
+               ret->ttime_mean = 0;
+               ret->seek_total = 0;
+               ret->seek_samples = 0;
+               ret->seek_mean = 0;
+       }
+
+       return ret;
+}
+
+/*
+ * If the current task has no AS IO context then create one and initialise it.
+ * Then take a ref on the task's io context and return it.
+ */
+static struct io_context *as_get_io_context(void)
+{
+       struct io_context *ioc = get_io_context(GFP_ATOMIC);
+       if (ioc && !ioc->aic) {
+               ioc->aic = alloc_as_io_context();
+               if (!ioc->aic) {
+                       put_io_context(ioc);
+                       ioc = NULL;
+               }
+       }
+       return ioc;
+}
+
+static void as_put_io_context(struct as_rq *arq)
+{
+       struct as_io_context *aic;
+
+       if (unlikely(!arq->io_context))
+               return;
+
+       aic = arq->io_context->aic;
+
+       if (arq->is_sync == REQ_SYNC && aic) {
+               spin_lock(&aic->lock);
+               set_bit(AS_TASK_IORUNNING, &aic->state);
+               aic->last_end_request = jiffies;
+               spin_unlock(&aic->lock);
+       }
+
+       put_io_context(arq->io_context);
+}
+
+/*
+ * the back merge hash support functions
+ */
+static const int as_hash_shift = 6;
+#define AS_HASH_BLOCK(sec)     ((sec) >> 3)
+#define AS_HASH_FN(sec)                (hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
+#define AS_HASH_ENTRIES                (1 << as_hash_shift)
+#define rq_hash_key(rq)                ((rq)->sector + (rq)->nr_sectors)
+#define list_entry_hash(ptr)   list_entry((ptr), struct as_rq, hash)
+
+static inline void __as_del_arq_hash(struct as_rq *arq)
+{
+       arq->on_hash = 0;
+       list_del_init(&arq->hash);
+}
+
+static inline void as_del_arq_hash(struct as_rq *arq)
+{
+       if (arq->on_hash)
+               __as_del_arq_hash(arq);
+}
+
+static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
+{
+       struct request *rq = arq->request;
+
+       BUG_ON(arq->on_hash);
+
+       arq->on_hash = 1;
+       list_add(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
+}
+
+/*
+ * move hot entry to front of chain
+ */
+static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
+{
+       struct request *rq = arq->request;
+       struct list_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
+
+       if (!arq->on_hash) {
+               WARN_ON(1);
+               return;
+       }
+
+       if (arq->hash.prev != head) {
+               list_del(&arq->hash);
+               list_add(&arq->hash, head);
+       }
+}
+
+static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
+{
+       struct list_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
+       struct list_head *entry, *next = hash_list->next;
+
+       while ((entry = next) != hash_list) {
+               struct as_rq *arq = list_entry_hash(entry);
+               struct request *__rq = arq->request;
+
+               next = entry->next;
+
+               BUG_ON(!arq->on_hash);
+
+               if (!rq_mergeable(__rq)) {
+                       as_del_arq_hash(arq);
+                       continue;
+               }
+
+               if (rq_hash_key(__rq) == offset)
+                       return __rq;
+       }
+
+       return NULL;
+}
+
+/*
+ * rb tree support functions
+ */
+#define RB_NONE                (2)
+#define RB_EMPTY(root) ((root)->rb_node == NULL)
+#define ON_RB(node)    ((node)->rb_color != RB_NONE)
+#define RB_CLEAR(node) ((node)->rb_color = RB_NONE)
+#define rb_entry_arq(node)     rb_entry((node), struct as_rq, rb_node)
+#define ARQ_RB_ROOT(ad, arq)   (&(ad)->sort_list[(arq)->is_sync])
+#define rq_rb_key(rq)          (rq)->sector
+
+/*
+ * as_find_first_arq finds the first (lowest sector numbered) request
+ * for the specified data_dir. Used to sweep back to the start of the disk
+ * (1-way elevator) after we process the last (highest sector) request.
+ */
+static struct as_rq *as_find_first_arq(struct as_data *ad, int data_dir)
+{
+       struct rb_node *n = ad->sort_list[data_dir].rb_node;
+
+       if (n == NULL)
+               return NULL;
+
+       for (;;) {
+               if (n->rb_left == NULL)
+                       return rb_entry_arq(n);
+
+               n = n->rb_left;
+       }
+}
+
+/*
+ * Add the request to the rb tree if it is unique.  If there is an alias (an
+ * existing request against the same sector), which can happen when using
+ * direct IO, then return the alias.
+ */
+static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
+{
+       struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
+       struct rb_node *parent = NULL;
+       struct as_rq *__arq;
+       struct request *rq = arq->request;
+
+       arq->rb_key = rq_rb_key(rq);
+
+       while (*p) {
+               parent = *p;
+               __arq = rb_entry_arq(parent);
+
+               if (arq->rb_key < __arq->rb_key)
+                       p = &(*p)->rb_left;
+               else if (arq->rb_key > __arq->rb_key)
+                       p = &(*p)->rb_right;
+               else
+                       return __arq;
+       }
+
+       rb_link_node(&arq->rb_node, parent, p);
+       rb_insert_color(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
+
+       return NULL;
+}
+
+static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
+{
+       if (!ON_RB(&arq->rb_node)) {
+               WARN_ON(1);
+               return;
+       }
+
+       rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
+       RB_CLEAR(&arq->rb_node);
+}
+
+static struct request *
+as_find_arq_rb(struct as_data *ad, sector_t sector, int data_dir)
+{
+       struct rb_node *n = ad->sort_list[data_dir].rb_node;
+       struct as_rq *arq;
+
+       while (n) {
+               arq = rb_entry_arq(n);
+
+               if (sector < arq->rb_key)
+                       n = n->rb_left;
+               else if (sector > arq->rb_key)
+                       n = n->rb_right;
+               else
+                       return arq->request;
+       }
+
+       return NULL;
+}
+
+/*
+ * IO Scheduler proper
+ */
+
+#define MAXBACK (1024 * 1024)  /*
+                                * Maximum distance the disk will go backward
+                                * for a request.
+                                */
+
+#define BACK_PENALTY   2
+
+/*
+ * as_choose_req selects the preferred one of two requests of the same data_dir
+ * ignoring time - eg. timeouts, which is the job of as_dispatch_request
+ */
+static struct as_rq *
+as_choose_req(struct as_data *ad, struct as_rq *arq1, struct as_rq *arq2)
+{
+       int data_dir;
+       sector_t last, s1, s2, d1, d2;
+       int r1_wrap=0, r2_wrap=0;       /* requests are behind the disk head */
+       const sector_t maxback = MAXBACK;
+
+       if (arq1 == NULL || arq1 == arq2)
+               return arq2;
+       if (arq2 == NULL)
+               return arq1;
+
+       data_dir = arq1->is_sync;
+
+       last = ad->last_sector[data_dir];
+       s1 = arq1->request->sector;
+       s2 = arq2->request->sector;
+
+       BUG_ON(data_dir != arq2->is_sync);
+
+       /*
+        * Strict one way elevator _except_ in the case where we allow
+        * short backward seeks which are biased as twice the cost of a
+        * similar forward seek.
+        */
+       if (s1 >= last)
+               d1 = s1 - last;
+       else if (s1+maxback >= last)
+               d1 = (last - s1)*BACK_PENALTY;
+       else {
+               r1_wrap = 1;
+               d1 = 0; /* shut up, gcc */
+       }
+
+       if (s2 >= last)
+               d2 = s2 - last;
+       else if (s2+maxback >= last)
+               d2 = (last - s2)*BACK_PENALTY;
+       else {
+               r2_wrap = 1;
+               d2 = 0;
+       }
+
+       /* Found required data */
+       if (!r1_wrap && r2_wrap)
+               return arq1;
+       else if (!r2_wrap && r1_wrap)
+               return arq2;
+       else if (r1_wrap && r2_wrap) {
+               /* both behind the head */
+               if (s1 <= s2)
+                       return arq1;
+               else
+                       return arq2;
+       }
+
+       /* Both requests in front of the head */
+       if (d1 < d2)
+               return arq1;
+       else if (d2 < d1)
+               return arq2;
+       else {
+               if (s1 >= s2)
+                       return arq1;
+               else
+                       return arq2;
+       }
+}
+
+/*
+ * as_find_next_arq finds the next request after @prev in elevator order.
+ * this with as_choose_req form the basis for how the scheduler chooses
+ * what request to process next. Anticipation works on top of this.
+ */
+static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *last)
+{
+       const int data_dir = last->is_sync;
+       struct as_rq *ret;
+       struct rb_node *rbnext = rb_next(&last->rb_node);
+       struct rb_node *rbprev = rb_prev(&last->rb_node);
+       struct as_rq *arq_next, *arq_prev;
+
+       BUG_ON(!ON_RB(&last->rb_node));
+
+       if (rbprev)
+               arq_prev = rb_entry_arq(rbprev);
+       else
+               arq_prev = NULL;
+
+       if (rbnext)
+               arq_next = rb_entry_arq(rbnext);
+       else {
+               arq_next = as_find_first_arq(ad, data_dir);
+               if (arq_next == last)
+                       arq_next = NULL;
+       }
+
+       ret = as_choose_req(ad, arq_next, arq_prev);
+
+       return ret;
+}
+
+/*
+ * anticipatory scheduling functions follow
+ */
+
+/*
+ * as_antic_expired tells us when we have anticipated too long.
+ * The funny "absolute difference" math on the elapsed time is to handle
+ * jiffy wraps, and disks which have been idle for 0x80000000 jiffies.
+ */
+static int as_antic_expired(struct as_data *ad)
+{
+       long delta_jif;
+
+       delta_jif = jiffies - ad->antic_start;
+       if (unlikely(delta_jif < 0))
+               delta_jif = -delta_jif;
+       if (delta_jif < ad->antic_expire)
+               return 0;
+
+       return 1;
+}
+
+/*
+ * as_antic_waitnext starts anticipating that a nice request will soon be
+ * submitted. See also as_antic_waitreq
+ */
+static void as_antic_waitnext(struct as_data *ad)
+{
+       unsigned long timeout;
+
+       BUG_ON(ad->antic_status != ANTIC_OFF
+                       && ad->antic_status != ANTIC_WAIT_REQ);
+
+       timeout = ad->antic_start + ad->antic_expire;
+
+       mod_timer(&ad->antic_timer, timeout);
+
+       ad->antic_status = ANTIC_WAIT_NEXT;
+}
+
+/*
+ * as_antic_waitreq starts anticipating. We don't start timing the anticipation
+ * until the request that we're anticipating on has finished. This means we
+ * are timing from when the candidate process wakes up hopefully.
+ */
+static void as_antic_waitreq(struct as_data *ad)
+{
+       BUG_ON(ad->antic_status == ANTIC_FINISHED);
+       if (ad->antic_status == ANTIC_OFF) {
+               if (!ad->io_context || ad->ioc_finished)
+                       as_antic_waitnext(ad);
+               else
+                       ad->antic_status = ANTIC_WAIT_REQ;
+       }
+}
+
+/*
+ * This is called directly by the functions in this file to stop anticipation.
+ * We kill the timer and schedule a call to the request_fn asap.
+ */
+static void as_antic_stop(struct as_data *ad)
+{
+       int status = ad->antic_status;
+
+       if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
+               if (status == ANTIC_WAIT_NEXT)
+                       del_timer(&ad->antic_timer);
+               ad->antic_status = ANTIC_FINISHED;
+               /* see as_work_handler */
+               kblockd_schedule_work(&ad->antic_work);
+       }
+}
+
+/*
+ * as_antic_timeout is the timer function set by as_antic_waitnext.
+ */
+static void as_antic_timeout(unsigned long data)
+{
+       struct request_queue *q = (struct request_queue *)data;
+       struct as_data *ad = q->elevator->elevator_data;
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       if (ad->antic_status == ANTIC_WAIT_REQ
+                       || ad->antic_status == ANTIC_WAIT_NEXT) {
+               struct as_io_context *aic = ad->io_context->aic;
+
+               ad->antic_status = ANTIC_FINISHED;
+               kblockd_schedule_work(&ad->antic_work);
+
+               if (aic->ttime_samples == 0) {
+                       /* process anticipated on has exited or timed out */
+                       ad->exit_prob = (7*ad->exit_prob + 256)/8;
+               }
+       }
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/*
+ * as_close_req decides if one request is considered "close" to the
+ * previous one issued.
+ */
+static int as_close_req(struct as_data *ad, struct as_rq *arq)
+{
+       unsigned long delay;    /* milliseconds */
+       sector_t last = ad->last_sector[ad->batch_data_dir];
+       sector_t next = arq->request->sector;
+       sector_t delta; /* acceptable close offset (in sectors) */
+
+       if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished)
+               delay = 0;
+       else
+               delay = ((jiffies - ad->antic_start) * 1000) / HZ;
+
+       if (delay <= 1)
+               delta = 64;
+       else if (delay <= 20 && delay <= ad->antic_expire)
+               delta = 64 << (delay-1);
+       else
+               return 1;
+
+       return (last - (delta>>1) <= next) && (next <= last + delta);
+}
+
+/*
+ * as_can_break_anticipation returns true if we have been anticipating this
+ * request.
+ *
+ * It also returns true if the process against which we are anticipating
+ * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to
+ * dispatch it ASAP, because we know that application will not be submitting
+ * any new reads.
+ *
+ * If the task which has submitted the request has exited, break anticipation.
+ *
+ * If this task has queued some other IO, do not enter anticipation.
+ */
+static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
+{
+       struct io_context *ioc;
+       struct as_io_context *aic;
+       sector_t s;
+
+       ioc = ad->io_context;
+       BUG_ON(!ioc);
+
+       if (arq && ioc == arq->io_context) {
+               /* request from same process */
+               return 1;
+       }
+
+       if (ad->ioc_finished && as_antic_expired(ad)) {
+               /*
+                * In this situation status should really be FINISHED,
+                * however the timer hasn't had the chance to run yet.
+                */
+               return 1;
+       }
+
+       aic = ioc->aic;
+       if (!aic)
+               return 0;
+
+       if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
+               /* process anticipated on has exited */
+               if (aic->ttime_samples == 0)
+                       ad->exit_prob = (7*ad->exit_prob + 256)/8;
+               return 1;
+       }
+
+       if (atomic_read(&aic->nr_queued) > 0) {
+               /* process has more requests queued */
+               return 1;
+       }
+
+       if (atomic_read(&aic->nr_dispatched) > 0) {
+               /* process has more requests dispatched */
+               return 1;
+       }
+
+       if (arq && arq->is_sync == REQ_SYNC && as_close_req(ad, arq)) {
+               /*
+                * Found a close request that is not one of ours.
+                *
+                * This makes close requests from another process reset
+                * our thinktime delay. This is generally useful when there are
+                * two or more cooperating processes working in the same
+                * area.
+                */
+               spin_lock(&aic->lock);
+               aic->last_end_request = jiffies;
+               spin_unlock(&aic->lock);
+               return 1;
+       }
+
+
+       if (aic->ttime_samples == 0) {
+               if (ad->new_ttime_mean > ad->antic_expire)
+                       return 1;
+               if (ad->exit_prob > 128)
+                       return 1;
+       } else if (aic->ttime_mean > ad->antic_expire) {
+               /* the process thinks too much between requests */
+               return 1;
+       }
+
+       if (!arq)
+               return 0;
+
+       if (ad->last_sector[REQ_SYNC] < arq->request->sector)
+               s = arq->request->sector - ad->last_sector[REQ_SYNC];
+       else
+               s = ad->last_sector[REQ_SYNC] - arq->request->sector;
+
+       if (aic->seek_samples == 0) {
+               /*
+                * Process has just started IO. Use past statistics to
+                * gauge success possibility
+                */
+               if (ad->new_seek_mean > s) {
+                       /* this request is better than what we're expecting */
+                       return 1;
+               }
+
+       } else {
+               if (aic->seek_mean > s) {
+                       /* this request is better than what we're expecting */
+                       return 1;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * as_can_anticipate indicates whether we should either run arq
+ * or keep anticipating a better request.
+ */
+static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
+{
+       if (!ad->io_context)
+               /*
+                * Last request submitted was a write
+                */
+               return 0;
+
+       if (ad->antic_status == ANTIC_FINISHED)
+               /*
+                * Don't restart if we have just finished. Run the next request
+                */
+               return 0;
+
+       if (as_can_break_anticipation(ad, arq))
+               /*
+                * This request is a good candidate. Don't keep anticipating,
+                * run it.
+                */
+               return 0;
+
+       /*
+        * OK from here, we haven't finished, and don't have a decent request!
+        * Status is either ANTIC_OFF so start waiting,
+        * ANTIC_WAIT_REQ so continue waiting for request to finish
+        * or ANTIC_WAIT_NEXT so continue waiting for an acceptable request.
+        *
+        */
+
+       return 1;
+}
+
+static void as_update_thinktime(struct as_data *ad, struct as_io_context *aic, unsigned long ttime)
+{
+       /* fixed point: 1.0 == 1<<8 */
+       if (aic->ttime_samples == 0) {
+               ad->new_ttime_total = (7*ad->new_ttime_total + 256*ttime) / 8;
+               ad->new_ttime_mean = ad->new_ttime_total / 256;
+
+               ad->exit_prob = (7*ad->exit_prob)/8;
+       }
+       aic->ttime_samples = (7*aic->ttime_samples + 256) / 8;
+       aic->ttime_total = (7*aic->ttime_total + 256*ttime) / 8;
+       aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
+}
+
+static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic, sector_t sdist)
+{
+       u64 total;
+
+       if (aic->seek_samples == 0) {
+               ad->new_seek_total = (7*ad->new_seek_total + 256*(u64)sdist)/8;
+               ad->new_seek_mean = ad->new_seek_total / 256;
+       }
+
+       /*
+        * Don't allow the seek distance to get too large from the
+        * odd fragment, pagein, etc
+        */
+       if (aic->seek_samples <= 60) /* second&third seek */
+               sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*1024);
+       else
+               sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*64);
+
+       aic->seek_samples = (7*aic->seek_samples + 256) / 8;
+       aic->seek_total = (7*aic->seek_total + (u64)256*sdist) / 8;
+       total = aic->seek_total + (aic->seek_samples/2);
+       do_div(total, aic->seek_samples);
+       aic->seek_mean = (sector_t)total;
+}
+
+/*
+ * as_update_iohist keeps a decaying histogram of IO thinktimes, and
+ * updates @aic->ttime_mean based on that. It is called when a new
+ * request is queued.
+ */
+static void as_update_iohist(struct as_data *ad, struct as_io_context *aic, struct request *rq)
+{
+       struct as_rq *arq = RQ_DATA(rq);
+       int data_dir = arq->is_sync;
+       unsigned long thinktime;
+       sector_t seek_dist;
+
+       if (aic == NULL)
+               return;
+
+       if (data_dir == REQ_SYNC) {
+               unsigned long in_flight = atomic_read(&aic->nr_queued)
+                                       + atomic_read(&aic->nr_dispatched);
+               spin_lock(&aic->lock);
+               if (test_bit(AS_TASK_IORUNNING, &aic->state) ||
+                       test_bit(AS_TASK_IOSTARTED, &aic->state)) {
+                       /* Calculate read -> read thinktime */
+                       if (test_bit(AS_TASK_IORUNNING, &aic->state)
+                                                       && in_flight == 0) {
+                               thinktime = jiffies - aic->last_end_request;
+                               thinktime = min(thinktime, MAX_THINKTIME-1);
+                       } else
+                               thinktime = 0;
+                       as_update_thinktime(ad, aic, thinktime);
+
+                       /* Calculate read -> read seek distance */
+                       if (aic->last_request_pos < rq->sector)
+                               seek_dist = rq->sector - aic->last_request_pos;
+                       else
+                               seek_dist = aic->last_request_pos - rq->sector;
+                       as_update_seekdist(ad, aic, seek_dist);
+               }
+               aic->last_request_pos = rq->sector + rq->nr_sectors;
+               set_bit(AS_TASK_IOSTARTED, &aic->state);
+               spin_unlock(&aic->lock);
+       }
+}
+
+/*
+ * as_update_arq must be called whenever a request (arq) is added to
+ * the sort_list. This function keeps caches up to date, and checks if the
+ * request might be one we are "anticipating"
+ */
+static void as_update_arq(struct as_data *ad, struct as_rq *arq)
+{
+       const int data_dir = arq->is_sync;
+
+       /* keep the next_arq cache up to date */
+       ad->next_arq[data_dir] = as_choose_req(ad, arq, ad->next_arq[data_dir]);
+
+       /*
+        * have we been anticipating this request?
+        * or does it come from the same process as the one we are anticipating
+        * for?
+        */
+       if (ad->antic_status == ANTIC_WAIT_REQ
+                       || ad->antic_status == ANTIC_WAIT_NEXT) {
+               if (as_can_break_anticipation(ad, arq))
+                       as_antic_stop(ad);
+       }
+}
+
+/*
+ * Gathers timings and resizes the write batch automatically
+ */
+static void update_write_batch(struct as_data *ad)
+{
+       unsigned long batch = ad->batch_expire[REQ_ASYNC];
+       long write_time;
+
+       write_time = (jiffies - ad->current_batch_expires) + batch;
+       if (write_time < 0)
+               write_time = 0;
+
+       if (write_time > batch && !ad->write_batch_idled) {
+               if (write_time > batch * 3)
+                       ad->write_batch_count /= 2;
+               else
+                       ad->write_batch_count--;
+       } else if (write_time < batch && ad->current_write_count == 0) {
+               if (batch > write_time * 3)
+                       ad->write_batch_count *= 2;
+               else
+                       ad->write_batch_count++;
+       }
+
+       if (ad->write_batch_count < 1)
+               ad->write_batch_count = 1;
+}
+
+/*
+ * as_completed_request is to be called when a request has completed and
+ * returned something to the requesting process, be it an error or data.
+ */
+static void as_completed_request(request_queue_t *q, struct request *rq)
+{
+       struct as_data *ad = q->elevator->elevator_data;
+       struct as_rq *arq = RQ_DATA(rq);
+
+       WARN_ON(!list_empty(&rq->queuelist));
+
+       if (arq->state != AS_RQ_REMOVED) {
+               printk("arq->state %d\n", arq->state);
+               WARN_ON(1);
+               goto out;
+       }
+
+       if (ad->changed_batch && ad->nr_dispatched == 1) {
+               kblockd_schedule_work(&ad->antic_work);
+               ad->changed_batch = 0;
+
+               if (ad->batch_data_dir == REQ_SYNC)
+                       ad->new_batch = 1;
+       }
+       WARN_ON(ad->nr_dispatched == 0);
+       ad->nr_dispatched--;
+
+       /*
+        * Start counting the batch from when a request of that direction is
+        * actually serviced. This should help devices with big TCQ windows
+        * and writeback caches
+        */
+       if (ad->new_batch && ad->batch_data_dir == arq->is_sync) {
+               update_write_batch(ad);
+               ad->current_batch_expires = jiffies +
+                               ad->batch_expire[REQ_SYNC];
+               ad->new_batch = 0;
+       }
+
+       if (ad->io_context == arq->io_context && ad->io_context) {
+               ad->antic_start = jiffies;
+               ad->ioc_finished = 1;
+               if (ad->antic_status == ANTIC_WAIT_REQ) {
+                       /*
+                        * We were waiting on this request, now anticipate
+                        * the next one
+                        */
+                       as_antic_waitnext(ad);
+               }
+       }
+
+       as_put_io_context(arq);
+out:
+       arq->state = AS_RQ_POSTSCHED;
+}
+
+/*
+ * as_remove_queued_request removes a request from the pre dispatch queue
+ * without updating refcounts. It is expected the caller will drop the
+ * reference unless it replaces the request at some part of the elevator
+ * (ie. the dispatch queue)
+ */
+static void as_remove_queued_request(request_queue_t *q, struct request *rq)
+{
+       struct as_rq *arq = RQ_DATA(rq);
+       const int data_dir = arq->is_sync;
+       struct as_data *ad = q->elevator->elevator_data;
+
+       WARN_ON(arq->state != AS_RQ_QUEUED);
+
+       if (arq->io_context && arq->io_context->aic) {
+               BUG_ON(!atomic_read(&arq->io_context->aic->nr_queued));
+               atomic_dec(&arq->io_context->aic->nr_queued);
+       }
+
+       /*
+        * Update the "next_arq" cache if we are about to remove its
+        * entry
+        */
+       if (ad->next_arq[data_dir] == arq)
+               ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
+
+       list_del_init(&arq->fifo);
+       as_del_arq_hash(arq);
+       as_del_arq_rb(ad, arq);
+}
+
+/*
+ * as_fifo_expired returns 0 if there are no expired reads on the fifo,
+ * 1 otherwise.  It is ratelimited so that we only perform the check once per
+ * `fifo_expire' interval.  Otherwise a large number of expired requests
+ * would create a hopeless seekstorm.
+ *
+ * See as_antic_expired comment.
+ */
+static int as_fifo_expired(struct as_data *ad, int adir)
+{
+       struct as_rq *arq;
+       long delta_jif;
+
+       delta_jif = jiffies - ad->last_check_fifo[adir];
+       if (unlikely(delta_jif < 0))
+               delta_jif = -delta_jif;
+       if (delta_jif < ad->fifo_expire[adir])
+               return 0;
+
+       ad->last_check_fifo[adir] = jiffies;
+
+       if (list_empty(&ad->fifo_list[adir]))
+               return 0;
+
+       arq = list_entry_fifo(ad->fifo_list[adir].next);
+
+       return time_after(jiffies, arq->expires);
+}
+
+/*
+ * as_batch_expired returns true if the current batch has expired. A batch
+ * is a set of reads or a set of writes.
+ */
+static inline int as_batch_expired(struct as_data *ad)
+{
+       if (ad->changed_batch || ad->new_batch)
+               return 0;
+
+       if (ad->batch_data_dir == REQ_SYNC)
+               /* TODO! add a check so a complete fifo gets written? */
+               return time_after(jiffies, ad->current_batch_expires);
+
+       return time_after(jiffies, ad->current_batch_expires)
+               || ad->current_write_count == 0;
+}
+
+/*
+ * move an entry to dispatch queue
+ */
+static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
+{
+       struct request *rq = arq->request;
+       const int data_dir = arq->is_sync;
+
+       BUG_ON(!ON_RB(&arq->rb_node));
+
+       as_antic_stop(ad);
+       ad->antic_status = ANTIC_OFF;
+
+       /*
+        * This has to be set in order to be correctly updated by
+        * as_find_next_arq
+        */
+       ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;
+
+       if (data_dir == REQ_SYNC) {
+               /* In case we have to anticipate after this */
+               copy_io_context(&ad->io_context, &arq->io_context);
+       } else {
+               if (ad->io_context) {
+                       put_io_context(ad->io_context);
+                       ad->io_context = NULL;
+               }
+
+               if (ad->current_write_count != 0)
+                       ad->current_write_count--;
+       }
+       ad->ioc_finished = 0;
+
+       ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
+
+       /*
+        * take it off the sort and fifo list, add to dispatch queue
+        */
+       while (!list_empty(&rq->queuelist)) {
+               struct request *__rq = list_entry_rq(rq->queuelist.next);
+               struct as_rq *__arq = RQ_DATA(__rq);
+
+               list_del(&__rq->queuelist);
+
+               elv_dispatch_add_tail(ad->q, __rq);
+
+               if (__arq->io_context && __arq->io_context->aic)
+                       atomic_inc(&__arq->io_context->aic->nr_dispatched);
+
+               WARN_ON(__arq->state != AS_RQ_QUEUED);
+               __arq->state = AS_RQ_DISPATCHED;
+
+               ad->nr_dispatched++;
+       }
+
+       as_remove_queued_request(ad->q, rq);
+       WARN_ON(arq->state != AS_RQ_QUEUED);
+
+       elv_dispatch_sort(ad->q, rq);
+
+       arq->state = AS_RQ_DISPATCHED;
+       if (arq->io_context && arq->io_context->aic)
+               atomic_inc(&arq->io_context->aic->nr_dispatched);
+       ad->nr_dispatched++;
+}
+
+/*
+ * as_dispatch_request selects the best request according to
+ * read/write expire, batch expire, etc, and moves it to the dispatch
+ * queue. Returns 1 if a request was found, 0 otherwise.
+ */
+static int as_dispatch_request(request_queue_t *q, int force)
+{
+       struct as_data *ad = q->elevator->elevator_data;
+       struct as_rq *arq;
+       const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
+       const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);
+
+       if (unlikely(force)) {
+               /*
+                * Forced dispatch, accounting is useless.  Reset
+                * accounting states and dump fifo_lists.  Note that
+                * batch_data_dir is reset to REQ_SYNC to avoid
+                * screwing write batch accounting as write batch
+                * accounting occurs on W->R transition.
+                */
+               int dispatched = 0;
+
+               ad->batch_data_dir = REQ_SYNC;
+               ad->changed_batch = 0;
+               ad->new_batch = 0;
+
+               while (ad->next_arq[REQ_SYNC]) {
+                       as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
+                       dispatched++;
+               }
+               ad->last_check_fifo[REQ_SYNC] = jiffies;
+
+               while (ad->next_arq[REQ_ASYNC]) {
+                       as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
+                       dispatched++;
+               }
+               ad->last_check_fifo[REQ_ASYNC] = jiffies;
+
+               return dispatched;
+       }
+
+       /* Signal that the write batch was uncontended, so we can't time it */
+       if (ad->batch_data_dir == REQ_ASYNC && !reads) {
+               if (ad->current_write_count == 0 || !writes)
+                       ad->write_batch_idled = 1;
+       }
+
+       if (!(reads || writes)
+               || ad->antic_status == ANTIC_WAIT_REQ
+               || ad->antic_status == ANTIC_WAIT_NEXT
+               || ad->changed_batch)
+               return 0;
+
+       if (!(reads && writes && as_batch_expired(ad)) ) {
+               /*
+                * batch is still running or no reads or no writes
+                */
+               arq = ad->next_arq[ad->batch_data_dir];
+
+               if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) {
+                       if (as_fifo_expired(ad, REQ_SYNC))
+                               goto fifo_expired;
+
+                       if (as_can_anticipate(ad, arq)) {
+                               as_antic_waitreq(ad);
+                               return 0;
+                       }
+               }
+
+               if (arq) {
+                       /* we have a "next request" */
+                       if (reads && !writes)
+                               ad->current_batch_expires =
+                                       jiffies + ad->batch_expire[REQ_SYNC];
+                       goto dispatch_request;
+               }
+       }
+
+       /*
+        * at this point we are not running a batch. select the appropriate
+        * data direction (read / write)
+        */
+
+       if (reads) {
+               BUG_ON(RB_EMPTY(&ad->sort_list[REQ_SYNC]));
+
+               if (writes && ad->batch_data_dir == REQ_SYNC)
+                       /*
+                        * Last batch was a read, switch to writes
+                        */
+                       goto dispatch_writes;
+
+               if (ad->batch_data_dir == REQ_ASYNC) {
+                       WARN_ON(ad->new_batch);
+                       ad->changed_batch = 1;
+               }
+               ad->batch_data_dir = REQ_SYNC;
+               arq = list_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
+               ad->last_check_fifo[ad->batch_data_dir] = jiffies;
+               goto dispatch_request;
+       }
+
+       /*
+        * the last batch was a read
+        */
+
+       if (writes) {
+dispatch_writes:
+               BUG_ON(RB_EMPTY(&ad->sort_list[REQ_ASYNC]));
+
+               if (ad->batch_data_dir == REQ_SYNC) {
+                       ad->changed_batch = 1;
+
+                       /*
+                        * new_batch might be 1 when the queue runs out of
+                        * reads. A subsequent submission of a write might
+                        * cause a change of batch before the read is finished.
+                        */
+                       ad->new_batch = 0;
+               }
+               ad->batch_data_dir = REQ_ASYNC;
+               ad->current_write_count = ad->write_batch_count;
+               ad->write_batch_idled = 0;
+               arq = ad->next_arq[ad->batch_data_dir];
+               goto dispatch_request;
+       }
+
+       BUG();
+       return 0;
+
+dispatch_request:
+       /*
+        * If a request has expired, service it.
+        */
+
+       if (as_fifo_expired(ad, ad->batch_data_dir)) {
+fifo_expired:
+               arq = list_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
+               BUG_ON(arq == NULL);
+       }
+
+       if (ad->changed_batch) {
+               WARN_ON(ad->new_batch);
+
+               if (ad->nr_dispatched)
+                       return 0;
+
+               if (ad->batch_data_dir == REQ_ASYNC)
+                       ad->current_batch_expires = jiffies +
+                                       ad->batch_expire[REQ_ASYNC];
+               else
+                       ad->new_batch = 1;
+
+               ad->changed_batch = 0;
+       }
+
+       /*
+        * arq is the selected appropriate request.
+        */
+       as_move_to_dispatch(ad, arq);
+
+       return 1;
+}
+
+/*
+ * Add arq to a list behind alias
+ */
+static inline void
+as_add_aliased_request(struct as_data *ad, struct as_rq *arq, struct as_rq *alias)
+{
+       struct request  *req = arq->request;
+       struct list_head *insert = alias->request->queuelist.prev;
+
+       /*
+        * Transfer list of aliases
+        */
+       while (!list_empty(&req->queuelist)) {
+               struct request *__rq = list_entry_rq(req->queuelist.next);
+               struct as_rq *__arq = RQ_DATA(__rq);
+
+               list_move_tail(&__rq->queuelist, &alias->request->queuelist);
+
+               WARN_ON(__arq->state != AS_RQ_QUEUED);
+       }
+
+       /*
+        * Another request with the same start sector on the rbtree.
+        * Link this request to that sector. They are untangled in
+        * as_move_to_dispatch
+        */
+       list_add(&arq->request->queuelist, insert);
+
+       /*
+        * Don't want to have to handle merges.
+        */
+       as_del_arq_hash(arq);
+       arq->request->flags |= REQ_NOMERGE;
+}
+
+/*
+ * add arq to rbtree and fifo
+ */
+static void as_add_request(request_queue_t *q, struct request *rq)
+{
+       struct as_data *ad = q->elevator->elevator_data;
+       struct as_rq *arq = RQ_DATA(rq);
+       struct as_rq *alias;
+       int data_dir;
+
+       if (arq->state != AS_RQ_PRESCHED) {
+               printk("arq->state: %d\n", arq->state);
+               WARN_ON(1);
+       }
+       arq->state = AS_RQ_NEW;
+
+       if (rq_data_dir(arq->request) == READ
+                       || current->flags&PF_SYNCWRITE)
+               arq->is_sync = 1;
+       else
+               arq->is_sync = 0;
+       data_dir = arq->is_sync;
+
+       arq->io_context = as_get_io_context();
+
+       if (arq->io_context) {
+               as_update_iohist(ad, arq->io_context->aic, arq->request);
+               atomic_inc(&arq->io_context->aic->nr_queued);
+       }
+
+       alias = as_add_arq_rb(ad, arq);
+       if (!alias) {
+               /*
+                * set expire time (only used for reads) and add to fifo list
+                */
+               arq->expires = jiffies + ad->fifo_expire[data_dir];
+               list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
+
+               if (rq_mergeable(arq->request))
+                       as_add_arq_hash(ad, arq);
+               as_update_arq(ad, arq); /* keep state machine up to date */
+
+       } else {
+               as_add_aliased_request(ad, arq, alias);
+
+               /*
+                * have we been anticipating this request?
+                * or does it come from the same process as the one we are
+                * anticipating for?
+                */
+               if (ad->antic_status == ANTIC_WAIT_REQ
+                               || ad->antic_status == ANTIC_WAIT_NEXT) {
+                       if (as_can_break_anticipation(ad, arq))
+                               as_antic_stop(ad);
+               }
+       }
+
+       arq->state = AS_RQ_QUEUED;
+}
+
+static void as_activate_request(request_queue_t *q, struct request *rq)
+{
+       struct as_rq *arq = RQ_DATA(rq);
+
+       WARN_ON(arq->state != AS_RQ_DISPATCHED);
+       arq->state = AS_RQ_REMOVED;
+       if (arq->io_context && arq->io_context->aic)
+               atomic_dec(&arq->io_context->aic->nr_dispatched);
+}
+
+static void as_deactivate_request(request_queue_t *q, struct request *rq)
+{
+       struct as_rq *arq = RQ_DATA(rq);
+
+       WARN_ON(arq->state != AS_RQ_REMOVED);
+       arq->state = AS_RQ_DISPATCHED;
+       if (arq->io_context && arq->io_context->aic)
+               atomic_inc(&arq->io_context->aic->nr_dispatched);
+}
+
+/*
+ * as_queue_empty tells us if there are requests left in the device. It may
+ * not be the case that a driver can get the next request even if the queue
+ * is not empty - it is used in the block layer to check for plugging and
+ * merging opportunities
+ */
+static int as_queue_empty(request_queue_t *q)
+{
+       struct as_data *ad = q->elevator->elevator_data;
+
+       return list_empty(&ad->fifo_list[REQ_ASYNC])
+               && list_empty(&ad->fifo_list[REQ_SYNC]);
+}
+
+static struct request *
+as_former_request(request_queue_t *q, struct request *rq)
+{
+       struct as_rq *arq = RQ_DATA(rq);
+       struct rb_node *rbprev = rb_prev(&arq->rb_node);
+       struct request *ret = NULL;
+
+       if (rbprev)
+               ret = rb_entry_arq(rbprev)->request;
+
+       return ret;
+}
+
+static struct request *
+as_latter_request(request_queue_t *q, struct request *rq)
+{
+       struct as_rq *arq = RQ_DATA(rq);
+       struct rb_node *rbnext = rb_next(&arq->rb_node);
+       struct request *ret = NULL;
+
+       if (rbnext)
+               ret = rb_entry_arq(rbnext)->request;
+
+       return ret;
+}
+
+static int
+as_merge(request_queue_t *q, struct request **req, struct bio *bio)
+{
+       struct as_data *ad = q->elevator->elevator_data;
+       sector_t rb_key = bio->bi_sector + bio_sectors(bio);
+       struct request *__rq;
+       int ret;
+
+       /*
+        * see if the merge hash can satisfy a back merge
+        */
+       __rq = as_find_arq_hash(ad, bio->bi_sector);
+       if (__rq) {
+               BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
+
+               if (elv_rq_merge_ok(__rq, bio)) {
+                       ret = ELEVATOR_BACK_MERGE;
+                       goto out;
+               }
+       }
+
+       /*
+        * check for front merge
+        */
+       __rq = as_find_arq_rb(ad, rb_key, bio_data_dir(bio));
+       if (__rq) {
+               BUG_ON(rb_key != rq_rb_key(__rq));
+
+               if (elv_rq_merge_ok(__rq, bio)) {
+                       ret = ELEVATOR_FRONT_MERGE;
+                       goto out;
+               }
+       }
+
+       return ELEVATOR_NO_MERGE;
+out:
+       if (ret) {
+               if (rq_mergeable(__rq))
+                       as_hot_arq_hash(ad, RQ_DATA(__rq));
+       }
+       *req = __rq;
+       return ret;
+}
+
+static void as_merged_request(request_queue_t *q, struct request *req)
+{
+       struct as_data *ad = q->elevator->elevator_data;
+       struct as_rq *arq = RQ_DATA(req);
+
+       /*
+        * hash always needs to be repositioned, key is end sector
+        */
+       as_del_arq_hash(arq);
+       as_add_arq_hash(ad, arq);
+
+       /*
+        * if the merge was a front merge, we need to reposition request
+        */
+       if (rq_rb_key(req) != arq->rb_key) {
+               struct as_rq *alias, *next_arq = NULL;
+
+               if (ad->next_arq[arq->is_sync] == arq)
+                       next_arq = as_find_next_arq(ad, arq);
+
+               /*
+                * Note! We should really be moving any old aliased requests
+                * off this request and try to insert them into the rbtree. We
+                * currently don't bother. Ditto the next function.
+                */
+               as_del_arq_rb(ad, arq);
+               if ((alias = as_add_arq_rb(ad, arq)) ) {
+                       list_del_init(&arq->fifo);
+                       as_add_aliased_request(ad, arq, alias);
+                       if (next_arq)
+                               ad->next_arq[arq->is_sync] = next_arq;
+               }
+               /*
+                * Note! At this stage of this and the next function, our next
+                * request may not be optimal - eg the request may have "grown"
+                * behind the disk head. We currently don't bother adjusting.
+                */
+       }
+}
+
+static void
+as_merged_requests(request_queue_t *q, struct request *req,
+                        struct request *next)
+{
+       struct as_data *ad = q->elevator->elevator_data;
+       struct as_rq *arq = RQ_DATA(req);
+       struct as_rq *anext = RQ_DATA(next);
+
+       BUG_ON(!arq);
+       BUG_ON(!anext);
+
+       /*
+        * reposition arq (this is the merged request) in hash, and in rbtree
+        * in case of a front merge
+        */
+       as_del_arq_hash(arq);
+       as_add_arq_hash(ad, arq);
+
+       if (rq_rb_key(req) != arq->rb_key) {
+               struct as_rq *alias, *next_arq = NULL;
+
+               if (ad->next_arq[arq->is_sync] == arq)
+                       next_arq = as_find_next_arq(ad, arq);
+
+               as_del_arq_rb(ad, arq);
+               if ((alias = as_add_arq_rb(ad, arq)) ) {
+                       list_del_init(&arq->fifo);
+                       as_add_aliased_request(ad, arq, alias);
+                       if (next_arq)
+                               ad->next_arq[arq->is_sync] = next_arq;
+               }
+       }
+
+       /*
+        * if anext expires before arq, assign its expire time to arq
+        * and move into anext position (anext will be deleted) in fifo
+        */
+       if (!list_empty(&arq->fifo) && !list_empty(&anext->fifo)) {
+               if (time_before(anext->expires, arq->expires)) {
+                       list_move(&arq->fifo, &anext->fifo);
+                       arq->expires = anext->expires;
+                       /*
+                        * Don't copy here but swap, because when anext is
+                        * removed below, it must contain the unused context
+                        */
+                       swap_io_context(&arq->io_context, &anext->io_context);
+               }
+       }
+
+       /*
+        * Transfer list of aliases
+        */
+       while (!list_empty(&next->queuelist)) {
+               struct request *__rq = list_entry_rq(next->queuelist.next);
+               struct as_rq *__arq = RQ_DATA(__rq);
+
+               list_move_tail(&__rq->queuelist, &req->queuelist);
+
+               WARN_ON(__arq->state != AS_RQ_QUEUED);
+       }
+
+       /*
+        * kill knowledge of next, this one is a goner
+        */
+       as_remove_queued_request(q, next);
+       as_put_io_context(anext);
+
+       anext->state = AS_RQ_MERGED;
+}
+
+/*
+ * This is executed in a "deferred" process context, by kblockd. It calls the
+ * driver's request_fn so the driver can submit that request.
+ *
+ * IMPORTANT! This guy will reenter the elevator, so set up all queue global
+ * state before calling, and don't rely on any state over calls.
+ *
+ * FIXME! dispatch queue is not a queue at all!
+ */
+static void as_work_handler(void *data)
+{
+       struct request_queue *q = data;
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       if (!as_queue_empty(q))
+               q->request_fn(q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void as_put_request(request_queue_t *q, struct request *rq)
+{
+       struct as_data *ad = q->elevator->elevator_data;
+       struct as_rq *arq = RQ_DATA(rq);
+
+       if (!arq) {
+               WARN_ON(1);
+               return;
+       }
+
+       if (unlikely(arq->state != AS_RQ_POSTSCHED &&
+                    arq->state != AS_RQ_PRESCHED &&
+                    arq->state != AS_RQ_MERGED)) {
+               printk("arq->state %d\n", arq->state);
+               WARN_ON(1);
+       }
+
+       mempool_free(arq, ad->arq_pool);
+       rq->elevator_private = NULL;
+}
+
+static int as_set_request(request_queue_t *q, struct request *rq,
+                         struct bio *bio, gfp_t gfp_mask)
+{
+       struct as_data *ad = q->elevator->elevator_data;
+       struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask);
+
+       if (arq) {
+               memset(arq, 0, sizeof(*arq));
+               RB_CLEAR(&arq->rb_node);
+               arq->request = rq;
+               arq->state = AS_RQ_PRESCHED;
+               arq->io_context = NULL;
+               INIT_LIST_HEAD(&arq->hash);
+               arq->on_hash = 0;
+               INIT_LIST_HEAD(&arq->fifo);
+               rq->elevator_private = arq;
+               return 0;
+       }
+
+       return 1;
+}
+
+static int as_may_queue(request_queue_t *q, int rw, struct bio *bio)
+{
+       int ret = ELV_MQUEUE_MAY;
+       struct as_data *ad = q->elevator->elevator_data;
+       struct io_context *ioc;
+       if (ad->antic_status == ANTIC_WAIT_REQ ||
+                       ad->antic_status == ANTIC_WAIT_NEXT) {
+               ioc = as_get_io_context();
+               if (ad->io_context == ioc)
+                       ret = ELV_MQUEUE_MUST;
+               put_io_context(ioc);
+       }
+
+       return ret;
+}
+
+static void as_exit_queue(elevator_t *e)
+{
+       struct as_data *ad = e->elevator_data;
+
+       del_timer_sync(&ad->antic_timer);
+       kblockd_flush();
+
+       BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC]));
+       BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC]));
+
+       mempool_destroy(ad->arq_pool);
+       put_io_context(ad->io_context);
+       kfree(ad->hash);
+       kfree(ad);
+}
+
+/*
+ * initialize elevator private data (as_data), and alloc an arq for
+ * each request on the free lists
+ */
+static int as_init_queue(request_queue_t *q, elevator_t *e)
+{
+       struct as_data *ad;
+       int i;
+
+       if (!arq_pool)
+               return -ENOMEM;
+
+       ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
+       if (!ad)
+               return -ENOMEM;
+       memset(ad, 0, sizeof(*ad));
+
+       ad->q = q; /* Identify what queue the data belongs to */
+
+       ad->hash = kmalloc_node(sizeof(struct list_head)*AS_HASH_ENTRIES,
+                               GFP_KERNEL, q->node);
+       if (!ad->hash) {
+               kfree(ad);
+               return -ENOMEM;
+       }
+
+       ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
+                               mempool_free_slab, arq_pool, q->node);
+       if (!ad->arq_pool) {
+               kfree(ad->hash);
+               kfree(ad);
+               return -ENOMEM;
+       }
+
+       /* anticipatory scheduling helpers */
+       ad->antic_timer.function = as_antic_timeout;
+       ad->antic_timer.data = (unsigned long)q;
+       init_timer(&ad->antic_timer);
+       INIT_WORK(&ad->antic_work, as_work_handler, q);
+
+       for (i = 0; i < AS_HASH_ENTRIES; i++)
+               INIT_LIST_HEAD(&ad->hash[i]);
+
+       INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
+       INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
+       ad->sort_list[REQ_SYNC] = RB_ROOT;
+       ad->sort_list[REQ_ASYNC] = RB_ROOT;
+       ad->fifo_expire[REQ_SYNC] = default_read_expire;
+       ad->fifo_expire[REQ_ASYNC] = default_write_expire;
+       ad->antic_expire = default_antic_expire;
+       ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
+       ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;
+       e->elevator_data = ad;
+
+       ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
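+       /*
+        * size the initial write batch at roughly one request per ten
+        * jiffies of the write batch period, but never fewer than two
+        */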
+       ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
+       if (ad->write_batch_count < 2)
+               ad->write_batch_count = 2;
+
+       return 0;
+}
+
+/*
+ * sysfs parts below
+ */
+struct as_fs_entry {
+       struct attribute attr;
+       ssize_t (*show)(struct as_data *, char *);
+       ssize_t (*store)(struct as_data *, const char *, size_t);
+};
+
+static ssize_t
+as_var_show(unsigned int var, char *page)
+{
+       return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+as_var_store(unsigned long *var, const char *page, size_t count)
+{
+       char *p = (char *) page;
+
+       *var = simple_strtoul(p, &p, 10);
+       return count;
+}
+
+static ssize_t as_est_show(struct as_data *ad, char *page)
+{
+       int pos = 0;
+
+       pos += sprintf(page+pos, "%lu %% exit probability\n", 100*ad->exit_prob/256);
+       pos += sprintf(page+pos, "%lu ms new thinktime\n", ad->new_ttime_mean);
+       pos += sprintf(page+pos, "%llu sectors new seek distance\n", (unsigned long long)ad->new_seek_mean);
+
+       return pos;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR)                           \
+static ssize_t __FUNC(struct as_data *ad, char *page)          \
+{                                                              \
+       return as_var_show(jiffies_to_msecs((__VAR)), (page));  \
+}
+SHOW_FUNCTION(as_readexpire_show, ad->fifo_expire[REQ_SYNC]);
+SHOW_FUNCTION(as_writeexpire_show, ad->fifo_expire[REQ_ASYNC]);
+SHOW_FUNCTION(as_anticexpire_show, ad->antic_expire);
+SHOW_FUNCTION(as_read_batchexpire_show, ad->batch_expire[REQ_SYNC]);
+SHOW_FUNCTION(as_write_batchexpire_show, ad->batch_expire[REQ_ASYNC]);
+#undef SHOW_FUNCTION
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)                                \
+static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count)      \
+{                                                                      \
+       int ret = as_var_store(__PTR, (page), count);           \
+       if (*(__PTR) < (MIN))                                           \
+               *(__PTR) = (MIN);                                       \
+       else if (*(__PTR) > (MAX))                                      \
+               *(__PTR) = (MAX);                                       \
+       *(__PTR) = msecs_to_jiffies(*(__PTR));                          \
+       return ret;                                                     \
+}
+STORE_FUNCTION(as_readexpire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
+STORE_FUNCTION(as_writeexpire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
+STORE_FUNCTION(as_anticexpire_store, &ad->antic_expire, 0, INT_MAX);
+STORE_FUNCTION(as_read_batchexpire_store,
+                       &ad->batch_expire[REQ_SYNC], 0, INT_MAX);
+STORE_FUNCTION(as_write_batchexpire_store,
+                       &ad->batch_expire[REQ_ASYNC], 0, INT_MAX);
+#undef STORE_FUNCTION
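+/*
+ * the SHOW_FUNCTION/STORE_FUNCTION helpers generated above expose the
+ * tunables via sysfs in milliseconds while keeping them in jiffies
+ * internally, clamping stored values to the given range first
+ */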
+
+static struct as_fs_entry as_est_entry = {
+       .attr = {.name = "est_time", .mode = S_IRUGO },
+       .show = as_est_show,
+};
+static struct as_fs_entry as_readexpire_entry = {
+       .attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
+       .show = as_readexpire_show,
+       .store = as_readexpire_store,
+};
+static struct as_fs_entry as_writeexpire_entry = {
+       .attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
+       .show = as_writeexpire_show,
+       .store = as_writeexpire_store,
+};
+static struct as_fs_entry as_anticexpire_entry = {
+       .attr = {.name = "antic_expire", .mode = S_IRUGO | S_IWUSR },
+       .show = as_anticexpire_show,
+       .store = as_anticexpire_store,
+};
+static struct as_fs_entry as_read_batchexpire_entry = {
+       .attr = {.name = "read_batch_expire", .mode = S_IRUGO | S_IWUSR },
+       .show = as_read_batchexpire_show,
+       .store = as_read_batchexpire_store,
+};
+static struct as_fs_entry as_write_batchexpire_entry = {
+       .attr = {.name = "write_batch_expire", .mode = S_IRUGO | S_IWUSR },
+       .show = as_write_batchexpire_show,
+       .store = as_write_batchexpire_store,
+};
+
+static struct attribute *default_attrs[] = {
+       &as_est_entry.attr,
+       &as_readexpire_entry.attr,
+       &as_writeexpire_entry.attr,
+       &as_anticexpire_entry.attr,
+       &as_read_batchexpire_entry.attr,
+       &as_write_batchexpire_entry.attr,
+       NULL,
+};
+
+#define to_as(atr) container_of((atr), struct as_fs_entry, attr)
+
+static ssize_t
+as_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+       elevator_t *e = container_of(kobj, elevator_t, kobj);
+       struct as_fs_entry *entry = to_as(attr);
+
+       if (!entry->show)
+               return -EIO;
+
+       return entry->show(e->elevator_data, page);
+}
+
+static ssize_t
+as_attr_store(struct kobject *kobj, struct attribute *attr,
+                   const char *page, size_t length)
+{
+       elevator_t *e = container_of(kobj, elevator_t, kobj);
+       struct as_fs_entry *entry = to_as(attr);
+
+       if (!entry->store)
+               return -EIO;
+
+       return entry->store(e->elevator_data, page, length);
+}
+
+static struct sysfs_ops as_sysfs_ops = {
+       .show   = as_attr_show,
+       .store  = as_attr_store,
+};
+
+static struct kobj_type as_ktype = {
+       .sysfs_ops      = &as_sysfs_ops,
+       .default_attrs  = default_attrs,
+};
+
+static struct elevator_type iosched_as = {
+       .ops = {
+               .elevator_merge_fn =            as_merge,
+               .elevator_merged_fn =           as_merged_request,
+               .elevator_merge_req_fn =        as_merged_requests,
+               .elevator_dispatch_fn =         as_dispatch_request,
+               .elevator_add_req_fn =          as_add_request,
+               .elevator_activate_req_fn =     as_activate_request,
+               .elevator_deactivate_req_fn =   as_deactivate_request,
+               .elevator_queue_empty_fn =      as_queue_empty,
+               .elevator_completed_req_fn =    as_completed_request,
+               .elevator_former_req_fn =       as_former_request,
+               .elevator_latter_req_fn =       as_latter_request,
+               .elevator_set_req_fn =          as_set_request,
+               .elevator_put_req_fn =          as_put_request,
+               .elevator_may_queue_fn =        as_may_queue,
+               .elevator_init_fn =             as_init_queue,
+               .elevator_exit_fn =             as_exit_queue,
+       },
+
+       .elevator_ktype = &as_ktype,
+       .elevator_name = "anticipatory",
+       .elevator_owner = THIS_MODULE,
+};
+
+static int __init as_init(void)
+{
+       int ret;
+
+       arq_pool = kmem_cache_create("as_arq", sizeof(struct as_rq),
+                                    0, 0, NULL, NULL);
+       if (!arq_pool)
+               return -ENOMEM;
+
+       ret = elv_register(&iosched_as);
+       if (!ret) {
+               /*
+                * don't allow AS to get unregistered, since we would have
+                * to browse all tasks in the system and release their
+                * as_io_context first
+                */
+               __module_get(THIS_MODULE);
+               return 0;
+       }
+
+       kmem_cache_destroy(arq_pool);
+       return ret;
+}
+
+static void __exit as_exit(void)
+{
+       elv_unregister(&iosched_as);
+       kmem_cache_destroy(arq_pool);
+}
+
+module_init(as_init);
+module_exit(as_exit);
+
+MODULE_AUTHOR("Nick Piggin");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("anticipatory IO scheduler");
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
new file mode 100644 (file)
index 0000000..ecacca9
--- /dev/null
@@ -0,0 +1,2428 @@
+/*
+ *  linux/drivers/block/cfq-iosched.c
+ *
+ *  CFQ, or complete fairness queueing, disk scheduler.
+ *
+ *  Based on ideas from a previously unfinished io
+ *  scheduler (round robin per-process disk scheduling) and from Andrea Arcangeli.
+ *
+ *  Copyright (C) 2003 Jens Axboe <axboe@suse.de>
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/hash.h>
+#include <linux/rbtree.h>
+#include <linux/mempool.h>
+#include <linux/ioprio.h>
+#include <linux/writeback.h>
+
+/*
+ * tunables
+ */
+static int cfq_quantum = 4;            /* max queue in one round of service */
+static int cfq_queued = 8;             /* minimum rq allocate limit per-queue*/
+static int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
+static int cfq_back_max = 16 * 1024;   /* maximum backwards seek, in KiB */
+static int cfq_back_penalty = 2;       /* penalty of a backwards seek */
+
+static int cfq_slice_sync = HZ / 10;
+static int cfq_slice_async = HZ / 25;
+static int cfq_slice_async_rq = 2;
+static int cfq_slice_idle = HZ / 100;
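+
+/*
+ * the slice values above are in jiffies: HZ/10 is ~100ms per sync slice,
+ * HZ/25 is ~40ms per async slice and HZ/100 is ~10ms of idle wait
+ */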
+
+#define CFQ_IDLE_GRACE         (HZ / 10)
+#define CFQ_SLICE_SCALE                (5)
+
+#define CFQ_KEY_ASYNC          (0)
+#define CFQ_KEY_ANY            (0xffff)
+
+/*
+ * disable queueing at the driver/hardware level
+ */
+static int cfq_max_depth = 2;
+
+/*
+ * for the hash of cfqq inside the cfqd
+ */
+#define CFQ_QHASH_SHIFT                6
+#define CFQ_QHASH_ENTRIES      (1 << CFQ_QHASH_SHIFT)
+#define list_entry_qhash(entry)        hlist_entry((entry), struct cfq_queue, cfq_hash)
+
+/*
+ * for the hash of crq inside the cfqq
+ */
+#define CFQ_MHASH_SHIFT                6
+#define CFQ_MHASH_BLOCK(sec)   ((sec) >> 3)
+#define CFQ_MHASH_ENTRIES      (1 << CFQ_MHASH_SHIFT)
+#define CFQ_MHASH_FN(sec)      hash_long(CFQ_MHASH_BLOCK(sec), CFQ_MHASH_SHIFT)
+#define rq_hash_key(rq)                ((rq)->sector + (rq)->nr_sectors)
+#define list_entry_hash(ptr)   hlist_entry((ptr), struct cfq_rq, hash)
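+/*
+ * crqs are hashed on their end sector (rq_hash_key) in 4KiB (8 sector)
+ * buckets, so cfq_merge() can cheaply find a queued request that ends
+ * exactly where a new bio starts, i.e. a back merge candidate
+ */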
+
+#define list_entry_cfqq(ptr)   list_entry((ptr), struct cfq_queue, cfq_list)
+#define list_entry_fifo(ptr)   list_entry((ptr), struct request, queuelist)
+
+#define RQ_DATA(rq)            (rq)->elevator_private
+
+/*
+ * rb-tree defines
+ */
+#define RB_NONE                        (2)
+#define RB_EMPTY(node)         ((node)->rb_node == NULL)
+#define RB_CLEAR_COLOR(node)   (node)->rb_color = RB_NONE
+#define RB_CLEAR(node)         do {    \
+       (node)->rb_parent = NULL;       \
+       RB_CLEAR_COLOR((node));         \
+       (node)->rb_right = NULL;        \
+       (node)->rb_left = NULL;         \
+} while (0)
+#define RB_CLEAR_ROOT(root)    ((root)->rb_node = NULL)
+#define rb_entry_crq(node)     rb_entry((node), struct cfq_rq, rb_node)
+#define rq_rb_key(rq)          (rq)->sector
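+/*
+ * RB_NONE is a colour value outside the normal red/black values; RB_CLEAR
+ * uses it to mark a node that is not currently linked into an rbtree
+ */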
+
+static kmem_cache_t *crq_pool;
+static kmem_cache_t *cfq_pool;
+static kmem_cache_t *cfq_ioc_pool;
+
+#define CFQ_PRIO_LISTS         IOPRIO_BE_NR
+#define cfq_class_idle(cfqq)   ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
+#define cfq_class_be(cfqq)     ((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
+#define cfq_class_rt(cfqq)     ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
+
+#define ASYNC                  (0)
+#define SYNC                   (1)
+
+#define cfq_cfqq_dispatched(cfqq)      \
+       ((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC])
+
+#define cfq_cfqq_class_sync(cfqq)      ((cfqq)->key != CFQ_KEY_ASYNC)
+
+#define cfq_cfqq_sync(cfqq)            \
+       (cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])
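+/*
+ * a queue whose key is not CFQ_KEY_ASYNC (typically the pid of the issuing
+ * process) is "class sync"; it counts as sync for slice purposes while it
+ * is class sync or still has sync requests on dispatch
+ */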
+
+/*
+ * Per block device queue structure
+ */
+struct cfq_data {
+       atomic_t ref;
+       request_queue_t *queue;
+
+       /*
+        * rr list of queues with requests and the count of them
+        */
+       struct list_head rr_list[CFQ_PRIO_LISTS];
+       struct list_head busy_rr;
+       struct list_head cur_rr;
+       struct list_head idle_rr;
+       unsigned int busy_queues;
+
+       /*
+        * non-ordered list of empty cfqq's
+        */
+       struct list_head empty_list;
+
+       /*
+        * cfqq lookup hash
+        */
+       struct hlist_head *cfq_hash;
+
+       /*
+        * global crq hash for all queues
+        */
+       struct hlist_head *crq_hash;
+
+       unsigned int max_queued;
+
+       mempool_t *crq_pool;
+
+       int rq_in_driver;
+
+       /*
+        * schedule slice state info
+        */
+       /*
+        * idle window management
+        */
+       struct timer_list idle_slice_timer;
+       struct work_struct unplug_work;
+
+       struct cfq_queue *active_queue;
+       struct cfq_io_context *active_cic;
+       int cur_prio, cur_end_prio;
+       unsigned int dispatch_slice;
+
+       struct timer_list idle_class_timer;
+
+       sector_t last_sector;
+       unsigned long last_end_request;
+
+       unsigned int rq_starved;
+
+       /*
+        * tunables, see top of file
+        */
+       unsigned int cfq_quantum;
+       unsigned int cfq_queued;
+       unsigned int cfq_fifo_expire[2];
+       unsigned int cfq_back_penalty;
+       unsigned int cfq_back_max;
+       unsigned int cfq_slice[2];
+       unsigned int cfq_slice_async_rq;
+       unsigned int cfq_slice_idle;
+       unsigned int cfq_max_depth;
+};
+
+/*
+ * Per process-grouping structure
+ */
+struct cfq_queue {
+       /* reference count */
+       atomic_t ref;
+       /* parent cfq_data */
+       struct cfq_data *cfqd;
+       /* cfqq lookup hash */
+       struct hlist_node cfq_hash;
+       /* hash key */
+       unsigned int key;
+       /* on either rr or empty list of cfqd */
+       struct list_head cfq_list;
+       /* sorted list of pending requests */
+       struct rb_root sort_list;
+       /* if fifo isn't expired, next request to serve */
+       struct cfq_rq *next_crq;
+       /* requests queued in sort_list */
+       int queued[2];
+       /* currently allocated requests */
+       int allocated[2];
+       /* fifo list of requests in sort_list */
+       struct list_head fifo;
+
+       unsigned long slice_start;
+       unsigned long slice_end;
+       unsigned long slice_left;
+       unsigned long service_last;
+
+       /* number of requests that are on the dispatch list */
+       int on_dispatch[2];
+
+       /* io prio of this group */
+       unsigned short ioprio, org_ioprio;
+       unsigned short ioprio_class, org_ioprio_class;
+
+       /* various state flags, see below */
+       unsigned int flags;
+};
+
+struct cfq_rq {
+       struct rb_node rb_node;
+       sector_t rb_key;
+       struct request *request;
+       struct hlist_node hash;
+
+       struct cfq_queue *cfq_queue;
+       struct cfq_io_context *io_context;
+
+       unsigned int crq_flags;
+};
+
+enum cfqq_state_flags {
+       CFQ_CFQQ_FLAG_on_rr = 0,
+       CFQ_CFQQ_FLAG_wait_request,
+       CFQ_CFQQ_FLAG_must_alloc,
+       CFQ_CFQQ_FLAG_must_alloc_slice,
+       CFQ_CFQQ_FLAG_must_dispatch,
+       CFQ_CFQQ_FLAG_fifo_expire,
+       CFQ_CFQQ_FLAG_idle_window,
+       CFQ_CFQQ_FLAG_prio_changed,
+       CFQ_CFQQ_FLAG_expired,
+};
+
+#define CFQ_CFQQ_FNS(name)                                             \
+static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)                \
+{                                                                      \
+       cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);                     \
+}                                                                      \
+static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)       \
+{                                                                      \
+       cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                    \
+}                                                                      \
+static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)                \
+{                                                                      \
+       return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;        \
+}
+
+CFQ_CFQQ_FNS(on_rr);
+CFQ_CFQQ_FNS(wait_request);
+CFQ_CFQQ_FNS(must_alloc);
+CFQ_CFQQ_FNS(must_alloc_slice);
+CFQ_CFQQ_FNS(must_dispatch);
+CFQ_CFQQ_FNS(fifo_expire);
+CFQ_CFQQ_FNS(idle_window);
+CFQ_CFQQ_FNS(prio_changed);
+CFQ_CFQQ_FNS(expired);
+#undef CFQ_CFQQ_FNS
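+/*
+ * each CFQ_CFQQ_FNS(foo) line above expands to three small helpers,
+ * cfq_mark_cfqq_foo(), cfq_clear_cfqq_foo() and cfq_cfqq_foo(), which set,
+ * clear and test the matching bit in cfqq->flags
+ */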
+
+enum cfq_rq_state_flags {
+       CFQ_CRQ_FLAG_is_sync = 0,
+};
+
+#define CFQ_CRQ_FNS(name)                                              \
+static inline void cfq_mark_crq_##name(struct cfq_rq *crq)             \
+{                                                                      \
+       crq->crq_flags |= (1 << CFQ_CRQ_FLAG_##name);                   \
+}                                                                      \
+static inline void cfq_clear_crq_##name(struct cfq_rq *crq)            \
+{                                                                      \
+       crq->crq_flags &= ~(1 << CFQ_CRQ_FLAG_##name);                  \
+}                                                                      \
+static inline int cfq_crq_##name(const struct cfq_rq *crq)             \
+{                                                                      \
+       return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0;      \
+}
+
+CFQ_CRQ_FNS(is_sync);
+#undef CFQ_CRQ_FNS
+
+static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
+static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
+static void cfq_put_cfqd(struct cfq_data *cfqd);
+
+#define process_sync(tsk)      ((tsk)->flags & PF_SYNCWRITE)
+
+/*
+ * lots of deadline iosched dupes, can be abstracted later...
+ */
+static inline void cfq_del_crq_hash(struct cfq_rq *crq)
+{
+       hlist_del_init(&crq->hash);
+}
+
+static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
+{
+       const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));
+
+       hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
+}
+
+static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
+{
+       struct hlist_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
+       struct hlist_node *entry, *next;
+
+       hlist_for_each_safe(entry, next, hash_list) {
+               struct cfq_rq *crq = list_entry_hash(entry);
+               struct request *__rq = crq->request;
+
+               if (!rq_mergeable(__rq)) {
+                       cfq_del_crq_hash(crq);
+                       continue;
+               }
+
+               if (rq_hash_key(__rq) == offset)
+                       return __rq;
+       }
+
+       return NULL;
+}
+
+/*
+ * scheduler run of queue, if there are requests pending and no one in the
+ * driver that will restart queueing
+ */
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
+{
+       if (!cfqd->rq_in_driver && cfqd->busy_queues)
+               kblockd_schedule_work(&cfqd->unplug_work);
+}
+
+static int cfq_queue_empty(request_queue_t *q)
+{
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+
+       return !cfqd->busy_queues;
+}
+
+/*
+ * Lifted from AS - choose which of crq1 and crq2 is best served now.
+ * We choose the request that is closest to the head right now. Distance
+ * behind the head are penalized and only allowed to a certain extent.
+ */
+static struct cfq_rq *
+cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
+{
+       sector_t last, s1, s2, d1 = 0, d2 = 0;
+       int r1_wrap = 0, r2_wrap = 0;   /* requests are behind the disk head */
+       unsigned long back_max;
+
+       if (crq1 == NULL || crq1 == crq2)
+               return crq2;
+       if (crq2 == NULL)
+               return crq1;
+
+       if (cfq_crq_is_sync(crq1) && !cfq_crq_is_sync(crq2))
+               return crq1;
+       else if (cfq_crq_is_sync(crq2) && !cfq_crq_is_sync(crq1))
+               return crq2;
+
+       s1 = crq1->request->sector;
+       s2 = crq2->request->sector;
+
+       last = cfqd->last_sector;
+
+       /*
+        * by definition, 1KiB is 2 sectors
+        */
+       back_max = cfqd->cfq_back_max * 2;
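+
+       /*
+        * e.g. with the default cfq_back_max of 16*1024 (KiB) this allows
+        * seeks of up to 32768 sectors (16MiB) behind the head, charged at
+        * cfq_back_penalty times the equivalent forward distance below
+        */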
+
+       /*
+        * Strict one way elevator _except_ in the case where we allow
+        * short backward seeks which are biased as twice the cost of a
+        * similar forward seek.
+        */
+       if (s1 >= last)
+               d1 = s1 - last;
+       else if (s1 + back_max >= last)
+               d1 = (last - s1) * cfqd->cfq_back_penalty;
+       else
+               r1_wrap = 1;
+
+       if (s2 >= last)
+               d2 = s2 - last;
+       else if (s2 + back_max >= last)
+               d2 = (last - s2) * cfqd->cfq_back_penalty;
+       else
+               r2_wrap = 1;
+
+       /* Found required data */
+       if (!r1_wrap && r2_wrap)
+               return crq1;
+       else if (!r2_wrap && r1_wrap)
+               return crq2;
+       else if (r1_wrap && r2_wrap) {
+               /* both behind the head */
+               if (s1 <= s2)
+                       return crq1;
+               else
+                       return crq2;
+       }
+
+       /* Both requests in front of the head */
+       if (d1 < d2)
+               return crq1;
+       else if (d2 < d1)
+               return crq2;
+       else {
+               if (s1 >= s2)
+                       return crq1;
+               else
+                       return crq2;
+       }
+}
+
+/*
+ * would be nice to take fifo expire time into account as well
+ */
+static struct cfq_rq *
+cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+                 struct cfq_rq *last)
+{
+       struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
+       struct rb_node *rbnext, *rbprev;
+
+       if (!(rbnext = rb_next(&last->rb_node))) {
+               rbnext = rb_first(&cfqq->sort_list);
+               if (rbnext == &last->rb_node)
+                       rbnext = NULL;
+       }
+
+       rbprev = rb_prev(&last->rb_node);
+
+       if (rbprev)
+               crq_prev = rb_entry_crq(rbprev);
+       if (rbnext)
+               crq_next = rb_entry_crq(rbnext);
+
+       return cfq_choose_req(cfqd, crq_next, crq_prev);
+}
+
+static void cfq_update_next_crq(struct cfq_rq *crq)
+{
+       struct cfq_queue *cfqq = crq->cfq_queue;
+
+       if (cfqq->next_crq == crq)
+               cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
+}
+
+static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
+{
+       struct cfq_data *cfqd = cfqq->cfqd;
+       struct list_head *list, *entry;
+
+       BUG_ON(!cfq_cfqq_on_rr(cfqq));
+
+       list_del(&cfqq->cfq_list);
+
+       if (cfq_class_rt(cfqq))
+               list = &cfqd->cur_rr;
+       else if (cfq_class_idle(cfqq))
+               list = &cfqd->idle_rr;
+       else {
+               /*
+                * if cfqq has requests in flight, don't allow it to be
+                * found in cfq_set_active_queue before it has finished them.
+                * has lots of io pending vs one that only generates io
+                * sporadically or synchronously
+                * sporadically or synchronously
+                */
+               if (cfq_cfqq_dispatched(cfqq))
+                       list = &cfqd->busy_rr;
+               else
+                       list = &cfqd->rr_list[cfqq->ioprio];
+       }
+
+       /*
+        * if queue was preempted, just add to front to be fair. busy_rr
+        * isn't sorted.
+        */
+       if (preempted || list == &cfqd->busy_rr) {
+               list_add(&cfqq->cfq_list, list);
+               return;
+       }
+
+       /*
+        * sort by when queue was last serviced
+        */
+       entry = list;
+       while ((entry = entry->prev) != list) {
+               struct cfq_queue *__cfqq = list_entry_cfqq(entry);
+
+               if (!__cfqq->service_last)
+                       break;
+               if (time_before(__cfqq->service_last, cfqq->service_last))
+                       break;
+       }
+
+       list_add(&cfqq->cfq_list, entry);
+}
+
+/*
+ * add to busy list of queues for service, trying to be fair in ordering
+ * the pending list according to last request service
+ */
+static inline void
+cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       BUG_ON(cfq_cfqq_on_rr(cfqq));
+       cfq_mark_cfqq_on_rr(cfqq);
+       cfqd->busy_queues++;
+
+       cfq_resort_rr_list(cfqq, 0);
+}
+
+static inline void
+cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       BUG_ON(!cfq_cfqq_on_rr(cfqq));
+       cfq_clear_cfqq_on_rr(cfqq);
+       list_move(&cfqq->cfq_list, &cfqd->empty_list);
+
+       BUG_ON(!cfqd->busy_queues);
+       cfqd->busy_queues--;
+}
+
+/*
+ * rb tree support functions
+ */
+static inline void cfq_del_crq_rb(struct cfq_rq *crq)
+{
+       struct cfq_queue *cfqq = crq->cfq_queue;
+       struct cfq_data *cfqd = cfqq->cfqd;
+       const int sync = cfq_crq_is_sync(crq);
+
+       BUG_ON(!cfqq->queued[sync]);
+       cfqq->queued[sync]--;
+
+       cfq_update_next_crq(crq);
+
+       rb_erase(&crq->rb_node, &cfqq->sort_list);
+       RB_CLEAR_COLOR(&crq->rb_node);
+
+       if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
+               cfq_del_cfqq_rr(cfqd, cfqq);
+}
+
+static struct cfq_rq *
+__cfq_add_crq_rb(struct cfq_rq *crq)
+{
+       struct rb_node **p = &crq->cfq_queue->sort_list.rb_node;
+       struct rb_node *parent = NULL;
+       struct cfq_rq *__crq;
+
+       while (*p) {
+               parent = *p;
+               __crq = rb_entry_crq(parent);
+
+               if (crq->rb_key < __crq->rb_key)
+                       p = &(*p)->rb_left;
+               else if (crq->rb_key > __crq->rb_key)
+                       p = &(*p)->rb_right;
+               else
+                       return __crq;
+       }
+
+       rb_link_node(&crq->rb_node, parent, p);
+       return NULL;
+}
+
+static void cfq_add_crq_rb(struct cfq_rq *crq)
+{
+       struct cfq_queue *cfqq = crq->cfq_queue;
+       struct cfq_data *cfqd = cfqq->cfqd;
+       struct request *rq = crq->request;
+       struct cfq_rq *__alias;
+
+       crq->rb_key = rq_rb_key(rq);
+       cfqq->queued[cfq_crq_is_sync(crq)]++;
+
+       /*
+        * looks a little odd, but the first insert might return an alias.
+        * if that happens, put the alias on the dispatch list
+        */
+       while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
+               cfq_dispatch_insert(cfqd->queue, __alias);
+
+       rb_insert_color(&crq->rb_node, &cfqq->sort_list);
+
+       if (!cfq_cfqq_on_rr(cfqq))
+               cfq_add_cfqq_rr(cfqd, cfqq);
+
+       /*
+        * check if this request is a better next-serve candidate
+        */
+       cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
+}
+
+static inline void
+cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
+{
+       rb_erase(&crq->rb_node, &cfqq->sort_list);
+       cfqq->queued[cfq_crq_is_sync(crq)]--;
+
+       cfq_add_crq_rb(crq);
+}
+
+static struct request *cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
+
+{
+       struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid, CFQ_KEY_ANY);
+       struct rb_node *n;
+
+       if (!cfqq)
+               goto out;
+
+       n = cfqq->sort_list.rb_node;
+       while (n) {
+               struct cfq_rq *crq = rb_entry_crq(n);
+
+               if (sector < crq->rb_key)
+                       n = n->rb_left;
+               else if (sector > crq->rb_key)
+                       n = n->rb_right;
+               else
+                       return crq->request;
+       }
+
+out:
+       return NULL;
+}
+
+static void cfq_activate_request(request_queue_t *q, struct request *rq)
+{
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+
+       cfqd->rq_in_driver++;
+}
+
+static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
+{
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+
+       WARN_ON(!cfqd->rq_in_driver);
+       cfqd->rq_in_driver--;
+}
+
+static void cfq_remove_request(struct request *rq)
+{
+       struct cfq_rq *crq = RQ_DATA(rq);
+
+       list_del_init(&rq->queuelist);
+       cfq_del_crq_rb(crq);
+       cfq_del_crq_hash(crq);
+}
+
+static int
+cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
+{
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+       struct request *__rq;
+       int ret;
+
+       __rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
+       if (__rq && elv_rq_merge_ok(__rq, bio)) {
+               ret = ELEVATOR_BACK_MERGE;
+               goto out;
+       }
+
+       __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio));
+       if (__rq && elv_rq_merge_ok(__rq, bio)) {
+               ret = ELEVATOR_FRONT_MERGE;
+               goto out;
+       }
+
+       return ELEVATOR_NO_MERGE;
+out:
+       *req = __rq;
+       return ret;
+}
+
+static void cfq_merged_request(request_queue_t *q, struct request *req)
+{
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+       struct cfq_rq *crq = RQ_DATA(req);
+
+       cfq_del_crq_hash(crq);
+       cfq_add_crq_hash(cfqd, crq);
+
+       if (rq_rb_key(req) != crq->rb_key) {
+               struct cfq_queue *cfqq = crq->cfq_queue;
+
+               cfq_update_next_crq(crq);
+               cfq_reposition_crq_rb(cfqq, crq);
+       }
+}
+
+static void
+cfq_merged_requests(request_queue_t *q, struct request *rq,
+                   struct request *next)
+{
+       cfq_merged_request(q, rq);
+
+       /*
+        * reposition in fifo if next is older than rq
+        */
+       if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
+           time_before(next->start_time, rq->start_time))
+               list_move(&rq->queuelist, &next->queuelist);
+
+       cfq_remove_request(next);
+}
+
+static inline void
+__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       if (cfqq) {
+               /*
+                * stop potential idle class queues waiting service
+                */
+               del_timer(&cfqd->idle_class_timer);
+
+               cfqq->slice_start = jiffies;
+               cfqq->slice_end = 0;
+               cfqq->slice_left = 0;
+               cfq_clear_cfqq_must_alloc_slice(cfqq);
+               cfq_clear_cfqq_fifo_expire(cfqq);
+               cfq_clear_cfqq_expired(cfqq);
+       }
+
+       cfqd->active_queue = cfqq;
+}
+
+/*
+ * 0
+ * 0,1
+ * 0,1,2
+ * 0,1,2,3
+ * 0,1,2,3,4
+ * 0,1,2,3,4,5
+ * 0,1,2,3,4,5,6
+ * 0,1,2,3,4,5,6,7
+ */
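+/*
+ * i.e. successive scans widen the range of priority levels considered, so
+ * over a full cycle rr_list[0] is eligible in every round while rr_list[7]
+ * is only eligible in the last one; lower ioprio values (higher priority)
+ * thus get a proportionally larger share of service
+ */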
+static int cfq_get_next_prio_level(struct cfq_data *cfqd)
+{
+       int prio, wrap;
+
+       prio = -1;
+       wrap = 0;
+       do {
+               int p;
+
+               for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
+                       if (!list_empty(&cfqd->rr_list[p])) {
+                               prio = p;
+                               break;
+                       }
+               }
+
+               if (prio != -1)
+                       break;
+               cfqd->cur_prio = 0;
+               if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
+                       cfqd->cur_end_prio = 0;
+                       if (wrap)
+                               break;
+                       wrap = 1;
+               }
+       } while (1);
+
+       if (unlikely(prio == -1))
+               return -1;
+
+       BUG_ON(prio >= CFQ_PRIO_LISTS);
+
+       list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);
+
+       cfqd->cur_prio = prio + 1;
+       if (cfqd->cur_prio > cfqd->cur_end_prio) {
+               cfqd->cur_end_prio = cfqd->cur_prio;
+               cfqd->cur_prio = 0;
+       }
+       if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
+               cfqd->cur_prio = 0;
+               cfqd->cur_end_prio = 0;
+       }
+
+       return prio;
+}
+
+static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
+{
+       struct cfq_queue *cfqq;
+
+       /*
+        * if current queue is expired but not done with its requests yet,
+        * wait for that to happen
+        */
+       if ((cfqq = cfqd->active_queue) != NULL) {
+               if (cfq_cfqq_expired(cfqq) && cfq_cfqq_dispatched(cfqq))
+                       return NULL;
+       }
+
+       /*
+        * if current list is non-empty, grab first entry. if it is empty,
+        * get the next prio level, which splices entries into cur_rr, and
+        * then grab the first entry if any were spliced
+        */
+       if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1)
+               cfqq = list_entry_cfqq(cfqd->cur_rr.next);
+
+       /*
+        * if we have idle queues and no rt or be queues had pending
+        * requests, either allow immediate service if the grace period
+        * has passed or arm the idle grace timer
+        */
+       if (!cfqq && !list_empty(&cfqd->idle_rr)) {
+               unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
+
+               if (time_after_eq(jiffies, end))
+                       cfqq = list_entry_cfqq(cfqd->idle_rr.next);
+               else
+                       mod_timer(&cfqd->idle_class_timer, end);
+       }
+
+       __cfq_set_active_queue(cfqd, cfqq);
+       return cfqq;
+}
+
+/*
+ * current cfqq expired its slice (or was too idle), select new one
+ */
+static void
+__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+                   int preempted)
+{
+       unsigned long now = jiffies;
+
+       if (cfq_cfqq_wait_request(cfqq))
+               del_timer(&cfqd->idle_slice_timer);
+
+       if (!preempted && !cfq_cfqq_dispatched(cfqq))
+               cfqq->service_last = now;
+
+       cfq_clear_cfqq_must_dispatch(cfqq);
+       cfq_clear_cfqq_wait_request(cfqq);
+
+       /*
+        * store what was left of this slice, if the queue idled out
+        * or was preempted
+        */
+       if (time_after(now, cfqq->slice_end))
+               cfqq->slice_left = now - cfqq->slice_end;
+       else
+               cfqq->slice_left = 0;
+
+       if (cfq_cfqq_on_rr(cfqq))
+               cfq_resort_rr_list(cfqq, preempted);
+
+       if (cfqq == cfqd->active_queue)
+               cfqd->active_queue = NULL;
+
+       if (cfqd->active_cic) {
+               put_io_context(cfqd->active_cic->ioc);
+               cfqd->active_cic = NULL;
+       }
+
+       cfqd->dispatch_slice = 0;
+}
+
+static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
+{
+       struct cfq_queue *cfqq = cfqd->active_queue;
+
+       if (cfqq) {
+               /*
+                * use deferred expiry if there are requests in progress, so
+                * as not to disturb the slice of the next queue
+                */
+               if (cfq_cfqq_dispatched(cfqq))
+                       cfq_mark_cfqq_expired(cfqq);
+               else
+                       __cfq_slice_expired(cfqd, cfqq, preempted);
+       }
+}
+
+static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+
+{
+       WARN_ON(!RB_EMPTY(&cfqq->sort_list));
+       WARN_ON(cfqq != cfqd->active_queue);
+
+       /*
+        * idle is disabled, either manually or by past process history
+        */
+       if (!cfqd->cfq_slice_idle)
+               return 0;
+       if (!cfq_cfqq_idle_window(cfqq))
+               return 0;
+       /*
+        * task has exited, don't wait
+        */
+       if (cfqd->active_cic && !cfqd->active_cic->ioc->task)
+               return 0;
+
+       cfq_mark_cfqq_must_dispatch(cfqq);
+       cfq_mark_cfqq_wait_request(cfqq);
+
+       if (!timer_pending(&cfqd->idle_slice_timer)) {
+               unsigned long slice_left = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
+
+               cfqd->idle_slice_timer.expires = jiffies + slice_left;
+               add_timer(&cfqd->idle_slice_timer);
+       }
+
+       return 1;
+}
+
+static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq)
+{
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+       struct cfq_queue *cfqq = crq->cfq_queue;
+
+       cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
+       cfq_remove_request(crq->request);
+       cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
+       elv_dispatch_sort(q, crq->request);
+}
+
+/*
+ * return expired entry, or NULL to just start from scratch in rbtree
+ */
+static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
+{
+       struct cfq_data *cfqd = cfqq->cfqd;
+       struct request *rq;
+       struct cfq_rq *crq;
+
+       if (cfq_cfqq_fifo_expire(cfqq))
+               return NULL;
+
+       if (!list_empty(&cfqq->fifo)) {
+               int fifo = cfq_cfqq_class_sync(cfqq);
+
+               crq = RQ_DATA(list_entry_fifo(cfqq->fifo.next));
+               rq = crq->request;
+               if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
+                       cfq_mark_cfqq_fifo_expire(cfqq);
+                       return crq;
+               }
+       }
+
+       return NULL;
+}
+
+/*
+ * Scale schedule slice based on io priority. Use the sync time slice only
+ * if a queue is marked sync and has sync io queued. A sync queue with async
+ * io only should not get the full sync slice length.
+ */
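+/*
+ * with CFQ_SLICE_SCALE == 5 this is roughly one extra fifth of the base
+ * slice per priority step: ioprio 0 gets ~1.8x the base slice, ioprio 4
+ * exactly the base slice and ioprio 7 ~0.4x of it
+ */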
+static inline int
+cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];
+
+       WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
+
+       return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
+}
+
+static inline void
+cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
+}
+
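+/*
+ * cap on how many requests an async queue may dispatch in one slice; with
+ * the default cfq_slice_async_rq of 2 this is 2*2*(8 - ioprio), e.g. 32
+ * requests for ioprio 0 and 4 for ioprio 7
+ */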
+static inline int
+cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       const int base_rq = cfqd->cfq_slice_async_rq;
+
+       WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
+
+       return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
+}
+
+/*
+ * get next queue for service
+ */
+static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd, int force)
+{
+       unsigned long now = jiffies;
+       struct cfq_queue *cfqq;
+
+       cfqq = cfqd->active_queue;
+       if (!cfqq)
+               goto new_queue;
+
+       if (cfq_cfqq_expired(cfqq))
+               goto new_queue;
+
+       /*
+        * slice has expired
+        */
+       if (!cfq_cfqq_must_dispatch(cfqq) && time_after(now, cfqq->slice_end))
+               goto expire;
+
+       /*
+        * if queue has requests, dispatch one. if not, check if
+        * enough slice is left to wait for one
+        */
+       if (!RB_EMPTY(&cfqq->sort_list))
+               goto keep_queue;
+       else if (!force && cfq_cfqq_class_sync(cfqq) &&
+                time_before(now, cfqq->slice_end)) {
+               if (cfq_arm_slice_timer(cfqd, cfqq))
+                       return NULL;
+       }
+
+expire:
+       cfq_slice_expired(cfqd, 0);
+new_queue:
+       cfqq = cfq_set_active_queue(cfqd);
+keep_queue:
+       return cfqq;
+}
+
+static int
+__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+                       int max_dispatch)
+{
+       int dispatched = 0;
+
+       BUG_ON(RB_EMPTY(&cfqq->sort_list));
+
+       do {
+               struct cfq_rq *crq;
+
+               /*
+                * follow expired path, else get first next available
+                */
+               if ((crq = cfq_check_fifo(cfqq)) == NULL)
+                       crq = cfqq->next_crq;
+
+               /*
+                * finally, insert request into driver dispatch list
+                */
+               cfq_dispatch_insert(cfqd->queue, crq);
+
+               cfqd->dispatch_slice++;
+               dispatched++;
+
+               if (!cfqd->active_cic) {
+                       atomic_inc(&crq->io_context->ioc->refcount);
+                       cfqd->active_cic = crq->io_context;
+               }
+
+               if (RB_EMPTY(&cfqq->sort_list))
+                       break;
+
+       } while (dispatched < max_dispatch);
+
+       /*
+        * if slice end isn't set yet, set it. if at least one request was
+        * sync, use the sync time slice value
+        */
+       if (!cfqq->slice_end)
+               cfq_set_prio_slice(cfqd, cfqq);
+
+       /*
+        * expire an async queue immediately if it has used up its slice. idle
+        * queues always expire after one dispatch round.
+        */
+       if ((!cfq_cfqq_sync(cfqq) &&
+           cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
+           cfq_class_idle(cfqq))
+               cfq_slice_expired(cfqd, 0);
+
+       return dispatched;
+}
+
+static int
+cfq_dispatch_requests(request_queue_t *q, int force)
+{
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+       struct cfq_queue *cfqq;
+
+       if (!cfqd->busy_queues)
+               return 0;
+
+       cfqq = cfq_select_queue(cfqd, force);
+       if (cfqq) {
+               int max_dispatch;
+
+               /*
+                * if idle window is disabled, allow queue buildup
+                */
+               if (!cfq_cfqq_idle_window(cfqq) &&
+                   cfqd->rq_in_driver >= cfqd->cfq_max_depth)
+                       return 0;
+
+               cfq_clear_cfqq_must_dispatch(cfqq);
+               cfq_clear_cfqq_wait_request(cfqq);
+               del_timer(&cfqd->idle_slice_timer);
+
+               if (!force) {
+                       max_dispatch = cfqd->cfq_quantum;
+                       if (cfq_class_idle(cfqq))
+                               max_dispatch = 1;
+               } else
+                       max_dispatch = INT_MAX;
+
+               return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+       }
+
+       return 0;
+}
+
+/*
+ * task holds one reference to the queue, dropped when task exits. each crq
+ * in-flight on this queue also holds a reference, dropped when crq is freed.
+ *
+ * queue lock must be held here.
+ */
+static void cfq_put_queue(struct cfq_queue *cfqq)
+{
+       struct cfq_data *cfqd = cfqq->cfqd;
+
+       BUG_ON(atomic_read(&cfqq->ref) <= 0);
+
+       if (!atomic_dec_and_test(&cfqq->ref))
+               return;
+
+       BUG_ON(rb_first(&cfqq->sort_list));
+       BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
+       BUG_ON(cfq_cfqq_on_rr(cfqq));
+
+       if (unlikely(cfqd->active_queue == cfqq)) {
+               __cfq_slice_expired(cfqd, cfqq, 0);
+               cfq_schedule_dispatch(cfqd);
+       }
+
+       cfq_put_cfqd(cfqq->cfqd);
+
+       /*
+        * it's on the empty list and still hashed
+        */
+       list_del(&cfqq->cfq_list);
+       hlist_del(&cfqq->cfq_hash);
+       kmem_cache_free(cfq_pool, cfqq);
+}
+
+static inline struct cfq_queue *
+__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
+                   const int hashval)
+{
+       struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
+       struct hlist_node *entry, *next;
+
+       hlist_for_each_safe(entry, next, hash_list) {
+               struct cfq_queue *__cfqq = list_entry_qhash(entry);
+               const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->ioprio_class, __cfqq->ioprio);
+
+               if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY))
+                       return __cfqq;
+       }
+
+       return NULL;
+}
+
+static struct cfq_queue *
+cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
+{
+       return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
+}
+
+static void cfq_free_io_context(struct cfq_io_context *cic)
+{
+       struct cfq_io_context *__cic;
+       struct list_head *entry, *next;
+
+       list_for_each_safe(entry, next, &cic->list) {
+               __cic = list_entry(entry, struct cfq_io_context, list);
+               kmem_cache_free(cfq_ioc_pool, __cic);
+       }
+
+       kmem_cache_free(cfq_ioc_pool, cic);
+}
+
+/*
+ * Called with interrupts disabled
+ */
+static void cfq_exit_single_io_context(struct cfq_io_context *cic)
+{
+       struct cfq_data *cfqd = cic->cfqq->cfqd;
+       request_queue_t *q = cfqd->queue;
+
+       WARN_ON(!irqs_disabled());
+
+       spin_lock(q->queue_lock);
+
+       if (unlikely(cic->cfqq == cfqd->active_queue)) {
+               __cfq_slice_expired(cfqd, cic->cfqq, 0);
+               cfq_schedule_dispatch(cfqd);
+       }
+
+       cfq_put_queue(cic->cfqq);
+       cic->cfqq = NULL;
+       spin_unlock(q->queue_lock);
+}
+
+/*
+ * Another task may update the task cic list, if it is doing a queue lookup
+ * on its behalf. cfq_cic_lock excludes such concurrent updates
+ */
+static void cfq_exit_io_context(struct cfq_io_context *cic)
+{
+       struct cfq_io_context *__cic;
+       struct list_head *entry;
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       /*
+        * put the reference this task is holding to the various queues
+        */
+       list_for_each(entry, &cic->list) {
+               __cic = list_entry(entry, struct cfq_io_context, list);
+               cfq_exit_single_io_context(__cic);
+       }
+
+       cfq_exit_single_io_context(cic);
+       local_irq_restore(flags);
+}
+
+static struct cfq_io_context *
+cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
+{
+       struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
+
+       if (cic) {
+               INIT_LIST_HEAD(&cic->list);
+               cic->cfqq = NULL;
+               cic->key = NULL;
+               cic->last_end_request = jiffies;
+               cic->ttime_total = 0;
+               cic->ttime_samples = 0;
+               cic->ttime_mean = 0;
+               cic->dtor = cfq_free_io_context;
+               cic->exit = cfq_exit_io_context;
+       }
+
+       return cic;
+}
+
+static void cfq_init_prio_data(struct cfq_queue *cfqq)
+{
+       struct task_struct *tsk = current;
+       int ioprio_class;
+
+       if (!cfq_cfqq_prio_changed(cfqq))
+               return;
+
+       ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
+       switch (ioprio_class) {
+               default:
+                       printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
+               case IOPRIO_CLASS_NONE:
+                       /*
+                        * no prio set, place us in the middle of the BE classes
+                        */
+                       cfqq->ioprio = task_nice_ioprio(tsk);
+                       cfqq->ioprio_class = IOPRIO_CLASS_BE;
+                       break;
+               case IOPRIO_CLASS_RT:
+                       cfqq->ioprio = task_ioprio(tsk);
+                       cfqq->ioprio_class = IOPRIO_CLASS_RT;
+                       break;
+               case IOPRIO_CLASS_BE:
+                       cfqq->ioprio = task_ioprio(tsk);
+                       cfqq->ioprio_class = IOPRIO_CLASS_BE;
+                       break;
+               case IOPRIO_CLASS_IDLE:
+                       cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
+                       cfqq->ioprio = 7;
+                       cfq_clear_cfqq_idle_window(cfqq);
+                       break;
+       }
+
+       /*
+        * keep track of original prio settings in case we have to temporarily
+        * elevate the priority of this queue
+        */
+       cfqq->org_ioprio = cfqq->ioprio;
+       cfqq->org_ioprio_class = cfqq->ioprio_class;
+
+       if (cfq_cfqq_on_rr(cfqq))
+               cfq_resort_rr_list(cfqq, 0);
+
+       cfq_clear_cfqq_prio_changed(cfqq);
+}
+
+static inline void changed_ioprio(struct cfq_queue *cfqq)
+{
+       if (cfqq) {
+               struct cfq_data *cfqd = cfqq->cfqd;
+
+               spin_lock(cfqd->queue->queue_lock);
+               cfq_mark_cfqq_prio_changed(cfqq);
+               cfq_init_prio_data(cfqq);
+               spin_unlock(cfqd->queue->queue_lock);
+       }
+}
+
+/*
+ * callback from sys_ioprio_set, irqs are disabled
+ */
+static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
+{
+       struct cfq_io_context *cic = ioc->cic;
+
+       changed_ioprio(cic->cfqq);
+
+       list_for_each_entry(cic, &cic->list, list)
+               changed_ioprio(cic->cfqq);
+
+       return 0;
+}
+
+static struct cfq_queue *
+cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio,
+             gfp_t gfp_mask)
+{
+       const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
+       struct cfq_queue *cfqq, *new_cfqq = NULL;
+
+retry:
+       cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
+
+       if (!cfqq) {
+               if (new_cfqq) {
+                       cfqq = new_cfqq;
+                       new_cfqq = NULL;
+               } else if (gfp_mask & __GFP_WAIT) {
+                       spin_unlock_irq(cfqd->queue->queue_lock);
+                       new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
+                       spin_lock_irq(cfqd->queue->queue_lock);
+                       goto retry;
+               } else {
+                       cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
+                       if (!cfqq)
+                               goto out;
+               }
+
+               memset(cfqq, 0, sizeof(*cfqq));
+
+               INIT_HLIST_NODE(&cfqq->cfq_hash);
+               INIT_LIST_HEAD(&cfqq->cfq_list);
+               RB_CLEAR_ROOT(&cfqq->sort_list);
+               INIT_LIST_HEAD(&cfqq->fifo);
+
+               cfqq->key = key;
+               hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
+               atomic_set(&cfqq->ref, 0);
+               cfqq->cfqd = cfqd;
+               atomic_inc(&cfqd->ref);
+               cfqq->service_last = 0;
+               /*
+                * set ->slice_left to allow preemption for a new process
+                */
+               cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
+               cfq_mark_cfqq_idle_window(cfqq);
+               cfq_mark_cfqq_prio_changed(cfqq);
+               cfq_init_prio_data(cfqq);
+       }
+
+       if (new_cfqq)
+               kmem_cache_free(cfq_pool, new_cfqq);
+
+       atomic_inc(&cfqq->ref);
+out:
+       WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
+       return cfqq;
+}
+
+/*
+ * Setup general io context and cfq io context. There can be several cfq
+ * io contexts per general io context, if this process is doing io to more
+ * than one device managed by cfq. Note that caller is holding a reference to
+ * cfqq, so we don't need to worry about it disappearing
+ */
+static struct cfq_io_context *
+cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
+{
+       struct io_context *ioc = NULL;
+       struct cfq_io_context *cic;
+
+       might_sleep_if(gfp_mask & __GFP_WAIT);
+
+       ioc = get_io_context(gfp_mask);
+       if (!ioc)
+               return NULL;
+
+       if ((cic = ioc->cic) == NULL) {
+               cic = cfq_alloc_io_context(cfqd, gfp_mask);
+
+               if (cic == NULL)
+                       goto err;
+
+               /*
+                * manually increment generic io_context usage count, it
+                * cannot go away since we are already holding one ref to it
+                */
+               ioc->cic = cic;
+               ioc->set_ioprio = cfq_ioc_set_ioprio;
+               cic->ioc = ioc;
+               cic->key = cfqd;
+               atomic_inc(&cfqd->ref);
+       } else {
+               struct cfq_io_context *__cic;
+
+               /*
+                * the first cic on the list is actually the head itself
+                */
+               if (cic->key == cfqd)
+                       goto out;
+
+               /*
+                * cic exists, check if we already are there. linear search
+                * should be ok here, the list will usually not be more than
+                * 1 or a few entries long
+                */
+               list_for_each_entry(__cic, &cic->list, list) {
+                       /*
+                        * this process is already holding a reference to
+                        * this queue, so no need to get one more
+                        */
+                       if (__cic->key == cfqd) {
+                               cic = __cic;
+                               goto out;
+                       }
+               }
+
+               /*
+                * nope, process doesn't have a cic associated with this
+                * cfqq yet. get a new one and add to list
+                */
+               __cic = cfq_alloc_io_context(cfqd, gfp_mask);
+               if (__cic == NULL)
+                       goto err;
+
+               __cic->ioc = ioc;
+               __cic->key = cfqd;
+               atomic_inc(&cfqd->ref);
+               list_add(&__cic->list, &cic->list);
+               cic = __cic;
+       }
+
+out:
+       return cic;
+err:
+       put_io_context(ioc);
+       return NULL;
+}
+
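+/*
+ * Keep a decaying average of the process "think time", the gap between the
+ * last request completing and the next one being issued. The average is
+ * held in fixed point scaled by 256, with roughly 1/8 of the weight going
+ * to the newest sample.
+ */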
+static void
+cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
+{
+       unsigned long elapsed, ttime;
+
+       /*
+        * if this context already has stuff queued, thinktime is from
+        * last queue not last end
+        */
+#if 0
+       if (time_after(cic->last_end_request, cic->last_queue))
+               elapsed = jiffies - cic->last_end_request;
+       else
+               elapsed = jiffies - cic->last_queue;
+#else
+       elapsed = jiffies - cic->last_end_request;
+#endif
+
+       ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
+
+       cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
+       cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
+       cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
+}
+
+#define sample_valid(samples)  ((samples) > 80)
+
+/*
+ * Disable idle window if the process thinks too long or seeks so much that
+ * it doesn't matter
+ */
+static void
+cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+                      struct cfq_io_context *cic)
+{
+       int enable_idle = cfq_cfqq_idle_window(cfqq);
+
+       if (!cic->ioc->task || !cfqd->cfq_slice_idle)
+               enable_idle = 0;
+       else if (sample_valid(cic->ttime_samples)) {
+               if (cic->ttime_mean > cfqd->cfq_slice_idle)
+                       enable_idle = 0;
+               else
+                       enable_idle = 1;
+       }
+
+       if (enable_idle)
+               cfq_mark_cfqq_idle_window(cfqq);
+       else
+               cfq_clear_cfqq_idle_window(cfqq);
+}
+
+
+/*
+ * Check if new_cfqq should preempt the currently active queue. Return 0 for
+ * no (or if we aren't sure); a return of 1 will cause a preempt.
+ */
+static int
+cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
+                  struct cfq_rq *crq)
+{
+       struct cfq_queue *cfqq = cfqd->active_queue;
+
+       if (cfq_class_idle(new_cfqq))
+               return 0;
+
+       if (!cfqq)
+               return 1;
+
+       if (cfq_class_idle(cfqq))
+               return 1;
+       if (!cfq_cfqq_wait_request(new_cfqq))
+               return 0;
+       /*
+        * if it doesn't have slice left, forget it
+        */
+       if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
+               return 0;
+       if (cfq_crq_is_sync(crq) && !cfq_cfqq_sync(cfqq))
+               return 1;
+
+       return 0;
+}
+
+/*
+ * cfqq preempts the active queue. if we allowed preempt with no slice left,
+ * let it have half of its nominal slice.
+ */
+static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       struct cfq_queue *__cfqq, *next;
+
+       list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list)
+               cfq_resort_rr_list(__cfqq, 1);
+
+       if (!cfqq->slice_left)
+               cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
+
+       cfqq->slice_end = cfqq->slice_left + jiffies;
+       __cfq_slice_expired(cfqd, cfqq, 1);
+       __cfq_set_active_queue(cfqd, cfqq);
+}
+
+/*
+ * should really be a ll_rw_blk.c helper
+ */
+static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       request_queue_t *q = cfqd->queue;
+
+       if (!blk_queue_plugged(q))
+               q->request_fn(q);
+       else
+               __generic_unplug_device(q);
+}
+
+/*
+ * Called when a new fs request (crq) is added (to cfqq). Check if there's
+ * something we should do about it
+ */
+static void
+cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+                struct cfq_rq *crq)
+{
+       struct cfq_io_context *cic;
+
+       cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
+
+       /*
+        * we never wait for an async request and we don't allow preemption
+        * of an async request. so just return early
+        */
+       if (!cfq_crq_is_sync(crq))
+               return;
+
+       cic = crq->io_context;
+
+       cfq_update_io_thinktime(cfqd, cic);
+       cfq_update_idle_window(cfqd, cfqq, cic);
+
+       cic->last_queue = jiffies;
+
+       if (cfqq == cfqd->active_queue) {
+               /*
+                * if we are waiting for a request for this queue, let it rip
+                * immediately and flag that we must not expire this queue
+                * just now
+                */
+               if (cfq_cfqq_wait_request(cfqq)) {
+                       cfq_mark_cfqq_must_dispatch(cfqq);
+                       del_timer(&cfqd->idle_slice_timer);
+                       cfq_start_queueing(cfqd, cfqq);
+               }
+       } else if (cfq_should_preempt(cfqd, cfqq, crq)) {
+               /*
+                * not the active queue - expire current slice if it is
+                * idle and has expired its mean thinktime, or this new queue
+                * has some old slice time left and is of higher priority
+                */
+               cfq_preempt_queue(cfqd, cfqq);
+               cfq_mark_cfqq_must_dispatch(cfqq);
+               cfq_start_queueing(cfqd, cfqq);
+       }
+}
+
+static void cfq_insert_request(request_queue_t *q, struct request *rq)
+{
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+       struct cfq_rq *crq = RQ_DATA(rq);
+       struct cfq_queue *cfqq = crq->cfq_queue;
+
+       cfq_init_prio_data(cfqq);
+
+       cfq_add_crq_rb(crq);
+
+       list_add_tail(&rq->queuelist, &cfqq->fifo);
+
+       if (rq_mergeable(rq))
+               cfq_add_crq_hash(cfqd, crq);
+
+       cfq_crq_enqueued(cfqd, cfqq, crq);
+}
+
+static void cfq_completed_request(request_queue_t *q, struct request *rq)
+{
+       struct cfq_rq *crq = RQ_DATA(rq);
+       struct cfq_queue *cfqq = crq->cfq_queue;
+       struct cfq_data *cfqd = cfqq->cfqd;
+       const int sync = cfq_crq_is_sync(crq);
+       unsigned long now;
+
+       now = jiffies;
+
+       WARN_ON(!cfqd->rq_in_driver);
+       WARN_ON(!cfqq->on_dispatch[sync]);
+       cfqd->rq_in_driver--;
+       cfqq->on_dispatch[sync]--;
+
+       if (!cfq_class_idle(cfqq))
+               cfqd->last_end_request = now;
+
+       if (!cfq_cfqq_dispatched(cfqq)) {
+               if (cfq_cfqq_on_rr(cfqq)) {
+                       cfqq->service_last = now;
+                       cfq_resort_rr_list(cfqq, 0);
+               }
+               if (cfq_cfqq_expired(cfqq)) {
+                       __cfq_slice_expired(cfqd, cfqq, 0);
+                       cfq_schedule_dispatch(cfqd);
+               }
+       }
+
+       if (cfq_crq_is_sync(crq))
+               crq->io_context->last_end_request = now;
+}
+
+static struct request *
+cfq_former_request(request_queue_t *q, struct request *rq)
+{
+       struct cfq_rq *crq = RQ_DATA(rq);
+       struct rb_node *rbprev = rb_prev(&crq->rb_node);
+
+       if (rbprev)
+               return rb_entry_crq(rbprev)->request;
+
+       return NULL;
+}
+
+static struct request *
+cfq_latter_request(request_queue_t *q, struct request *rq)
+{
+       struct cfq_rq *crq = RQ_DATA(rq);
+       struct rb_node *rbnext = rb_next(&crq->rb_node);
+
+       if (rbnext)
+               return rb_entry_crq(rbnext)->request;
+
+       return NULL;
+}
+
+/*
+ * we temporarily boost lower priority queues if they are holding fs exclusive
+ * resources. they are boosted to normal prio (CLASS_BE/4)
+ */
+static void cfq_prio_boost(struct cfq_queue *cfqq)
+{
+       const int ioprio_class = cfqq->ioprio_class;
+       const int ioprio = cfqq->ioprio;
+
+       if (has_fs_excl()) {
+               /*
+                * boost idle prio on transactions that would lock out other
+                * users of the filesystem
+                */
+               if (cfq_class_idle(cfqq))
+                       cfqq->ioprio_class = IOPRIO_CLASS_BE;
+               if (cfqq->ioprio > IOPRIO_NORM)
+                       cfqq->ioprio = IOPRIO_NORM;
+       } else {
+               /*
+                * check if we need to unboost the queue
+                */
+               if (cfqq->ioprio_class != cfqq->org_ioprio_class)
+                       cfqq->ioprio_class = cfqq->org_ioprio_class;
+               if (cfqq->ioprio != cfqq->org_ioprio)
+                       cfqq->ioprio = cfqq->org_ioprio;
+       }
+
+       /*
+        * refile between round-robin lists if we moved the priority class
+        */
+       if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) &&
+           cfq_cfqq_on_rr(cfqq))
+               cfq_resort_rr_list(cfqq, 0);
+}
+
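+/*
+ * cfq queues are keyed per process for sync io (reads, or any io when
+ * process_sync() says the task is synchronous); async io from all processes
+ * is grouped under the shared CFQ_KEY_ASYNC key.
+ */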
+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
+{
+       if (rw == READ || process_sync(task))
+               return task->pid;
+
+       return CFQ_KEY_ASYNC;
+}
+
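+/*
+ * The compiled (#if 1) variant hands out at most one ELV_MQUEUE_MUST per
+ * slice: a queue that is idling for a request, or is flagged must_alloc,
+ * gets it once; everything else gets ELV_MQUEUE_MAY.
+ */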
+static inline int
+__cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+               struct task_struct *task, int rw)
+{
+#if 1
+       if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
+           !cfq_cfqq_must_alloc_slice(cfqq)) {
+               cfq_mark_cfqq_must_alloc_slice(cfqq);
+               return ELV_MQUEUE_MUST;
+       }
+
+       return ELV_MQUEUE_MAY;
+#else
+       if (!cfqq || task->flags & PF_MEMALLOC)
+               return ELV_MQUEUE_MAY;
+       if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) {
+               if (cfq_cfqq_wait_request(cfqq))
+                       return ELV_MQUEUE_MUST;
+
+               /*
+                * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
+                * can quickly flood the queue with writes from a single task
+                */
+               if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) {
+                       cfq_mark_cfqq_must_alloc_slice(cfqq);
+                       return ELV_MQUEUE_MUST;
+               }
+
+               return ELV_MQUEUE_MAY;
+       }
+       if (cfq_class_idle(cfqq))
+               return ELV_MQUEUE_NO;
+       if (cfqq->allocated[rw] >= cfqd->max_queued) {
+               struct io_context *ioc = get_io_context(GFP_ATOMIC);
+               int ret = ELV_MQUEUE_NO;
+
+               if (ioc && ioc->nr_batch_requests)
+                       ret = ELV_MQUEUE_MAY;
+
+               put_io_context(ioc);
+               return ret;
+       }
+
+       return ELV_MQUEUE_MAY;
+#endif
+}
+
+static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
+{
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+       struct task_struct *tsk = current;
+       struct cfq_queue *cfqq;
+
+       /*
+        * don't force setup of a queue from here, as a call to may_queue
+        * does not necessarily imply that a request actually will be queued.
+        * so just look up a possibly existing queue, or return 'may queue'
+        * if that fails
+        */
+       cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
+       if (cfqq) {
+               cfq_init_prio_data(cfqq);
+               cfq_prio_boost(cfqq);
+
+               return __cfq_may_queue(cfqd, cfqq, tsk, rw);
+       }
+
+       return ELV_MQUEUE_MAY;
+}
+
+static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
+{
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+       struct request_list *rl = &q->rq;
+
+       if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) {
+               smp_mb();
+               if (waitqueue_active(&rl->wait[READ]))
+                       wake_up(&rl->wait[READ]);
+       }
+
+       if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) {
+               smp_mb();
+               if (waitqueue_active(&rl->wait[WRITE]))
+                       wake_up(&rl->wait[WRITE]);
+       }
+}
+
+/*
+ * queue lock held here
+ */
+static void cfq_put_request(request_queue_t *q, struct request *rq)
+{
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+       struct cfq_rq *crq = RQ_DATA(rq);
+
+       if (crq) {
+               struct cfq_queue *cfqq = crq->cfq_queue;
+               const int rw = rq_data_dir(rq);
+
+               BUG_ON(!cfqq->allocated[rw]);
+               cfqq->allocated[rw]--;
+
+               put_io_context(crq->io_context->ioc);
+
+               mempool_free(crq, cfqd->crq_pool);
+               rq->elevator_private = NULL;
+
+               cfq_check_waiters(q, cfqq);
+               cfq_put_queue(cfqq);
+       }
+}
+
+/*
+ * Allocate cfq data structures associated with this request.
+ */
+static int
+cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
+               gfp_t gfp_mask)
+{
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+       struct task_struct *tsk = current;
+       struct cfq_io_context *cic;
+       const int rw = rq_data_dir(rq);
+       pid_t key = cfq_queue_pid(tsk, rw);
+       struct cfq_queue *cfqq;
+       struct cfq_rq *crq;
+       unsigned long flags;
+
+       might_sleep_if(gfp_mask & __GFP_WAIT);
+
+       cic = cfq_get_io_context(cfqd, key, gfp_mask);
+
+       spin_lock_irqsave(q->queue_lock, flags);
+
+       if (!cic)
+               goto queue_fail;
+
+       if (!cic->cfqq) {
+               cfqq = cfq_get_queue(cfqd, key, tsk->ioprio, gfp_mask);
+               if (!cfqq)
+                       goto queue_fail;
+
+               cic->cfqq = cfqq;
+       } else
+               cfqq = cic->cfqq;
+
+       cfqq->allocated[rw]++;
+       cfq_clear_cfqq_must_alloc(cfqq);
+       cfqd->rq_starved = 0;
+       atomic_inc(&cfqq->ref);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
+       if (crq) {
+               RB_CLEAR(&crq->rb_node);
+               crq->rb_key = 0;
+               crq->request = rq;
+               INIT_HLIST_NODE(&crq->hash);
+               crq->cfq_queue = cfqq;
+               crq->io_context = cic;
+
+               if (rw == READ || process_sync(tsk))
+                       cfq_mark_crq_is_sync(crq);
+               else
+                       cfq_clear_crq_is_sync(crq);
+
+               rq->elevator_private = crq;
+               return 0;
+       }
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       cfqq->allocated[rw]--;
+       if (!(cfqq->allocated[0] + cfqq->allocated[1]))
+               cfq_mark_cfqq_must_alloc(cfqq);
+       cfq_put_queue(cfqq);
+queue_fail:
+       if (cic)
+               put_io_context(cic->ioc);
+       /*
+        * mark us rq allocation starved. we need to kickstart the process
+        * ourselves if there are no pending requests that can do it for us.
+        * that would be an extremely rare OOM situation
+        */
+       cfqd->rq_starved = 1;
+       cfq_schedule_dispatch(cfqd);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+       return 1;
+}
+
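+/*
+ * Work handler behind cfqd->unplug_work: if request allocation was starved,
+ * wake any tasks sleeping on the request lists, then unplug the queue and
+ * rerun the request_fn.
+ */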
+static void cfq_kick_queue(void *data)
+{
+       request_queue_t *q = data;
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+
+       if (cfqd->rq_starved) {
+               struct request_list *rl = &q->rq;
+
+               /*
+                * we aren't guaranteed to get a request after this, but we
+                * have to be opportunistic
+                */
+               smp_mb();
+               if (waitqueue_active(&rl->wait[READ]))
+                       wake_up(&rl->wait[READ]);
+               if (waitqueue_active(&rl->wait[WRITE]))
+                       wake_up(&rl->wait[WRITE]);
+       }
+
+       blk_remove_plug(q);
+       q->request_fn(q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/*
+ * Timer running if the active_queue is currently idling inside its time slice
+ */
+static void cfq_idle_slice_timer(unsigned long data)
+{
+       struct cfq_data *cfqd = (struct cfq_data *) data;
+       struct cfq_queue *cfqq;
+       unsigned long flags;
+
+       spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+
+       if ((cfqq = cfqd->active_queue) != NULL) {
+               unsigned long now = jiffies;
+
+               /*
+                * expired
+                */
+               if (time_after(now, cfqq->slice_end))
+                       goto expire;
+
+               /*
+                * only expire and reinvoke request handler, if there are
+                * other queues with pending requests
+                */
+               if (!cfqd->busy_queues) {
+                       cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
+                       add_timer(&cfqd->idle_slice_timer);
+                       goto out_cont;
+               }
+
+               /*
+                * not expired and it has a request pending, let it dispatch
+                */
+               if (!RB_EMPTY(&cfqq->sort_list)) {
+                       cfq_mark_cfqq_must_dispatch(cfqq);
+                       goto out_kick;
+               }
+       }
+expire:
+       cfq_slice_expired(cfqd, 0);
+out_kick:
+       cfq_schedule_dispatch(cfqd);
+out_cont:
+       spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+}
+
+/*
+ * Timer running if an idle class queue is waiting for service
+ */
+static void cfq_idle_class_timer(unsigned long data)
+{
+       struct cfq_data *cfqd = (struct cfq_data *) data;
+       unsigned long flags, end;
+
+       spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+
+       /*
+        * race with a non-idle queue, reset timer
+        */
+       end = cfqd->last_end_request + CFQ_IDLE_GRACE;
+       if (!time_after_eq(jiffies, end)) {
+               cfqd->idle_class_timer.expires = end;
+               add_timer(&cfqd->idle_class_timer);
+       } else
+               cfq_schedule_dispatch(cfqd);
+
+       spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+}
+
+static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
+{
+       del_timer_sync(&cfqd->idle_slice_timer);
+       del_timer_sync(&cfqd->idle_class_timer);
+       blk_sync_queue(cfqd->queue);
+}
+
+static void cfq_put_cfqd(struct cfq_data *cfqd)
+{
+       request_queue_t *q = cfqd->queue;
+
+       if (!atomic_dec_and_test(&cfqd->ref))
+               return;
+
+       cfq_shutdown_timer_wq(cfqd);
+       blk_put_queue(q);
+
+       mempool_destroy(cfqd->crq_pool);
+       kfree(cfqd->crq_hash);
+       kfree(cfqd->cfq_hash);
+       kfree(cfqd);
+}
+
+static void cfq_exit_queue(elevator_t *e)
+{
+       struct cfq_data *cfqd = e->elevator_data;
+
+       cfq_shutdown_timer_wq(cfqd);
+       cfq_put_cfqd(cfqd);
+}
+
+static int cfq_init_queue(request_queue_t *q, elevator_t *e)
+{
+       struct cfq_data *cfqd;
+       int i;
+
+       cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
+       if (!cfqd)
+               return -ENOMEM;
+
+       memset(cfqd, 0, sizeof(*cfqd));
+
+       for (i = 0; i < CFQ_PRIO_LISTS; i++)
+               INIT_LIST_HEAD(&cfqd->rr_list[i]);
+
+       INIT_LIST_HEAD(&cfqd->busy_rr);
+       INIT_LIST_HEAD(&cfqd->cur_rr);
+       INIT_LIST_HEAD(&cfqd->idle_rr);
+       INIT_LIST_HEAD(&cfqd->empty_list);
+
+       cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
+       if (!cfqd->crq_hash)
+               goto out_crqhash;
+
+       cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
+       if (!cfqd->cfq_hash)
+               goto out_cfqhash;
+
+       cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool);
+       if (!cfqd->crq_pool)
+               goto out_crqpool;
+
+       for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
+               INIT_HLIST_HEAD(&cfqd->crq_hash[i]);
+       for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
+               INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
+
+       e->elevator_data = cfqd;
+
+       cfqd->queue = q;
+       atomic_inc(&q->refcnt);
+
+       cfqd->max_queued = q->nr_requests / 4;
+       q->nr_batching = cfq_queued;
+
+       init_timer(&cfqd->idle_slice_timer);
+       cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
+       cfqd->idle_slice_timer.data = (unsigned long) cfqd;
+
+       init_timer(&cfqd->idle_class_timer);
+       cfqd->idle_class_timer.function = cfq_idle_class_timer;
+       cfqd->idle_class_timer.data = (unsigned long) cfqd;
+
+       INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
+
+       atomic_set(&cfqd->ref, 1);
+
+       cfqd->cfq_queued = cfq_queued;
+       cfqd->cfq_quantum = cfq_quantum;
+       cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
+       cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
+       cfqd->cfq_back_max = cfq_back_max;
+       cfqd->cfq_back_penalty = cfq_back_penalty;
+       cfqd->cfq_slice[0] = cfq_slice_async;
+       cfqd->cfq_slice[1] = cfq_slice_sync;
+       cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
+       cfqd->cfq_slice_idle = cfq_slice_idle;
+       cfqd->cfq_max_depth = cfq_max_depth;
+
+       return 0;
+out_crqpool:
+       kfree(cfqd->cfq_hash);
+out_cfqhash:
+       kfree(cfqd->crq_hash);
+out_crqhash:
+       kfree(cfqd);
+       return -ENOMEM;
+}
+
+static void cfq_slab_kill(void)
+{
+       if (crq_pool)
+               kmem_cache_destroy(crq_pool);
+       if (cfq_pool)
+               kmem_cache_destroy(cfq_pool);
+       if (cfq_ioc_pool)
+               kmem_cache_destroy(cfq_ioc_pool);
+}
+
+static int __init cfq_slab_setup(void)
+{
+       crq_pool = kmem_cache_create("crq_pool", sizeof(struct cfq_rq), 0, 0,
+                                       NULL, NULL);
+       if (!crq_pool)
+               goto fail;
+
+       cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
+                                       NULL, NULL);
+       if (!cfq_pool)
+               goto fail;
+
+       cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
+                       sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
+       if (!cfq_ioc_pool)
+               goto fail;
+
+       return 0;
+fail:
+       cfq_slab_kill();
+       return -ENOMEM;
+}
+
+/*
+ * sysfs parts below -->
+ */
+struct cfq_fs_entry {
+       struct attribute attr;
+       ssize_t (*show)(struct cfq_data *, char *);
+       ssize_t (*store)(struct cfq_data *, const char *, size_t);
+};
+
+static ssize_t
+cfq_var_show(unsigned int var, char *page)
+{
+       return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+cfq_var_store(unsigned int *var, const char *page, size_t count)
+{
+       char *p = (char *) page;
+
+       *var = simple_strtoul(p, &p, 10);
+       return count;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                           \
+static ssize_t __FUNC(struct cfq_data *cfqd, char *page)               \
+{                                                                      \
+       unsigned int __data = __VAR;                                    \
+       if (__CONV)                                                     \
+               __data = jiffies_to_msecs(__data);                      \
+       return cfq_var_show(__data, (page));                            \
+}
+SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
+SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
+SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
+SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
+SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0);
+SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0);
+SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
+SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
+SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
+SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
+SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0);
+#undef SHOW_FUNCTION
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                        \
+static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count)   \
+{                                                                      \
+       unsigned int __data;                                            \
+       int ret = cfq_var_store(&__data, (page), count);                \
+       if (__data < (MIN))                                             \
+               __data = (MIN);                                         \
+       else if (__data > (MAX))                                        \
+               __data = (MAX);                                         \
+       if (__CONV)                                                     \
+               *(__PTR) = msecs_to_jiffies(__data);                    \
+       else                                                            \
+               *(__PTR) = __data;                                      \
+       return ret;                                                     \
+}
+STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
+STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0);
+#undef STORE_FUNCTION
+
+static struct cfq_fs_entry cfq_quantum_entry = {
+       .attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_quantum_show,
+       .store = cfq_quantum_store,
+};
+static struct cfq_fs_entry cfq_queued_entry = {
+       .attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_queued_show,
+       .store = cfq_queued_store,
+};
+static struct cfq_fs_entry cfq_fifo_expire_sync_entry = {
+       .attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_fifo_expire_sync_show,
+       .store = cfq_fifo_expire_sync_store,
+};
+static struct cfq_fs_entry cfq_fifo_expire_async_entry = {
+       .attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_fifo_expire_async_show,
+       .store = cfq_fifo_expire_async_store,
+};
+static struct cfq_fs_entry cfq_back_max_entry = {
+       .attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_back_max_show,
+       .store = cfq_back_max_store,
+};
+static struct cfq_fs_entry cfq_back_penalty_entry = {
+       .attr = {.name = "back_seek_penalty", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_back_penalty_show,
+       .store = cfq_back_penalty_store,
+};
+static struct cfq_fs_entry cfq_slice_sync_entry = {
+       .attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_slice_sync_show,
+       .store = cfq_slice_sync_store,
+};
+static struct cfq_fs_entry cfq_slice_async_entry = {
+       .attr = {.name = "slice_async", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_slice_async_show,
+       .store = cfq_slice_async_store,
+};
+static struct cfq_fs_entry cfq_slice_async_rq_entry = {
+       .attr = {.name = "slice_async_rq", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_slice_async_rq_show,
+       .store = cfq_slice_async_rq_store,
+};
+static struct cfq_fs_entry cfq_slice_idle_entry = {
+       .attr = {.name = "slice_idle", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_slice_idle_show,
+       .store = cfq_slice_idle_store,
+};
+static struct cfq_fs_entry cfq_max_depth_entry = {
+       .attr = {.name = "max_depth", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_max_depth_show,
+       .store = cfq_max_depth_store,
+};
+
+static struct attribute *default_attrs[] = {
+       &cfq_quantum_entry.attr,
+       &cfq_queued_entry.attr,
+       &cfq_fifo_expire_sync_entry.attr,
+       &cfq_fifo_expire_async_entry.attr,
+       &cfq_back_max_entry.attr,
+       &cfq_back_penalty_entry.attr,
+       &cfq_slice_sync_entry.attr,
+       &cfq_slice_async_entry.attr,
+       &cfq_slice_async_rq_entry.attr,
+       &cfq_slice_idle_entry.attr,
+       &cfq_max_depth_entry.attr,
+       NULL,
+};
+
+#define to_cfq(atr) container_of((atr), struct cfq_fs_entry, attr)
+
+static ssize_t
+cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+       elevator_t *e = container_of(kobj, elevator_t, kobj);
+       struct cfq_fs_entry *entry = to_cfq(attr);
+
+       if (!entry->show)
+               return -EIO;
+
+       return entry->show(e->elevator_data, page);
+}
+
+static ssize_t
+cfq_attr_store(struct kobject *kobj, struct attribute *attr,
+              const char *page, size_t length)
+{
+       elevator_t *e = container_of(kobj, elevator_t, kobj);
+       struct cfq_fs_entry *entry = to_cfq(attr);
+
+       if (!entry->store)
+               return -EIO;
+
+       return entry->store(e->elevator_data, page, length);
+}
+
+static struct sysfs_ops cfq_sysfs_ops = {
+       .show   = cfq_attr_show,
+       .store  = cfq_attr_store,
+};
+
+static struct kobj_type cfq_ktype = {
+       .sysfs_ops      = &cfq_sysfs_ops,
+       .default_attrs  = default_attrs,
+};
+
+static struct elevator_type iosched_cfq = {
+       .ops = {
+               .elevator_merge_fn =            cfq_merge,
+               .elevator_merged_fn =           cfq_merged_request,
+               .elevator_merge_req_fn =        cfq_merged_requests,
+               .elevator_dispatch_fn =         cfq_dispatch_requests,
+               .elevator_add_req_fn =          cfq_insert_request,
+               .elevator_activate_req_fn =     cfq_activate_request,
+               .elevator_deactivate_req_fn =   cfq_deactivate_request,
+               .elevator_queue_empty_fn =      cfq_queue_empty,
+               .elevator_completed_req_fn =    cfq_completed_request,
+               .elevator_former_req_fn =       cfq_former_request,
+               .elevator_latter_req_fn =       cfq_latter_request,
+               .elevator_set_req_fn =          cfq_set_request,
+               .elevator_put_req_fn =          cfq_put_request,
+               .elevator_may_queue_fn =        cfq_may_queue,
+               .elevator_init_fn =             cfq_init_queue,
+               .elevator_exit_fn =             cfq_exit_queue,
+       },
+       .elevator_ktype =       &cfq_ktype,
+       .elevator_name =        "cfq",
+       .elevator_owner =       THIS_MODULE,
+};
+
+static int __init cfq_init(void)
+{
+       int ret;
+
+       /*
+        * could be 0 on HZ < 1000 setups
+        */
+       if (!cfq_slice_async)
+               cfq_slice_async = 1;
+       if (!cfq_slice_idle)
+               cfq_slice_idle = 1;
+
+       if (cfq_slab_setup())
+               return -ENOMEM;
+
+       ret = elv_register(&iosched_cfq);
+       if (ret)
+               cfq_slab_kill();
+
+       return ret;
+}
+
+static void __exit cfq_exit(void)
+{
+       elv_unregister(&iosched_cfq);
+       cfq_slab_kill();
+}
+
+module_init(cfq_init);
+module_exit(cfq_exit);
+
+MODULE_AUTHOR("Jens Axboe");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
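
As a rough illustration of the fixed-point, decaying "think time" average
maintained by cfq_update_io_thinktime() above, here is a minimal userspace
sketch; the struct, helper name, main() driver and sample values are
illustrative only and are not part of this commit:

	#include <stdio.h>

	/* decayed think-time statistics, kept in fixed point scaled by 256 */
	struct thinktime {
		unsigned long samples;	/* decayed sample count */
		unsigned long total;	/* decayed sum of samples */
		unsigned long mean;	/* total / samples, in sample units */
	};

	/*
	 * same arithmetic as cfq_update_io_thinktime(): each new sample
	 * receives roughly 1/8 of the total weight
	 */
	static void thinktime_update(struct thinktime *tt, unsigned long ttime)
	{
		tt->samples = (7 * tt->samples + 256) / 8;
		tt->total = (7 * tt->total + 256 * ttime) / 8;
		tt->mean = (tt->total + 128) / tt->samples;
	}

	int main(void)
	{
		unsigned long ticks[] = { 4, 4, 4, 12, 4, 4 };
		struct thinktime tt = { 0, 0, 0 };
		unsigned long i;

		for (i = 0; i < sizeof(ticks) / sizeof(ticks[0]); i++) {
			thinktime_update(&tt, ticks[i]);
			printf("sample %lu -> mean %lu\n", ticks[i], tt.mean);
		}
		return 0;
	}

An occasional long gap (12 here) pulls the mean up only gradually, which is
why cfq waits for sample_valid() before trusting it to control idling.
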
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
new file mode 100644 (file)
index 0000000..7929471
--- /dev/null
@@ -0,0 +1,878 @@
+/*
+ *  linux/drivers/block/deadline-iosched.c
+ *
+ *  Deadline i/o scheduler.
+ *
+ *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/hash.h>
+#include <linux/rbtree.h>
+
+/*
+ * See Documentation/block/deadline-iosched.txt
+ */
+static int read_expire = HZ / 2;  /* max time before a read is submitted. */
+static int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
+static int writes_starved = 2;    /* max times reads can starve a write */
+static int fifo_batch = 16;       /* # of sequential requests treated as one
+                                    by the above parameters. For throughput. */
+
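+/*
+ * back merge hash: requests are hashed on their end sector (rq_hash_key) in
+ * 8-sector buckets, so a bio can quickly be matched against a request that
+ * ends right where the bio begins.
+ */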
+static const int deadline_hash_shift = 5;
+#define DL_HASH_BLOCK(sec)     ((sec) >> 3)
+#define DL_HASH_FN(sec)                (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
+#define DL_HASH_ENTRIES                (1 << deadline_hash_shift)
+#define rq_hash_key(rq)                ((rq)->sector + (rq)->nr_sectors)
+#define list_entry_hash(ptr)   list_entry((ptr), struct deadline_rq, hash)
+#define ON_HASH(drq)           (drq)->on_hash
+
+struct deadline_data {
+       /*
+        * run time data
+        */
+
+       /*
+        * requests (deadline_rq s) are present on both sort_list and fifo_list
+        */
+       struct rb_root sort_list[2];    
+       struct list_head fifo_list[2];
+       
+       /*
+        * next in sort order; read, write, or both may be NULL
+        */
+       struct deadline_rq *next_drq[2];
+       struct list_head *hash;         /* request hash */
+       unsigned int batching;          /* number of sequential requests made */
+       sector_t last_sector;           /* head position */
+       unsigned int starved;           /* times reads have starved writes */
+
+       /*
+        * settings that change how the i/o scheduler behaves
+        */
+       int fifo_expire[2];
+       int fifo_batch;
+       int writes_starved;
+       int front_merges;
+
+       mempool_t *drq_pool;
+};
+
+/*
+ * pre-request data.
+ */
+struct deadline_rq {
+       /*
+        * rbtree index, key is the starting offset
+        */
+       struct rb_node rb_node;
+       sector_t rb_key;
+
+       struct request *request;
+
+       /*
+        * request hash, key is the ending offset (for back merge lookup)
+        */
+       struct list_head hash;
+       char on_hash;
+
+       /*
+        * expire fifo
+        */
+       struct list_head fifo;
+       unsigned long expires;
+};
+
+static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);
+
+static kmem_cache_t *drq_pool;
+
+#define RQ_DATA(rq)    ((struct deadline_rq *) (rq)->elevator_private)
+
+/*
+ * the back merge hash support functions
+ */
+static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
+{
+       drq->on_hash = 0;
+       list_del_init(&drq->hash);
+}
+
+static inline void deadline_del_drq_hash(struct deadline_rq *drq)
+{
+       if (ON_HASH(drq))
+               __deadline_del_drq_hash(drq);
+}
+
+static inline void
+deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
+{
+       struct request *rq = drq->request;
+
+       BUG_ON(ON_HASH(drq));
+
+       drq->on_hash = 1;
+       list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
+}
+
+/*
+ * move hot entry to front of chain
+ */
+static inline void
+deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
+{
+       struct request *rq = drq->request;
+       struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
+
+       if (ON_HASH(drq) && drq->hash.prev != head) {
+               list_del(&drq->hash);
+               list_add(&drq->hash, head);
+       }
+}
+
+static struct request *
+deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
+{
+       struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
+       struct list_head *entry, *next = hash_list->next;
+
+       while ((entry = next) != hash_list) {
+               struct deadline_rq *drq = list_entry_hash(entry);
+               struct request *__rq = drq->request;
+
+               next = entry->next;
+               
+               BUG_ON(!ON_HASH(drq));
+
+               if (!rq_mergeable(__rq)) {
+                       __deadline_del_drq_hash(drq);
+                       continue;
+               }
+
+               if (rq_hash_key(__rq) == offset)
+                       return __rq;
+       }
+
+       return NULL;
+}
+
+/*
+ * rb tree support functions
+ */
+#define RB_NONE                (2)
+#define RB_EMPTY(root) ((root)->rb_node == NULL)
+#define ON_RB(node)    ((node)->rb_color != RB_NONE)
+#define RB_CLEAR(node) ((node)->rb_color = RB_NONE)
+#define rb_entry_drq(node)     rb_entry((node), struct deadline_rq, rb_node)
+#define DRQ_RB_ROOT(dd, drq)   (&(dd)->sort_list[rq_data_dir((drq)->request)])
+#define rq_rb_key(rq)          (rq)->sector
+
+static struct deadline_rq *
+__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
+{
+       struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
+       struct rb_node *parent = NULL;
+       struct deadline_rq *__drq;
+
+       while (*p) {
+               parent = *p;
+               __drq = rb_entry_drq(parent);
+
+               if (drq->rb_key < __drq->rb_key)
+                       p = &(*p)->rb_left;
+               else if (drq->rb_key > __drq->rb_key)
+                       p = &(*p)->rb_right;
+               else
+                       return __drq;
+       }
+
+       rb_link_node(&drq->rb_node, parent, p);
+       return NULL;
+}
+
+static void
+deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
+{
+       struct deadline_rq *__alias;
+
+       drq->rb_key = rq_rb_key(drq->request);
+
+retry:
+       __alias = __deadline_add_drq_rb(dd, drq);
+       if (!__alias) {
+               rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
+               return;
+       }
+
+       deadline_move_request(dd, __alias);
+       goto retry;
+}
+
+static inline void
+deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
+{
+       const int data_dir = rq_data_dir(drq->request);
+
+       if (dd->next_drq[data_dir] == drq) {
+               struct rb_node *rbnext = rb_next(&drq->rb_node);
+
+               dd->next_drq[data_dir] = NULL;
+               if (rbnext)
+                       dd->next_drq[data_dir] = rb_entry_drq(rbnext);
+       }
+
+       BUG_ON(!ON_RB(&drq->rb_node));
+       rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
+       RB_CLEAR(&drq->rb_node);
+}
+
+static struct request *
+deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
+{
+       struct rb_node *n = dd->sort_list[data_dir].rb_node;
+       struct deadline_rq *drq;
+
+       while (n) {
+               drq = rb_entry_drq(n);
+
+               if (sector < drq->rb_key)
+                       n = n->rb_left;
+               else if (sector > drq->rb_key)
+                       n = n->rb_right;
+               else
+                       return drq->request;
+       }
+
+       return NULL;
+}
+
+/*
+ * deadline_find_first_drq finds the first (lowest sector numbered) request
+ * for the specified data_dir. Used to sweep back to the start of the disk
+ * (1-way elevator) after we process the last (highest sector) request.
+ */
+static struct deadline_rq *
+deadline_find_first_drq(struct deadline_data *dd, int data_dir)
+{
+       struct rb_node *n = dd->sort_list[data_dir].rb_node;
+
+       for (;;) {
+               if (n->rb_left == NULL)
+                       return rb_entry_drq(n);
+               
+               n = n->rb_left;
+       }
+}
+
+/*
+ * add drq to rbtree and fifo
+ */
+static void
+deadline_add_request(struct request_queue *q, struct request *rq)
+{
+       struct deadline_data *dd = q->elevator->elevator_data;
+       struct deadline_rq *drq = RQ_DATA(rq);
+
+       const int data_dir = rq_data_dir(drq->request);
+
+       deadline_add_drq_rb(dd, drq);
+       /*
+        * set expire time and add to fifo list
+        */
+       drq->expires = jiffies + dd->fifo_expire[data_dir];
+       list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);
+
+       if (rq_mergeable(rq))
+               deadline_add_drq_hash(dd, drq);
+}
+
+/*
+ * remove rq from rbtree, fifo, and hash
+ */
+static void deadline_remove_request(request_queue_t *q, struct request *rq)
+{
+       struct deadline_rq *drq = RQ_DATA(rq);
+       struct deadline_data *dd = q->elevator->elevator_data;
+
+       list_del_init(&drq->fifo);
+       deadline_del_drq_rb(dd, drq);
+       deadline_del_drq_hash(drq);
+}
+
+static int
+deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
+{
+       struct deadline_data *dd = q->elevator->elevator_data;
+       struct request *__rq;
+       int ret;
+
+       /*
+        * see if the merge hash can satisfy a back merge
+        */
+       __rq = deadline_find_drq_hash(dd, bio->bi_sector);
+       if (__rq) {
+               BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
+
+               if (elv_rq_merge_ok(__rq, bio)) {
+                       ret = ELEVATOR_BACK_MERGE;
+                       goto out;
+               }
+       }
+
+       /*
+        * check for front merge
+        */
+       if (dd->front_merges) {
+               sector_t rb_key = bio->bi_sector + bio_sectors(bio);
+
+               __rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
+               if (__rq) {
+                       BUG_ON(rb_key != rq_rb_key(__rq));
+
+                       if (elv_rq_merge_ok(__rq, bio)) {
+                               ret = ELEVATOR_FRONT_MERGE;
+                               goto out;
+                       }
+               }
+       }
+
+       return ELEVATOR_NO_MERGE;
+out:
+       if (ret)
+               deadline_hot_drq_hash(dd, RQ_DATA(__rq));
+       *req = __rq;
+       return ret;
+}
+
+static void deadline_merged_request(request_queue_t *q, struct request *req)
+{
+       struct deadline_data *dd = q->elevator->elevator_data;
+       struct deadline_rq *drq = RQ_DATA(req);
+
+       /*
+        * hash always needs to be repositioned, key is end sector
+        */
+       deadline_del_drq_hash(drq);
+       deadline_add_drq_hash(dd, drq);
+
+       /*
+        * if the merge was a front merge, we need to reposition request
+        */
+       if (rq_rb_key(req) != drq->rb_key) {
+               deadline_del_drq_rb(dd, drq);
+               deadline_add_drq_rb(dd, drq);
+       }
+}
+
+static void
+deadline_merged_requests(request_queue_t *q, struct request *req,
+                        struct request *next)
+{
+       struct deadline_data *dd = q->elevator->elevator_data;
+       struct deadline_rq *drq = RQ_DATA(req);
+       struct deadline_rq *dnext = RQ_DATA(next);
+
+       BUG_ON(!drq);
+       BUG_ON(!dnext);
+
+       /*
+        * reposition drq (this is the merged request) in hash, and in rbtree
+        * in case of a front merge
+        */
+       deadline_del_drq_hash(drq);
+       deadline_add_drq_hash(dd, drq);
+
+       if (rq_rb_key(req) != drq->rb_key) {
+               deadline_del_drq_rb(dd, drq);
+               deadline_add_drq_rb(dd, drq);
+       }
+
+       /*
+        * if dnext expires before drq, assign its expire time to drq
+        * and move into dnext position (dnext will be deleted) in fifo
+        */
+       if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
+               if (time_before(dnext->expires, drq->expires)) {
+                       list_move(&drq->fifo, &dnext->fifo);
+                       drq->expires = dnext->expires;
+               }
+       }
+
+       /*
+        * kill knowledge of next, this one is a goner
+        */
+       deadline_remove_request(q, next);
+}
+
+/*
+ * move request from sort list to dispatch queue.
+ */
+static inline void
+deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
+{
+       request_queue_t *q = drq->request->q;
+
+       deadline_remove_request(q, drq->request);
+       elv_dispatch_add_tail(q, drq->request);
+}
+
+/*
+ * move an entry to dispatch queue
+ */
+static void
+deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
+{
+       const int data_dir = rq_data_dir(drq->request);
+       struct rb_node *rbnext = rb_next(&drq->rb_node);
+
+       dd->next_drq[READ] = NULL;
+       dd->next_drq[WRITE] = NULL;
+
+       if (rbnext)
+               dd->next_drq[data_dir] = rb_entry_drq(rbnext);
+       
+       dd->last_sector = drq->request->sector + drq->request->nr_sectors;
+
+       /*
+        * take it off the sort and fifo list, move
+        * to dispatch queue
+        */
+       deadline_move_to_dispatch(dd, drq);
+}
+
+#define list_entry_fifo(ptr)   list_entry((ptr), struct deadline_rq, fifo)
+
+/*
+ * deadline_check_fifo returns 0 if there are no expired requests on the fifo
+ * for the given direction, 1 otherwise. Requires !list_empty(&dd->fifo_list[ddir])
+ */
+static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
+{
+       struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);
+
+       /*
+        * drq is expired!
+        */
+       if (time_after(jiffies, drq->expires))
+               return 1;
+
+       return 0;
+}
+
+/*
+ * deadline_dispatch_requests selects the best request according to
+ * read/write expire, fifo_batch, etc
+ */
+static int deadline_dispatch_requests(request_queue_t *q, int force)
+{
+       struct deadline_data *dd = q->elevator->elevator_data;
+       const int reads = !list_empty(&dd->fifo_list[READ]);
+       const int writes = !list_empty(&dd->fifo_list[WRITE]);
+       struct deadline_rq *drq;
+       int data_dir;
+
+       /*
+        * batches are currently reads XOR writes
+        */
+       if (dd->next_drq[WRITE])
+               drq = dd->next_drq[WRITE];
+       else
+               drq = dd->next_drq[READ];
+
+       if (drq) {
+               /* we have a "next request" */
+               
+               if (dd->last_sector != drq->request->sector)
+                       /* end the batch on a non sequential request */
+                       dd->batching += dd->fifo_batch;
+               
+               if (dd->batching < dd->fifo_batch)
+                       /* we are still entitled to batch */
+                       goto dispatch_request;
+       }
+
+       /*
+        * at this point we are not running a batch. select the appropriate
+        * data direction (read / write)
+        */
+
+       if (reads) {
+               BUG_ON(RB_EMPTY(&dd->sort_list[READ]));
+
+               if (writes && (dd->starved++ >= dd->writes_starved))
+                       goto dispatch_writes;
+
+               data_dir = READ;
+
+               goto dispatch_find_request;
+       }
+
+       /*
+        * either there are no reads, or writes have been starved
+        */
+
+       if (writes) {
+dispatch_writes:
+               BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));
+
+               dd->starved = 0;
+
+               data_dir = WRITE;
+
+               goto dispatch_find_request;
+       }
+
+       return 0;
+
+dispatch_find_request:
+       /*
+        * we are not running a batch, find best request for selected data_dir
+        */
+       if (deadline_check_fifo(dd, data_dir)) {
+               /* An expired request exists - satisfy it */
+               dd->batching = 0;
+               drq = list_entry_fifo(dd->fifo_list[data_dir].next);
+               
+       } else if (dd->next_drq[data_dir]) {
+               /*
+                * The last req was the same dir and we have a next request in
+                * sort order. No expired requests so continue on from here.
+                */
+               drq = dd->next_drq[data_dir];
+       } else {
+               /*
+                * The last req was the other direction or we have run out of
+                * higher-sectored requests. Go back to the lowest sectored
+                * request (1 way elevator) and start a new batch.
+                */
+               dd->batching = 0;
+               drq = deadline_find_first_drq(dd, data_dir);
+       }
+
+dispatch_request:
+       /*
+        * drq is the selected appropriate request.
+        */
+       dd->batching++;
+       deadline_move_request(dd, drq);
+
+       return 1;
+}
+
+static int deadline_queue_empty(request_queue_t *q)
+{
+       struct deadline_data *dd = q->elevator->elevator_data;
+
+       return list_empty(&dd->fifo_list[WRITE])
+               && list_empty(&dd->fifo_list[READ]);
+}
+
+static struct request *
+deadline_former_request(request_queue_t *q, struct request *rq)
+{
+       struct deadline_rq *drq = RQ_DATA(rq);
+       struct rb_node *rbprev = rb_prev(&drq->rb_node);
+
+       if (rbprev)
+               return rb_entry_drq(rbprev)->request;
+
+       return NULL;
+}
+
+static struct request *
+deadline_latter_request(request_queue_t *q, struct request *rq)
+{
+       struct deadline_rq *drq = RQ_DATA(rq);
+       struct rb_node *rbnext = rb_next(&drq->rb_node);
+
+       if (rbnext)
+               return rb_entry_drq(rbnext)->request;
+
+       return NULL;
+}
+
+static void deadline_exit_queue(elevator_t *e)
+{
+       struct deadline_data *dd = e->elevator_data;
+
+       BUG_ON(!list_empty(&dd->fifo_list[READ]));
+       BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
+
+       mempool_destroy(dd->drq_pool);
+       kfree(dd->hash);
+       kfree(dd);
+}
+
+/*
+ * initialize elevator private data (deadline_data), and alloc a drq for
+ * each request on the free lists
+ */
+static int deadline_init_queue(request_queue_t *q, elevator_t *e)
+{
+       struct deadline_data *dd;
+       int i;
+
+       if (!drq_pool)
+               return -ENOMEM;
+
+       dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
+       if (!dd)
+               return -ENOMEM;
+       memset(dd, 0, sizeof(*dd));
+
+       dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
+                               GFP_KERNEL, q->node);
+       if (!dd->hash) {
+               kfree(dd);
+               return -ENOMEM;
+       }
+
+       dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
+                                       mempool_free_slab, drq_pool, q->node);
+       if (!dd->drq_pool) {
+               kfree(dd->hash);
+               kfree(dd);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < DL_HASH_ENTRIES; i++)
+               INIT_LIST_HEAD(&dd->hash[i]);
+
+       INIT_LIST_HEAD(&dd->fifo_list[READ]);
+       INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
+       dd->sort_list[READ] = RB_ROOT;
+       dd->sort_list[WRITE] = RB_ROOT;
+       dd->fifo_expire[READ] = read_expire;
+       dd->fifo_expire[WRITE] = write_expire;
+       dd->writes_starved = writes_starved;
+       dd->front_merges = 1;
+       dd->fifo_batch = fifo_batch;
+       e->elevator_data = dd;
+       return 0;
+}
+
+static void deadline_put_request(request_queue_t *q, struct request *rq)
+{
+       struct deadline_data *dd = q->elevator->elevator_data;
+       struct deadline_rq *drq = RQ_DATA(rq);
+
+       mempool_free(drq, dd->drq_pool);
+       rq->elevator_private = NULL;
+}
+
+static int
+deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
+                    gfp_t gfp_mask)
+{
+       struct deadline_data *dd = q->elevator->elevator_data;
+       struct deadline_rq *drq;
+
+       drq = mempool_alloc(dd->drq_pool, gfp_mask);
+       if (drq) {
+               memset(drq, 0, sizeof(*drq));
+               RB_CLEAR(&drq->rb_node);
+               drq->request = rq;
+
+               INIT_LIST_HEAD(&drq->hash);
+               drq->on_hash = 0;
+
+               INIT_LIST_HEAD(&drq->fifo);
+
+               rq->elevator_private = drq;
+               return 0;
+       }
+
+       return 1;
+}
+
+/*
+ * sysfs parts below
+ */
+struct deadline_fs_entry {
+       struct attribute attr;
+       ssize_t (*show)(struct deadline_data *, char *);
+       ssize_t (*store)(struct deadline_data *, const char *, size_t);
+};
+
+static ssize_t
+deadline_var_show(int var, char *page)
+{
+       return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+deadline_var_store(int *var, const char *page, size_t count)
+{
+       char *p = (char *) page;
+
+       *var = simple_strtol(p, &p, 10);
+       return count;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                           \
+static ssize_t __FUNC(struct deadline_data *dd, char *page)            \
+{                                                                      \
+       int __data = __VAR;                                     \
+       if (__CONV)                                                     \
+               __data = jiffies_to_msecs(__data);                      \
+       return deadline_var_show(__data, (page));                       \
+}
+SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ], 1);
+SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE], 1);
+SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved, 0);
+SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges, 0);
+SHOW_FUNCTION(deadline_fifobatch_show, dd->fifo_batch, 0);
+#undef SHOW_FUNCTION
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                        \
+static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count)        \
+{                                                                      \
+       int __data;                                                     \
+       int ret = deadline_var_store(&__data, (page), count);           \
+       if (__data < (MIN))                                             \
+               __data = (MIN);                                         \
+       else if (__data > (MAX))                                        \
+               __data = (MAX);                                         \
+       if (__CONV)                                                     \
+               *(__PTR) = msecs_to_jiffies(__data);                    \
+       else                                                            \
+               *(__PTR) = __data;                                      \
+       return ret;                                                     \
+}
+STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
+STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
+STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
+STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1, 0);
+STORE_FUNCTION(deadline_fifobatch_store, &dd->fifo_batch, 0, INT_MAX, 0);
+#undef STORE_FUNCTION
+
+static struct deadline_fs_entry deadline_readexpire_entry = {
+       .attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
+       .show = deadline_readexpire_show,
+       .store = deadline_readexpire_store,
+};
+static struct deadline_fs_entry deadline_writeexpire_entry = {
+       .attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
+       .show = deadline_writeexpire_show,
+       .store = deadline_writeexpire_store,
+};
+static struct deadline_fs_entry deadline_writesstarved_entry = {
+       .attr = {.name = "writes_starved", .mode = S_IRUGO | S_IWUSR },
+       .show = deadline_writesstarved_show,
+       .store = deadline_writesstarved_store,
+};
+static struct deadline_fs_entry deadline_frontmerges_entry = {
+       .attr = {.name = "front_merges", .mode = S_IRUGO | S_IWUSR },
+       .show = deadline_frontmerges_show,
+       .store = deadline_frontmerges_store,
+};
+static struct deadline_fs_entry deadline_fifobatch_entry = {
+       .attr = {.name = "fifo_batch", .mode = S_IRUGO | S_IWUSR },
+       .show = deadline_fifobatch_show,
+       .store = deadline_fifobatch_store,
+};
+
+static struct attribute *default_attrs[] = {
+       &deadline_readexpire_entry.attr,
+       &deadline_writeexpire_entry.attr,
+       &deadline_writesstarved_entry.attr,
+       &deadline_frontmerges_entry.attr,
+       &deadline_fifobatch_entry.attr,
+       NULL,
+};
+
+#define to_deadline(atr) container_of((atr), struct deadline_fs_entry, attr)
+
+static ssize_t
+deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+       elevator_t *e = container_of(kobj, elevator_t, kobj);
+       struct deadline_fs_entry *entry = to_deadline(attr);
+
+       if (!entry->show)
+               return -EIO;
+
+       return entry->show(e->elevator_data, page);
+}
+
+static ssize_t
+deadline_attr_store(struct kobject *kobj, struct attribute *attr,
+                   const char *page, size_t length)
+{
+       elevator_t *e = container_of(kobj, elevator_t, kobj);
+       struct deadline_fs_entry *entry = to_deadline(attr);
+
+       if (!entry->store)
+               return -EIO;
+
+       return entry->store(e->elevator_data, page, length);
+}
+
+static struct sysfs_ops deadline_sysfs_ops = {
+       .show   = deadline_attr_show,
+       .store  = deadline_attr_store,
+};
+
+static struct kobj_type deadline_ktype = {
+       .sysfs_ops      = &deadline_sysfs_ops,
+       .default_attrs  = default_attrs,
+};
+
+static struct elevator_type iosched_deadline = {
+       .ops = {
+               .elevator_merge_fn =            deadline_merge,
+               .elevator_merged_fn =           deadline_merged_request,
+               .elevator_merge_req_fn =        deadline_merged_requests,
+               .elevator_dispatch_fn =         deadline_dispatch_requests,
+               .elevator_add_req_fn =          deadline_add_request,
+               .elevator_queue_empty_fn =      deadline_queue_empty,
+               .elevator_former_req_fn =       deadline_former_request,
+               .elevator_latter_req_fn =       deadline_latter_request,
+               .elevator_set_req_fn =          deadline_set_request,
+               .elevator_put_req_fn =          deadline_put_request,
+               .elevator_init_fn =             deadline_init_queue,
+               .elevator_exit_fn =             deadline_exit_queue,
+       },
+
+       .elevator_ktype = &deadline_ktype,
+       .elevator_name = "deadline",
+       .elevator_owner = THIS_MODULE,
+};
+
+static int __init deadline_init(void)
+{
+       int ret;
+
+       drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
+                                    0, 0, NULL, NULL);
+
+       if (!drq_pool)
+               return -ENOMEM;
+
+       ret = elv_register(&iosched_deadline);
+       if (ret)
+               kmem_cache_destroy(drq_pool);
+
+       return ret;
+}
+
+static void __exit deadline_exit(void)
+{
+       kmem_cache_destroy(drq_pool);
+       elv_unregister(&iosched_deadline);
+}
+
+module_init(deadline_init);
+module_exit(deadline_exit);
+
+MODULE_AUTHOR("Jens Axboe");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("deadline IO scheduler");
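Once the deadline elevator is active on a queue, the attributes defined above are exposed through the elevator's kobject, typically under /sys/block/<disk>/queue/iosched/. A minimal user-space sketch for inspecting and adjusting the read_expire tunable follows; the disk name "hda" is only an example and the exact path can vary between kernels.

    #include <stdio.h>

    /* Sketch: read and update the deadline read_expire tunable via sysfs. */
    int main(void)
    {
            const char *path = "/sys/block/hda/queue/iosched/read_expire";
            char buf[64];
            FILE *f;

            f = fopen(path, "r");
            if (!f) {
                    perror(path);
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("read_expire is %s", buf);   /* shown in milliseconds */
            fclose(f);

            f = fopen(path, "w");
            if (!f) {
                    perror(path);
                    return 1;
            }
            fprintf(f, "%d\n", 250);    /* the store path converts msecs to jiffies */
            fclose(f);
            return 0;
    }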
diff --git a/block/elevator.c b/block/elevator.c
new file mode 100644 (file)
index 0000000..d4a49a3
--- /dev/null
@@ -0,0 +1,802 @@
+/*
+ *  linux/block/elevator.c
+ *
+ *  Block device elevator/IO-scheduler.
+ *
+ *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ *
+ * 30042000 Jens Axboe <axboe@suse.de> :
+ *
+ * Split the elevator a bit so that it is possible to choose a different
+ * one or even write a new "plug in". There are three pieces:
+ * - elevator_fn, inserts a new request in the queue list
+ * - elevator_merge_fn, decides whether a new buffer can be merged with
+ *   an existing request
+ * - elevator_dequeue_fn, called when a request is taken off the active list
+ *
+ * 20082000 Dave Jones <davej@suse.de> :
+ * Removed tests for max-bomb-segments, which was breaking elvtune
+ *  when run without -bN
+ *
+ * Jens:
+ * - Rework again to work with bio instead of buffer_heads
+ * - lose bi_dev comparisons, partition handling is right now
+ * - completely modularize elevator setup and teardown
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+
+#include <asm/uaccess.h>
+
+static DEFINE_SPINLOCK(elv_list_lock);
+static LIST_HEAD(elv_list);
+
+/*
+ * can we safely merge with this request?
+ */
+inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
+{
+       if (!rq_mergeable(rq))
+               return 0;
+
+       /*
+        * different data direction or already started, don't merge
+        */
+       if (bio_data_dir(bio) != rq_data_dir(rq))
+               return 0;
+
+       /*
+        * same device and no special stuff set, merge is ok
+        */
+       if (rq->rq_disk == bio->bi_bdev->bd_disk &&
+           !rq->waiting && !rq->special)
+               return 1;
+
+       return 0;
+}
+EXPORT_SYMBOL(elv_rq_merge_ok);
+
+inline int elv_try_merge(struct request *__rq, struct bio *bio)
+{
+       int ret = ELEVATOR_NO_MERGE;
+
+       /*
+        * we can merge and sequence is ok, check if it's possible
+        */
+       if (elv_rq_merge_ok(__rq, bio)) {
+               if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
+                       ret = ELEVATOR_BACK_MERGE;
+               else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
+                       ret = ELEVATOR_FRONT_MERGE;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(elv_try_merge);
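As a concrete example of the sector arithmetic above: a request spanning sectors 100-107 (nr_sectors == 8) back-merges with a bio that begins at sector 108, and front-merges with a bio covering sectors 92-99 (100 - bio_sectors == 92); any other placement yields ELEVATOR_NO_MERGE.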
+
+static struct elevator_type *elevator_find(const char *name)
+{
+       struct elevator_type *e = NULL;
+       struct list_head *entry;
+
+       list_for_each(entry, &elv_list) {
+               struct elevator_type *__e;
+
+               __e = list_entry(entry, struct elevator_type, list);
+
+               if (!strcmp(__e->elevator_name, name)) {
+                       e = __e;
+                       break;
+               }
+       }
+
+       return e;
+}
+
+static void elevator_put(struct elevator_type *e)
+{
+       module_put(e->elevator_owner);
+}
+
+static struct elevator_type *elevator_get(const char *name)
+{
+       struct elevator_type *e;
+
+       spin_lock_irq(&elv_list_lock);
+
+       e = elevator_find(name);
+       if (e && !try_module_get(e->elevator_owner))
+               e = NULL;
+
+       spin_unlock_irq(&elv_list_lock);
+
+       return e;
+}
+
+static int elevator_attach(request_queue_t *q, struct elevator_type *e,
+                          struct elevator_queue *eq)
+{
+       int ret = 0;
+
+       memset(eq, 0, sizeof(*eq));
+       eq->ops = &e->ops;
+       eq->elevator_type = e;
+
+       q->elevator = eq;
+
+       if (eq->ops->elevator_init_fn)
+               ret = eq->ops->elevator_init_fn(q, eq);
+
+       return ret;
+}
+
+static char chosen_elevator[16];
+
+static void elevator_setup_default(void)
+{
+       struct elevator_type *e;
+
+       /*
+        * If default has not been set, use the compiled-in selection.
+        */
+       if (!chosen_elevator[0])
+               strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);
+
+       /*
+        * If the given scheduler is not available, fall back to no-op.
+        */
+       if (!(e = elevator_find(chosen_elevator)))
+               strcpy(chosen_elevator, "noop");
+       else
+               elevator_put(e);
+}
+
+static int __init elevator_setup(char *str)
+{
+       strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
+       return 0;
+}
+
+__setup("elevator=", elevator_setup);
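So the default scheduler can be overridden from the kernel command line; for example, booting with

    elevator=deadline

makes deadline the default for every queue that does not explicitly request another scheduler.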
+
+int elevator_init(request_queue_t *q, char *name)
+{
+       struct elevator_type *e = NULL;
+       struct elevator_queue *eq;
+       int ret = 0;
+
+       INIT_LIST_HEAD(&q->queue_head);
+       q->last_merge = NULL;
+       q->end_sector = 0;
+       q->boundary_rq = NULL;
+
+       elevator_setup_default();
+
+       if (!name)
+               name = chosen_elevator;
+
+       e = elevator_get(name);
+       if (!e)
+               return -EINVAL;
+
+       eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
+       if (!eq) {
+               elevator_put(e);
+               return -ENOMEM;
+       }
+
+       ret = elevator_attach(q, e, eq);
+       if (ret) {
+               kfree(eq);
+               elevator_put(e);
+       }
+
+       return ret;
+}
+
+void elevator_exit(elevator_t *e)
+{
+       if (e->ops->elevator_exit_fn)
+               e->ops->elevator_exit_fn(e);
+
+       elevator_put(e->elevator_type);
+       e->elevator_type = NULL;
+       kfree(e);
+}
+
+/*
+ * Insert rq into dispatch queue of q.  Queue lock must be held on
+ * entry.  rq is sort-inserted into the dispatch queue, keeping requests
+ * ordered around the q->end_sector boundary.  To be used by specific elevators.
+ */
+void elv_dispatch_sort(request_queue_t *q, struct request *rq)
+{
+       sector_t boundary;
+       struct list_head *entry;
+
+       if (q->last_merge == rq)
+               q->last_merge = NULL;
+
+       boundary = q->end_sector;
+
+       list_for_each_prev(entry, &q->queue_head) {
+               struct request *pos = list_entry_rq(entry);
+
+               if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
+                       break;
+               if (rq->sector >= boundary) {
+                       if (pos->sector < boundary)
+                               continue;
+               } else {
+                       if (pos->sector >= boundary)
+                               break;
+               }
+               if (rq->sector >= pos->sector)
+                       break;
+       }
+
+       list_add(&rq->queuelist, entry);
+}
+
+int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
+{
+       elevator_t *e = q->elevator;
+       int ret;
+
+       if (q->last_merge) {
+               ret = elv_try_merge(q->last_merge, bio);
+               if (ret != ELEVATOR_NO_MERGE) {
+                       *req = q->last_merge;
+                       return ret;
+               }
+       }
+
+       if (e->ops->elevator_merge_fn)
+               return e->ops->elevator_merge_fn(q, req, bio);
+
+       return ELEVATOR_NO_MERGE;
+}
+
+void elv_merged_request(request_queue_t *q, struct request *rq)
+{
+       elevator_t *e = q->elevator;
+
+       if (e->ops->elevator_merged_fn)
+               e->ops->elevator_merged_fn(q, rq);
+
+       q->last_merge = rq;
+}
+
+void elv_merge_requests(request_queue_t *q, struct request *rq,
+                            struct request *next)
+{
+       elevator_t *e = q->elevator;
+
+       if (e->ops->elevator_merge_req_fn)
+               e->ops->elevator_merge_req_fn(q, rq, next);
+
+       q->last_merge = rq;
+}
+
+void elv_requeue_request(request_queue_t *q, struct request *rq)
+{
+       elevator_t *e = q->elevator;
+
+       /*
+        * it already went through dequeue, we need to decrement the
+        * in_flight count again
+        */
+       if (blk_account_rq(rq)) {
+               q->in_flight--;
+               if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
+                       e->ops->elevator_deactivate_req_fn(q, rq);
+       }
+
+       rq->flags &= ~REQ_STARTED;
+
+       /*
+        * if this is the flush, requeue the original instead and drop the flush
+        */
+       if (rq->flags & REQ_BAR_FLUSH) {
+               clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
+               rq = rq->end_io_data;
+       }
+
+       __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+}
+
+void __elv_add_request(request_queue_t *q, struct request *rq, int where,
+                      int plug)
+{
+       if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+               /*
+                * barriers implicitly indicate back insertion
+                */
+               if (where == ELEVATOR_INSERT_SORT)
+                       where = ELEVATOR_INSERT_BACK;
+
+               /*
+                * this request is a scheduling boundary, update end_sector
+                */
+               if (blk_fs_request(rq)) {
+                       q->end_sector = rq_end_sector(rq);
+                       q->boundary_rq = rq;
+               }
+       } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+               where = ELEVATOR_INSERT_BACK;
+
+       if (plug)
+               blk_plug_device(q);
+
+       rq->q = q;
+
+       switch (where) {
+       case ELEVATOR_INSERT_FRONT:
+               rq->flags |= REQ_SOFTBARRIER;
+
+               list_add(&rq->queuelist, &q->queue_head);
+               break;
+
+       case ELEVATOR_INSERT_BACK:
+               rq->flags |= REQ_SOFTBARRIER;
+
+               while (q->elevator->ops->elevator_dispatch_fn(q, 1))
+                       ;
+               list_add_tail(&rq->queuelist, &q->queue_head);
+               /*
+                * We kick the queue here for the following reasons.
+                * - The elevator might have returned NULL previously
+                *   to delay requests and is returning them now.  As the
+                *   queue wasn't empty before this request, ll_rw_blk
+                *   won't run the queue on return, resulting in a hang.
+                * - Usually, back inserted requests won't be merged
+                *   with anything.  There's no point in delaying queue
+                *   processing.
+                */
+               blk_remove_plug(q);
+               q->request_fn(q);
+               break;
+
+       case ELEVATOR_INSERT_SORT:
+               BUG_ON(!blk_fs_request(rq));
+               rq->flags |= REQ_SORTED;
+               if (q->last_merge == NULL && rq_mergeable(rq))
+                       q->last_merge = rq;
+               /*
+                * Some ioscheds (cfq) run q->request_fn directly, so
+                * rq cannot be accessed after calling
+                * elevator_add_req_fn.
+                */
+               q->elevator->ops->elevator_add_req_fn(q, rq);
+               break;
+
+       default:
+               printk(KERN_ERR "%s: bad insertion point %d\n",
+                      __FUNCTION__, where);
+               BUG();
+       }
+
+       if (blk_queue_plugged(q)) {
+               int nrq = q->rq.count[READ] + q->rq.count[WRITE]
+                       - q->in_flight;
+
+               if (nrq >= q->unplug_thresh)
+                       __generic_unplug_device(q);
+       }
+}
+
+void elv_add_request(request_queue_t *q, struct request *rq, int where,
+                    int plug)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       __elv_add_request(q, rq, where, plug);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static inline struct request *__elv_next_request(request_queue_t *q)
+{
+       struct request *rq;
+
+       if (unlikely(list_empty(&q->queue_head) &&
+                    !q->elevator->ops->elevator_dispatch_fn(q, 0)))
+               return NULL;
+
+       rq = list_entry_rq(q->queue_head.next);
+
+       /*
+        * if this is a barrier write and the device has to issue a
+        * flush sequence to support it, check how far we are
+        */
+       if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
+               BUG_ON(q->ordered == QUEUE_ORDERED_NONE);
+
+               if (q->ordered == QUEUE_ORDERED_FLUSH &&
+                   !blk_barrier_preflush(rq))
+                       rq = blk_start_pre_flush(q, rq);
+       }
+
+       return rq;
+}
+
+struct request *elv_next_request(request_queue_t *q)
+{
+       struct request *rq;
+       int ret;
+
+       while ((rq = __elv_next_request(q)) != NULL) {
+               if (!(rq->flags & REQ_STARTED)) {
+                       elevator_t *e = q->elevator;
+
+                       /*
+                        * This is the first time the device driver
+                        * sees this request (possibly after
+                        * requeueing).  Notify IO scheduler.
+                        */
+                       if (blk_sorted_rq(rq) &&
+                           e->ops->elevator_activate_req_fn)
+                               e->ops->elevator_activate_req_fn(q, rq);
+
+                       /*
+                        * just mark it as started even if we don't start
+                        * it; a request that has been delayed should
+                        * not be passed by new incoming requests
+                        */
+                       rq->flags |= REQ_STARTED;
+               }
+
+               if (!q->boundary_rq || q->boundary_rq == rq) {
+                       q->end_sector = rq_end_sector(rq);
+                       q->boundary_rq = NULL;
+               }
+
+               if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
+                       break;
+
+               ret = q->prep_rq_fn(q, rq);
+               if (ret == BLKPREP_OK) {
+                       break;
+               } else if (ret == BLKPREP_DEFER) {
+                       /*
+                        * the request may have been (partially) prepped.
+                        * we need to keep this request in the front to
+                        * avoid resource deadlock.  REQ_STARTED will
+                        * prevent other fs requests from passing this one.
+                        */
+                       rq = NULL;
+                       break;
+               } else if (ret == BLKPREP_KILL) {
+                       int nr_bytes = rq->hard_nr_sectors << 9;
+
+                       if (!nr_bytes)
+                               nr_bytes = rq->data_len;
+
+                       blkdev_dequeue_request(rq);
+                       rq->flags |= REQ_QUIET;
+                       end_that_request_chunk(rq, 0, nr_bytes);
+                       end_that_request_last(rq);
+               } else {
+                       printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
+                                                               ret);
+                       break;
+               }
+       }
+
+       return rq;
+}
+
+void elv_dequeue_request(request_queue_t *q, struct request *rq)
+{
+       BUG_ON(list_empty(&rq->queuelist));
+
+       list_del_init(&rq->queuelist);
+
+       /*
+        * the time frame between a request being removed from the lists
+        * and when it is freed is accounted as io that is in progress on
+        * the driver side.
+        */
+       if (blk_account_rq(rq))
+               q->in_flight++;
+}
+
+int elv_queue_empty(request_queue_t *q)
+{
+       elevator_t *e = q->elevator;
+
+       if (!list_empty(&q->queue_head))
+               return 0;
+
+       if (e->ops->elevator_queue_empty_fn)
+               return e->ops->elevator_queue_empty_fn(q);
+
+       return 1;
+}
+
+struct request *elv_latter_request(request_queue_t *q, struct request *rq)
+{
+       struct list_head *next;
+
+       elevator_t *e = q->elevator;
+
+       if (e->ops->elevator_latter_req_fn)
+               return e->ops->elevator_latter_req_fn(q, rq);
+
+       next = rq->queuelist.next;
+       if (next != &q->queue_head && next != &rq->queuelist)
+               return list_entry_rq(next);
+
+       return NULL;
+}
+
+struct request *elv_former_request(request_queue_t *q, struct request *rq)
+{
+       struct list_head *prev;
+
+       elevator_t *e = q->elevator;
+
+       if (e->ops->elevator_former_req_fn)
+               return e->ops->elevator_former_req_fn(q, rq);
+
+       prev = rq->queuelist.prev;
+       if (prev != &q->queue_head && prev != &rq->queuelist)
+               return list_entry_rq(prev);
+
+       return NULL;
+}
+
+int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
+                   gfp_t gfp_mask)
+{
+       elevator_t *e = q->elevator;
+
+       if (e->ops->elevator_set_req_fn)
+               return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);
+
+       rq->elevator_private = NULL;
+       return 0;
+}
+
+void elv_put_request(request_queue_t *q, struct request *rq)
+{
+       elevator_t *e = q->elevator;
+
+       if (e->ops->elevator_put_req_fn)
+               e->ops->elevator_put_req_fn(q, rq);
+}
+
+int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
+{
+       elevator_t *e = q->elevator;
+
+       if (e->ops->elevator_may_queue_fn)
+               return e->ops->elevator_may_queue_fn(q, rw, bio);
+
+       return ELV_MQUEUE_MAY;
+}
+
+void elv_completed_request(request_queue_t *q, struct request *rq)
+{
+       elevator_t *e = q->elevator;
+
+       /*
+        * request is released from the driver, io must be done
+        */
+       if (blk_account_rq(rq)) {
+               q->in_flight--;
+               if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
+                       e->ops->elevator_completed_req_fn(q, rq);
+       }
+}
+
+int elv_register_queue(struct request_queue *q)
+{
+       elevator_t *e = q->elevator;
+
+       e->kobj.parent = kobject_get(&q->kobj);
+       if (!e->kobj.parent)
+               return -EBUSY;
+
+       snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
+       e->kobj.ktype = e->elevator_type->elevator_ktype;
+
+       return kobject_register(&e->kobj);
+}
+
+void elv_unregister_queue(struct request_queue *q)
+{
+       if (q) {
+               elevator_t *e = q->elevator;
+               kobject_unregister(&e->kobj);
+               kobject_put(&q->kobj);
+       }
+}
+
+int elv_register(struct elevator_type *e)
+{
+       spin_lock_irq(&elv_list_lock);
+       if (elevator_find(e->elevator_name))
+               BUG();
+       list_add_tail(&e->list, &elv_list);
+       spin_unlock_irq(&elv_list_lock);
+
+       printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
+       if (!strcmp(e->elevator_name, chosen_elevator))
+               printk(" (default)");
+       printk("\n");
+       return 0;
+}
+EXPORT_SYMBOL_GPL(elv_register);
+
+void elv_unregister(struct elevator_type *e)
+{
+       struct task_struct *g, *p;
+
+       /*
+        * Iterate over every thread in the system and remove its io contexts.
+        */
+       read_lock(&tasklist_lock);
+       do_each_thread(g, p) {
+               struct io_context *ioc = p->io_context;
+               if (ioc && ioc->cic) {
+                       ioc->cic->exit(ioc->cic);
+                       ioc->cic->dtor(ioc->cic);
+                       ioc->cic = NULL;
+               }
+               if (ioc && ioc->aic) {
+                       ioc->aic->exit(ioc->aic);
+                       ioc->aic->dtor(ioc->aic);
+                       ioc->aic = NULL;
+               }
+       } while_each_thread(g, p);
+       read_unlock(&tasklist_lock);
+
+       spin_lock_irq(&elv_list_lock);
+       list_del_init(&e->list);
+       spin_unlock_irq(&elv_list_lock);
+}
+EXPORT_SYMBOL_GPL(elv_unregister);
+
+/*
+ * switch to new_e io scheduler. Be careful not to introduce deadlocks -
+ * we don't free the old io scheduler before we have allocated what we
+ * need for the new one. This way we have a chance of going back to the
+ * old one if the new one fails init for some reason.
+ */
+static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
+{
+       elevator_t *old_elevator, *e;
+
+       /*
+        * Allocate new elevator
+        */
+       e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
+       if (!e)
+               goto error;
+
+       /*
+        * Turn on BYPASS and drain all requests w/ elevator private data
+        */
+       spin_lock_irq(q->queue_lock);
+
+       set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+
+       while (q->elevator->ops->elevator_dispatch_fn(q, 1))
+               ;
+
+       while (q->rq.elvpriv) {
+               spin_unlock_irq(q->queue_lock);
+               msleep(10);
+               spin_lock_irq(q->queue_lock);
+       }
+
+       spin_unlock_irq(q->queue_lock);
+
+       /*
+        * unregister old elevator data
+        */
+       elv_unregister_queue(q);
+       old_elevator = q->elevator;
+
+       /*
+        * attach and start new elevator
+        */
+       if (elevator_attach(q, new_e, e))
+               goto fail;
+
+       if (elv_register_queue(q))
+               goto fail_register;
+
+       /*
+        * finally exit old elevator and turn off BYPASS.
+        */
+       elevator_exit(old_elevator);
+       clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+       return;
+
+fail_register:
+       /*
+        * switch failed, exit the new io scheduler and reattach the old
+        * one again (along with re-adding the sysfs dir)
+        */
+       elevator_exit(e);
+       e = NULL;
+fail:
+       q->elevator = old_elevator;
+       elv_register_queue(q);
+       clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+       kfree(e);
+error:
+       elevator_put(new_e);
+       printk(KERN_ERR "elevator: switch to %s failed\n", new_e->elevator_name);
+}
+
+ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
+{
+       char elevator_name[ELV_NAME_MAX];
+       struct elevator_type *e;
+
+       memset(elevator_name, 0, sizeof(elevator_name));
+       strncpy(elevator_name, name, sizeof(elevator_name));
+
+       if (elevator_name[strlen(elevator_name) - 1] == '\n')
+               elevator_name[strlen(elevator_name) - 1] = '\0';
+
+       e = elevator_get(elevator_name);
+       if (!e) {
+               printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
+               return -EINVAL;
+       }
+
+       if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
+               elevator_put(e);
+               return count;
+       }
+
+       elevator_switch(q, e);
+       return count;
+}
+
+ssize_t elv_iosched_show(request_queue_t *q, char *name)
+{
+       elevator_t *e = q->elevator;
+       struct elevator_type *elv = e->elevator_type;
+       struct list_head *entry;
+       int len = 0;
+
+       spin_lock_irq(q->queue_lock);
+       list_for_each(entry, &elv_list) {
+               struct elevator_type *__e;
+
+               __e = list_entry(entry, struct elevator_type, list);
+               if (!strcmp(elv->elevator_name, __e->elevator_name))
+                       len += sprintf(name+len, "[%s] ", elv->elevator_name);
+               else
+                       len += sprintf(name+len, "%s ", __e->elevator_name);
+       }
+       spin_unlock_irq(q->queue_lock);
+
+       len += sprintf(len+name, "\n");
+       return len;
+}
+
+EXPORT_SYMBOL(elv_dispatch_sort);
+EXPORT_SYMBOL(elv_add_request);
+EXPORT_SYMBOL(__elv_add_request);
+EXPORT_SYMBOL(elv_requeue_request);
+EXPORT_SYMBOL(elv_next_request);
+EXPORT_SYMBOL(elv_dequeue_request);
+EXPORT_SYMBOL(elv_queue_empty);
+EXPORT_SYMBOL(elv_completed_request);
+EXPORT_SYMBOL(elevator_exit);
+EXPORT_SYMBOL(elevator_init);
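elv_iosched_show() and elv_iosched_store() back the per-queue scheduler attribute in sysfs (wired up in ll_rw_blk.c), so an elevator can also be switched at runtime rather than only via the elevator= boot parameter. A small user-space sketch, again assuming a hypothetical disk "hda":

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Sketch: switch the I/O scheduler of "hda" to noop.
     * Reading the same file lists all registered schedulers, with the
     * active one shown in square brackets. */
    int main(void)
    {
            int fd = open("/sys/block/hda/queue/scheduler", O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* elv_iosched_store() strips the trailing newline and looks the name up */
            if (write(fd, "noop\n", 5) != 5)
                    perror("write");
            close(fd);
            return 0;
    }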
diff --git a/block/genhd.c b/block/genhd.c
new file mode 100644 (file)
index 0000000..54aec4a
--- /dev/null
@@ -0,0 +1,726 @@
+/*
+ *  gendisk handling
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/kmod.h>
+#include <linux/kobj_map.h>
+#include <linux/buffer_head.h>
+
+#define MAX_PROBE_HASH 255     /* random */
+
+static struct subsystem block_subsys;
+
+static DECLARE_MUTEX(block_subsys_sem);
+
+/*
+ * Can be deleted altogether. Later.
+ *
+ */
+static struct blk_major_name {
+       struct blk_major_name *next;
+       int major;
+       char name[16];
+} *major_names[MAX_PROBE_HASH];
+
+/* index in the above - for now: assume no multimajor ranges */
+static inline int major_to_index(int major)
+{
+       return major % MAX_PROBE_HASH;
+}
+
+#ifdef CONFIG_PROC_FS
+/* get block device names in somewhat random order */
+int get_blkdev_list(char *p, int used)
+{
+       struct blk_major_name *n;
+       int i, len;
+
+       len = snprintf(p, (PAGE_SIZE-used), "\nBlock devices:\n");
+
+       down(&block_subsys_sem);
+       for (i = 0; i < ARRAY_SIZE(major_names); i++) {
+               for (n = major_names[i]; n; n = n->next) {
+                       /*
+                        * If the current string plus the 5 extra characters
+                        * in the line would run us off the page, then we're done
+                        */
+                       if ((len + used + strlen(n->name) + 5) >= PAGE_SIZE)
+                               goto page_full;
+                       len += sprintf(p+len, "%3d %s\n",
+                                      n->major, n->name);
+               }
+       }
+page_full:
+       up(&block_subsys_sem);
+
+       return len;
+}
+#endif
+
+int register_blkdev(unsigned int major, const char *name)
+{
+       struct blk_major_name **n, *p;
+       int index, ret = 0;
+
+       down(&block_subsys_sem);
+
+       /* temporary */
+       if (major == 0) {
+               for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
+                       if (major_names[index] == NULL)
+                               break;
+               }
+
+               if (index == 0) {
+                       printk("register_blkdev: failed to get major for %s\n",
+                              name);
+                       ret = -EBUSY;
+                       goto out;
+               }
+               major = index;
+               ret = major;
+       }
+
+       p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
+       if (p == NULL) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       p->major = major;
+       strlcpy(p->name, name, sizeof(p->name));
+       p->next = NULL;
+       index = major_to_index(major);
+
+       for (n = &major_names[index]; *n; n = &(*n)->next) {
+               if ((*n)->major == major)
+                       break;
+       }
+       if (!*n)
+               *n = p;
+       else
+               ret = -EBUSY;
+
+       if (ret < 0) {
+               printk("register_blkdev: cannot get major %d for %s\n",
+                      major, name);
+               kfree(p);
+       }
+out:
+       up(&block_subsys_sem);
+       return ret;
+}
+
+EXPORT_SYMBOL(register_blkdev);
+
+/* todo: make void - error printk here */
+int unregister_blkdev(unsigned int major, const char *name)
+{
+       struct blk_major_name **n;
+       struct blk_major_name *p = NULL;
+       int index = major_to_index(major);
+       int ret = 0;
+
+       down(&block_subsys_sem);
+       for (n = &major_names[index]; *n; n = &(*n)->next)
+               if ((*n)->major == major)
+                       break;
+       if (!*n || strcmp((*n)->name, name))
+               ret = -EINVAL;
+       else {
+               p = *n;
+               *n = p->next;
+       }
+       up(&block_subsys_sem);
+       kfree(p);
+
+       return ret;
+}
+
+EXPORT_SYMBOL(unregister_blkdev);
+
+static struct kobj_map *bdev_map;
+
+/*
+ * Register device numbers dev..(dev+range-1)
+ * range must be nonzero
+ * The hash chain is sorted on range, so that subranges can override.
+ */
+void blk_register_region(dev_t dev, unsigned long range, struct module *module,
+                        struct kobject *(*probe)(dev_t, int *, void *),
+                        int (*lock)(dev_t, void *), void *data)
+{
+       kobj_map(bdev_map, dev, range, module, probe, lock, data);
+}
+
+EXPORT_SYMBOL(blk_register_region);
+
+void blk_unregister_region(dev_t dev, unsigned long range)
+{
+       kobj_unmap(bdev_map, dev, range);
+}
+
+EXPORT_SYMBOL(blk_unregister_region);
+
+static struct kobject *exact_match(dev_t dev, int *part, void *data)
+{
+       struct gendisk *p = data;
+       return &p->kobj;
+}
+
+static int exact_lock(dev_t dev, void *data)
+{
+       struct gendisk *p = data;
+
+       if (!get_disk(p))
+               return -1;
+       return 0;
+}
+
+/**
+ * add_disk - add partitioning information to kernel list
+ * @disk: per-device partitioning information
+ *
+ * This function registers the partitioning information in @disk
+ * with the kernel.
+ */
+void add_disk(struct gendisk *disk)
+{
+       disk->flags |= GENHD_FL_UP;
+       blk_register_region(MKDEV(disk->major, disk->first_minor),
+                           disk->minors, NULL, exact_match, exact_lock, disk);
+       register_disk(disk);
+       blk_register_queue(disk);
+}
+
+EXPORT_SYMBOL(add_disk);
+EXPORT_SYMBOL(del_gendisk);    /* in partitions/check.c */
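A block driver typically pairs register_blkdev() with alloc_disk()/add_disk(). A rough in-kernel sketch of that registration sequence is shown below; my_request_fn, my_fops, my_nr_sectors and the "mydrv" name are placeholders, and error handling is trimmed to the essentials.

    #include <linux/module.h>
    #include <linux/blkdev.h>
    #include <linux/genhd.h>

    static int my_major;
    static struct gendisk *my_disk;
    static DEFINE_SPINLOCK(my_lock);

    static int __init my_init(void)
    {
            my_major = register_blkdev(0, "mydrv");         /* 0 => allocate a free major */
            if (my_major < 0)
                    return my_major;

            my_disk = alloc_disk(16);                       /* whole disk + up to 15 partitions */
            if (!my_disk)
                    goto out_unregister;

            my_disk->queue = blk_init_queue(my_request_fn, &my_lock);
            if (!my_disk->queue)
                    goto out_put;

            my_disk->major = my_major;
            my_disk->first_minor = 0;
            my_disk->fops = &my_fops;
            sprintf(my_disk->disk_name, "mydrv0");
            set_capacity(my_disk, my_nr_sectors);           /* capacity in 512-byte sectors */

            add_disk(my_disk);                              /* the disk becomes visible here */
            return 0;

    out_put:
            put_disk(my_disk);
    out_unregister:
            unregister_blkdev(my_major, "mydrv");
            return -ENOMEM;
    }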
+
+void unlink_gendisk(struct gendisk *disk)
+{
+       blk_unregister_queue(disk);
+       blk_unregister_region(MKDEV(disk->major, disk->first_minor),
+                             disk->minors);
+}
+
+#define to_disk(obj) container_of(obj,struct gendisk,kobj)
+
+/**
+ * get_gendisk - get partitioning information for a given device
+ * @dev: device to get partitioning information for
+ *
+ * This function gets the structure containing partitioning
+ * information for the given device @dev.
+ */
+struct gendisk *get_gendisk(dev_t dev, int *part)
+{
+       struct kobject *kobj = kobj_lookup(bdev_map, dev, part);
+       return  kobj ? to_disk(kobj) : NULL;
+}
+
+#ifdef CONFIG_PROC_FS
+/* iterator */
+static void *part_start(struct seq_file *part, loff_t *pos)
+{
+       struct list_head *p;
+       loff_t l = *pos;
+
+       down(&block_subsys_sem);
+       list_for_each(p, &block_subsys.kset.list)
+               if (!l--)
+                       return list_entry(p, struct gendisk, kobj.entry);
+       return NULL;
+}
+
+static void *part_next(struct seq_file *part, void *v, loff_t *pos)
+{
+       struct list_head *p = ((struct gendisk *)v)->kobj.entry.next;
+       ++*pos;
+       return p==&block_subsys.kset.list ? NULL : 
+               list_entry(p, struct gendisk, kobj.entry);
+}
+
+static void part_stop(struct seq_file *part, void *v)
+{
+       up(&block_subsys_sem);
+}
+
+static int show_partition(struct seq_file *part, void *v)
+{
+       struct gendisk *sgp = v;
+       int n;
+       char buf[BDEVNAME_SIZE];
+
+       if (&sgp->kobj.entry == block_subsys.kset.list.next)
+               seq_puts(part, "major minor  #blocks  name\n\n");
+
+       /* Don't show non-partitionable removable devices or empty devices */
+       if (!get_capacity(sgp) ||
+                       (sgp->minors == 1 && (sgp->flags & GENHD_FL_REMOVABLE)))
+               return 0;
+       if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
+               return 0;
+
+       /* show the full disk and all non-0 size partitions of it */
+       seq_printf(part, "%4d  %4d %10llu %s\n",
+               sgp->major, sgp->first_minor,
+               (unsigned long long)get_capacity(sgp) >> 1,
+               disk_name(sgp, 0, buf));
+       for (n = 0; n < sgp->minors - 1; n++) {
+               if (!sgp->part[n])
+                       continue;
+               if (sgp->part[n]->nr_sects == 0)
+                       continue;
+               seq_printf(part, "%4d  %4d %10llu %s\n",
+                       sgp->major, n + 1 + sgp->first_minor,
+                       (unsigned long long)sgp->part[n]->nr_sects >> 1 ,
+                       disk_name(sgp, n + 1, buf));
+       }
+
+       return 0;
+}
+
+struct seq_operations partitions_op = {
+       .start = part_start,
+       .next = part_next,
+       .stop = part_stop,
+       .show = show_partition
+};
+#endif
+
+
+extern int blk_dev_init(void);
+
+static struct kobject *base_probe(dev_t dev, int *part, void *data)
+{
+       if (request_module("block-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
+               /* Make old-style 2.4 aliases work */
+               request_module("block-major-%d", MAJOR(dev));
+       return NULL;
+}
+
+static int __init genhd_device_init(void)
+{
+       bdev_map = kobj_map_init(base_probe, &block_subsys_sem);
+       blk_dev_init();
+       subsystem_register(&block_subsys);
+       return 0;
+}
+
+subsys_initcall(genhd_device_init);
+
+
+
+/*
+ * kobject & sysfs bindings for block devices
+ */
+static ssize_t disk_attr_show(struct kobject *kobj, struct attribute *attr,
+                             char *page)
+{
+       struct gendisk *disk = to_disk(kobj);
+       struct disk_attribute *disk_attr =
+               container_of(attr,struct disk_attribute,attr);
+       ssize_t ret = -EIO;
+
+       if (disk_attr->show)
+               ret = disk_attr->show(disk,page);
+       return ret;
+}
+
+static ssize_t disk_attr_store(struct kobject * kobj, struct attribute * attr,
+                              const char *page, size_t count)
+{
+       struct gendisk *disk = to_disk(kobj);
+       struct disk_attribute *disk_attr =
+               container_of(attr,struct disk_attribute,attr);
+       ssize_t ret = 0;
+
+       if (disk_attr->store)
+               ret = disk_attr->store(disk, page, count);
+       return ret;
+}
+
+static struct sysfs_ops disk_sysfs_ops = {
+       .show   = &disk_attr_show,
+       .store  = &disk_attr_store,
+};
+
+static ssize_t disk_uevent_store(struct gendisk * disk,
+                                const char *buf, size_t count)
+{
+       kobject_hotplug(&disk->kobj, KOBJ_ADD);
+       return count;
+}
+static ssize_t disk_dev_read(struct gendisk * disk, char *page)
+{
+       dev_t base = MKDEV(disk->major, disk->first_minor); 
+       return print_dev_t(page, base);
+}
+static ssize_t disk_range_read(struct gendisk * disk, char *page)
+{
+       return sprintf(page, "%d\n", disk->minors);
+}
+static ssize_t disk_removable_read(struct gendisk * disk, char *page)
+{
+       return sprintf(page, "%d\n",
+                      (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
+
+}
+static ssize_t disk_size_read(struct gendisk * disk, char *page)
+{
+       return sprintf(page, "%llu\n", (unsigned long long)get_capacity(disk));
+}
+
+static ssize_t disk_stats_read(struct gendisk * disk, char *page)
+{
+       preempt_disable();
+       disk_round_stats(disk);
+       preempt_enable();
+       return sprintf(page,
+               "%8u %8u %8llu %8u "
+               "%8u %8u %8llu %8u "
+               "%8u %8u %8u"
+               "\n",
+               disk_stat_read(disk, ios[0]), disk_stat_read(disk, merges[0]),
+               (unsigned long long)disk_stat_read(disk, sectors[0]),
+               jiffies_to_msecs(disk_stat_read(disk, ticks[0])),
+               disk_stat_read(disk, ios[1]), disk_stat_read(disk, merges[1]),
+               (unsigned long long)disk_stat_read(disk, sectors[1]),
+               jiffies_to_msecs(disk_stat_read(disk, ticks[1])),
+               disk->in_flight,
+               jiffies_to_msecs(disk_stat_read(disk, io_ticks)),
+               jiffies_to_msecs(disk_stat_read(disk, time_in_queue)));
+}
+static struct disk_attribute disk_attr_uevent = {
+       .attr = {.name = "uevent", .mode = S_IWUSR },
+       .store  = disk_uevent_store
+};
+static struct disk_attribute disk_attr_dev = {
+       .attr = {.name = "dev", .mode = S_IRUGO },
+       .show   = disk_dev_read
+};
+static struct disk_attribute disk_attr_range = {
+       .attr = {.name = "range", .mode = S_IRUGO },
+       .show   = disk_range_read
+};
+static struct disk_attribute disk_attr_removable = {
+       .attr = {.name = "removable", .mode = S_IRUGO },
+       .show   = disk_removable_read
+};
+static struct disk_attribute disk_attr_size = {
+       .attr = {.name = "size", .mode = S_IRUGO },
+       .show   = disk_size_read
+};
+static struct disk_attribute disk_attr_stat = {
+       .attr = {.name = "stat", .mode = S_IRUGO },
+       .show   = disk_stats_read
+};
+
+static struct attribute * default_attrs[] = {
+       &disk_attr_uevent.attr,
+       &disk_attr_dev.attr,
+       &disk_attr_range.attr,
+       &disk_attr_removable.attr,
+       &disk_attr_size.attr,
+       &disk_attr_stat.attr,
+       NULL,
+};
+
+static void disk_release(struct kobject * kobj)
+{
+       struct gendisk *disk = to_disk(kobj);
+       kfree(disk->random);
+       kfree(disk->part);
+       free_disk_stats(disk);
+       kfree(disk);
+}
+
+static struct kobj_type ktype_block = {
+       .release        = disk_release,
+       .sysfs_ops      = &disk_sysfs_ops,
+       .default_attrs  = default_attrs,
+};
+
+extern struct kobj_type ktype_part;
+
+static int block_hotplug_filter(struct kset *kset, struct kobject *kobj)
+{
+       struct kobj_type *ktype = get_ktype(kobj);
+
+       return ((ktype == &ktype_block) || (ktype == &ktype_part));
+}
+
+static int block_hotplug(struct kset *kset, struct kobject *kobj, char **envp,
+                        int num_envp, char *buffer, int buffer_size)
+{
+       struct kobj_type *ktype = get_ktype(kobj);
+       struct device *physdev;
+       struct gendisk *disk;
+       struct hd_struct *part;
+       int length = 0;
+       int i = 0;
+
+       if (ktype == &ktype_block) {
+               disk = container_of(kobj, struct gendisk, kobj);
+               add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size,
+                                   &length, "MINOR=%u", disk->first_minor);
+       } else if (ktype == &ktype_part) {
+               disk = container_of(kobj->parent, struct gendisk, kobj);
+               part = container_of(kobj, struct hd_struct, kobj);
+               add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size,
+                                   &length, "MINOR=%u",
+                                   disk->first_minor + part->partno);
+       } else
+               return 0;
+
+       add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size, &length,
+                           "MAJOR=%u", disk->major);
+
+       /* add physical device, backing this device  */
+       physdev = disk->driverfs_dev;
+       if (physdev) {
+               char *path = kobject_get_path(&physdev->kobj, GFP_KERNEL);
+
+               add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size,
+                                   &length, "PHYSDEVPATH=%s", path);
+               kfree(path);
+
+               if (physdev->bus)
+                       add_hotplug_env_var(envp, num_envp, &i,
+                                           buffer, buffer_size, &length,
+                                           "PHYSDEVBUS=%s",
+                                           physdev->bus->name);
+
+               if (physdev->driver)
+                       add_hotplug_env_var(envp, num_envp, &i,
+                                           buffer, buffer_size, &length,
+                                           "PHYSDEVDRIVER=%s",
+                                           physdev->driver->name);
+       }
+
+       /* terminate, set to next free slot, shrink available space */
+       envp[i] = NULL;
+       envp = &envp[i];
+       num_envp -= i;
+       buffer = &buffer[length];
+       buffer_size -= length;
+
+       return 0;
+}
+
+static struct kset_hotplug_ops block_hotplug_ops = {
+       .filter         = block_hotplug_filter,
+       .hotplug        = block_hotplug,
+};
+
+/* declare block_subsys. */
+static decl_subsys(block, &ktype_block, &block_hotplug_ops);
+
+
+/*
+ * aggregate disk stat collector.  Uses the same stats that the sysfs
+ * entries do, above, but makes them available through one seq_file.
+ * Watching a few disks may be efficient through sysfs, but watching
+ * all of them will be more efficient through this interface.
+ *
+ * The output looks suspiciously like /proc/partitions with a bunch of
+ * extra fields.
+ */
+
+/* iterator */
+static void *diskstats_start(struct seq_file *part, loff_t *pos)
+{
+       loff_t k = *pos;
+       struct list_head *p;
+
+       down(&block_subsys_sem);
+       list_for_each(p, &block_subsys.kset.list)
+               if (!k--)
+                       return list_entry(p, struct gendisk, kobj.entry);
+       return NULL;
+}
+
+static void *diskstats_next(struct seq_file *part, void *v, loff_t *pos)
+{
+       struct list_head *p = ((struct gendisk *)v)->kobj.entry.next;
+       ++*pos;
+       return p==&block_subsys.kset.list ? NULL :
+               list_entry(p, struct gendisk, kobj.entry);
+}
+
+static void diskstats_stop(struct seq_file *part, void *v)
+{
+       up(&block_subsys_sem);
+}
+
+static int diskstats_show(struct seq_file *s, void *v)
+{
+       struct gendisk *gp = v;
+       char buf[BDEVNAME_SIZE];
+       int n = 0;
+
+       /*
+       if (&sgp->kobj.entry == block_subsys.kset.list.next)
+               seq_puts(s,     "major minor name"
+                               "     rio rmerge rsect ruse wio wmerge "
+                               "wsect wuse running use aveq"
+                               "\n\n");
+       */
+       preempt_disable();
+       disk_round_stats(gp);
+       preempt_enable();
+       seq_printf(s, "%4d %4d %s %u %u %llu %u %u %u %llu %u %u %u %u\n",
+               gp->major, n + gp->first_minor, disk_name(gp, n, buf),
+               disk_stat_read(gp, ios[0]), disk_stat_read(gp, merges[0]),
+               (unsigned long long)disk_stat_read(gp, sectors[0]),
+               jiffies_to_msecs(disk_stat_read(gp, ticks[0])),
+               disk_stat_read(gp, ios[1]), disk_stat_read(gp, merges[1]),
+               (unsigned long long)disk_stat_read(gp, sectors[1]),
+               jiffies_to_msecs(disk_stat_read(gp, ticks[1])),
+               gp->in_flight,
+               jiffies_to_msecs(disk_stat_read(gp, io_ticks)),
+               jiffies_to_msecs(disk_stat_read(gp, time_in_queue)));
+
+       /* now show all non-0 size partitions of it */
+       for (n = 0; n < gp->minors - 1; n++) {
+               struct hd_struct *hd = gp->part[n];
+
+               if (hd && hd->nr_sects)
+                       seq_printf(s, "%4d %4d %s %u %u %u %u\n",
+                               gp->major, n + gp->first_minor + 1,
+                               disk_name(gp, n + 1, buf),
+                               hd->ios[0], hd->sectors[0],
+                               hd->ios[1], hd->sectors[1]);
+       }
+       return 0;
+}
+
+struct seq_operations diskstats_op = {
+       .start  = diskstats_start,
+       .next   = diskstats_next,
+       .stop   = diskstats_stop,
+       .show   = diskstats_show
+};
+
+struct gendisk *alloc_disk(int minors)
+{
+       return alloc_disk_node(minors, -1);
+}
+
+struct gendisk *alloc_disk_node(int minors, int node_id)
+{
+       struct gendisk *disk;
+
+       disk = kmalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
+       if (disk) {
+               memset(disk, 0, sizeof(struct gendisk));
+               if (!init_disk_stats(disk)) {
+                       kfree(disk);
+                       return NULL;
+               }
+               if (minors > 1) {
+                       int size = (minors - 1) * sizeof(struct hd_struct *);
+                       disk->part = kmalloc_node(size, GFP_KERNEL, node_id);
+                       if (!disk->part) {
+                               kfree(disk);
+                               return NULL;
+                       }
+                       memset(disk->part, 0, size);
+               }
+               disk->minors = minors;
+               kobj_set_kset_s(disk,block_subsys);
+               kobject_init(&disk->kobj);
+               rand_initialize_disk(disk);
+       }
+       return disk;
+}
+
+EXPORT_SYMBOL(alloc_disk);
+EXPORT_SYMBOL(alloc_disk_node);
+
+struct kobject *get_disk(struct gendisk *disk)
+{
+       struct module *owner;
+       struct kobject *kobj;
+
+       if (!disk->fops)
+               return NULL;
+       owner = disk->fops->owner;
+       if (owner && !try_module_get(owner))
+               return NULL;
+       kobj = kobject_get(&disk->kobj);
+       if (kobj == NULL) {
+               module_put(owner);
+               return NULL;
+       }
+       return kobj;
+
+}
+
+EXPORT_SYMBOL(get_disk);
+
+void put_disk(struct gendisk *disk)
+{
+       if (disk)
+               kobject_put(&disk->kobj);
+}
+
+EXPORT_SYMBOL(put_disk);
+
+void set_device_ro(struct block_device *bdev, int flag)
+{
+       if (bdev->bd_contains != bdev)
+               bdev->bd_part->policy = flag;
+       else
+               bdev->bd_disk->policy = flag;
+}
+
+EXPORT_SYMBOL(set_device_ro);
+
+void set_disk_ro(struct gendisk *disk, int flag)
+{
+       int i;
+       disk->policy = flag;
+       for (i = 0; i < disk->minors - 1; i++)
+               if (disk->part[i]) disk->part[i]->policy = flag;
+}
+
+EXPORT_SYMBOL(set_disk_ro);
+
+int bdev_read_only(struct block_device *bdev)
+{
+       if (!bdev)
+               return 0;
+       else if (bdev->bd_contains != bdev)
+               return bdev->bd_part->policy;
+       else
+               return bdev->bd_disk->policy;
+}
+
+EXPORT_SYMBOL(bdev_read_only);
+
+int invalidate_partition(struct gendisk *disk, int index)
+{
+       int res = 0;
+       struct block_device *bdev = bdget_disk(disk, index);
+       if (bdev) {
+               fsync_bdev(bdev);
+               res = __invalidate_device(bdev);
+               bdput(bdev);
+       }
+       return res;
+}
+
+EXPORT_SYMBOL(invalidate_partition);
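The diskstats seq_file above is what backs /proc/diskstats: each whole-disk line carries the device numbers, the disk name and the eleven counters printed by diskstats_show() (reads, read merges, sectors read, read ticks, writes, write merges, sectors written, write ticks, in-flight, io_ticks, time_in_queue). A small user-space sketch that parses the per-disk lines:

    #include <stdio.h>

    /* Sketch: print read/write sector counts for every whole disk in /proc/diskstats. */
    int main(void)
    {
            FILE *f = fopen("/proc/diskstats", "r");
            char line[256], name[64];
            unsigned int major, minor;
            unsigned long long rd_ios, rd_merges, rd_sectors, rd_ticks;
            unsigned long long wr_ios, wr_merges, wr_sectors, wr_ticks;
            unsigned long long in_flight, io_ticks, time_in_queue;

            if (!f) {
                    perror("/proc/diskstats");
                    return 1;
            }
            while (fgets(line, sizeof(line), f)) {
                    int n = sscanf(line,
                            "%u %u %63s %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
                            &major, &minor, name,
                            &rd_ios, &rd_merges, &rd_sectors, &rd_ticks,
                            &wr_ios, &wr_merges, &wr_sectors, &wr_ticks,
                            &in_flight, &io_ticks, &time_in_queue);

                    if (n == 14)    /* whole disks carry 11 counters; partitions only 4 */
                            printf("%-10s read %llu sectors, wrote %llu sectors\n",
                                   name, rd_sectors, wr_sectors);
            }
            fclose(f);
            return 0;
    }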
diff --git a/block/ioctl.c b/block/ioctl.c
new file mode 100644 (file)
index 0000000..6e27847
--- /dev/null
@@ -0,0 +1,275 @@
+#include <linux/sched.h>               /* for capable() */
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/backing-dev.h>
+#include <linux/buffer_head.h>
+#include <linux/smp_lock.h>
+#include <asm/uaccess.h>
+
+static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
+{
+       struct block_device *bdevp;
+       struct gendisk *disk;
+       struct blkpg_ioctl_arg a;
+       struct blkpg_partition p;
+       long long start, length;
+       int part;
+       int i;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+       if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
+               return -EFAULT;
+       if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
+               return -EFAULT;
+       disk = bdev->bd_disk;
+       if (bdev != bdev->bd_contains)
+               return -EINVAL;
+       part = p.pno;
+       if (part <= 0 || part >= disk->minors)
+               return -EINVAL;
+       switch (a.op) {
+               case BLKPG_ADD_PARTITION:
+                       start = p.start >> 9;
+                       length = p.length >> 9;
+                       /* check for fit in a hd_struct */ 
+                       if (sizeof(sector_t) == sizeof(long) && 
+                           sizeof(long long) > sizeof(long)) {
+                               long pstart = start, plength = length;
+                               if (pstart != start || plength != length
+                                   || pstart < 0 || plength < 0)
+                                       return -EINVAL;
+                       }
+                       /* partition number in use? */
+                       down(&bdev->bd_sem);
+                       if (disk->part[part - 1]) {
+                               up(&bdev->bd_sem);
+                               return -EBUSY;
+                       }
+                       /* overlap? */
+                       for (i = 0; i < disk->minors - 1; i++) {
+                               struct hd_struct *s = disk->part[i];
+
+                               if (!s)
+                                       continue;
+                               if (!(start+length <= s->start_sect ||
+                                     start >= s->start_sect + s->nr_sects)) {
+                                       up(&bdev->bd_sem);
+                                       return -EBUSY;
+                               }
+                       }
+                       /* all seems OK */
+                       add_partition(disk, part, start, length);
+                       up(&bdev->bd_sem);
+                       return 0;
+               case BLKPG_DEL_PARTITION:
+                       if (!disk->part[part-1])
+                               return -ENXIO;
+                       if (disk->part[part - 1]->nr_sects == 0)
+                               return -ENXIO;
+                       bdevp = bdget_disk(disk, part);
+                       if (!bdevp)
+                               return -ENOMEM;
+                       down(&bdevp->bd_sem);
+                       if (bdevp->bd_openers) {
+                               up(&bdevp->bd_sem);
+                               bdput(bdevp);
+                               return -EBUSY;
+                       }
+                       /* all seems OK */
+                       fsync_bdev(bdevp);
+                       invalidate_bdev(bdevp, 0);
+
+                       down(&bdev->bd_sem);
+                       delete_partition(disk, part);
+                       up(&bdev->bd_sem);
+                       up(&bdevp->bd_sem);
+                       bdput(bdevp);
+
+                       return 0;
+               default:
+                       return -EINVAL;
+       }
+}
+
+static int blkdev_reread_part(struct block_device *bdev)
+{
+       struct gendisk *disk = bdev->bd_disk;
+       int res;
+
+       if (disk->minors == 1 || bdev != bdev->bd_contains)
+               return -EINVAL;
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+       if (down_trylock(&bdev->bd_sem))
+               return -EBUSY;
+       res = rescan_partitions(disk, bdev);
+       up(&bdev->bd_sem);
+       return res;
+}
+
+static int put_ushort(unsigned long arg, unsigned short val)
+{
+       return put_user(val, (unsigned short __user *)arg);
+}
+
+static int put_int(unsigned long arg, int val)
+{
+       return put_user(val, (int __user *)arg);
+}
+
+static int put_long(unsigned long arg, long val)
+{
+       return put_user(val, (long __user *)arg);
+}
+
+static int put_ulong(unsigned long arg, unsigned long val)
+{
+       return put_user(val, (unsigned long __user *)arg);
+}
+
+static int put_u64(unsigned long arg, u64 val)
+{
+       return put_user(val, (u64 __user *)arg);
+}
+
+static int blkdev_locked_ioctl(struct file *file, struct block_device *bdev,
+                               unsigned cmd, unsigned long arg)
+{
+       struct backing_dev_info *bdi;
+       int ret, n;
+
+       switch (cmd) {
+       case BLKRAGET:
+       case BLKFRAGET:
+               if (!arg)
+                       return -EINVAL;
+               bdi = blk_get_backing_dev_info(bdev);
+               if (bdi == NULL)
+                       return -ENOTTY;
+               return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+       case BLKROGET:
+               return put_int(arg, bdev_read_only(bdev) != 0);
+       case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */
+               return put_int(arg, block_size(bdev));
+       case BLKSSZGET: /* get block device hardware sector size */
+               return put_int(arg, bdev_hardsect_size(bdev));
+       case BLKSECTGET:
+               return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
+       case BLKRASET:
+       case BLKFRASET:
+               if(!capable(CAP_SYS_ADMIN))
+                       return -EACCES;
+               bdi = blk_get_backing_dev_info(bdev);
+               if (bdi == NULL)
+                       return -ENOTTY;
+               bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+               return 0;
+       case BLKBSZSET:
+               /* set the logical block size */
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EACCES;
+               if (!arg)
+                       return -EINVAL;
+               if (get_user(n, (int __user *) arg))
+                       return -EFAULT;
+               if (bd_claim(bdev, file) < 0)
+                       return -EBUSY;
+               ret = set_blocksize(bdev, n);
+               bd_release(bdev);
+               return ret;
+       case BLKPG:
+               return blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);
+       case BLKRRPART:
+               return blkdev_reread_part(bdev);
+       case BLKGETSIZE:
+               if ((bdev->bd_inode->i_size >> 9) > ~0UL)
+                       return -EFBIG;
+               return put_ulong(arg, bdev->bd_inode->i_size >> 9);
+       case BLKGETSIZE64:
+               return put_u64(arg, bdev->bd_inode->i_size);
+       }
+       return -ENOIOCTLCMD;
+}
+
+static int blkdev_driver_ioctl(struct inode *inode, struct file *file,
+               struct gendisk *disk, unsigned cmd, unsigned long arg)
+{
+       int ret;
+       if (disk->fops->unlocked_ioctl)
+               return disk->fops->unlocked_ioctl(file, cmd, arg);
+
+       if (disk->fops->ioctl) {
+               lock_kernel();
+               ret = disk->fops->ioctl(inode, file, cmd, arg);
+               unlock_kernel();
+               return ret;
+       }
+
+       return -ENOTTY;
+}
+
+int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
+                       unsigned long arg)
+{
+       struct block_device *bdev = inode->i_bdev;
+       struct gendisk *disk = bdev->bd_disk;
+       int ret, n;
+
+       switch(cmd) {
+       case BLKFLSBUF:
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EACCES;
+
+               ret = blkdev_driver_ioctl(inode, file, disk, cmd, arg);
+               /* -EINVAL to handle old uncorrected drivers */
+               if (ret != -EINVAL && ret != -ENOTTY)
+                       return ret;
+
+               lock_kernel();
+               fsync_bdev(bdev);
+               invalidate_bdev(bdev, 0);
+               unlock_kernel();
+               return 0;
+
+       case BLKROSET:
+               ret = blkdev_driver_ioctl(inode, file, disk, cmd, arg);
+               /* -EINVAL to handle old uncorrected drivers */
+               if (ret != -EINVAL && ret != -ENOTTY)
+                       return ret;
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EACCES;
+               if (get_user(n, (int __user *)(arg)))
+                       return -EFAULT;
+               lock_kernel();
+               set_device_ro(bdev, n);
+               unlock_kernel();
+               return 0;
+       }
+
+       lock_kernel();
+       ret = blkdev_locked_ioctl(file, bdev, cmd, arg);
+       unlock_kernel();
+       if (ret != -ENOIOCTLCMD)
+               return ret;
+
+       return blkdev_driver_ioctl(inode, file, disk, cmd, arg);
+}
+
+/* Most of the generic ioctls are handled in the normal fallback path.
+   This assumes the blkdev's low level compat_ioctl always returns
+   ENOIOCTLCMD for unknown ioctls. */
+long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+       struct block_device *bdev = file->f_dentry->d_inode->i_bdev;
+       struct gendisk *disk = bdev->bd_disk;
+       int ret = -ENOIOCTLCMD;
+       if (disk->fops->compat_ioctl) {
+               lock_kernel();
+               ret = disk->fops->compat_ioctl(file, cmd, arg);
+               unlock_kernel();
+       }
+       return ret;
+}
+
+EXPORT_SYMBOL_GPL(blkdev_ioctl);
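/*
 * Illustrative sketch of the fallback contract above: a driver's ioctl
 * handler returns -ENOTTY (rather than the historical -EINVAL) for
 * commands it does not recognise, so blkdev_ioctl() can fall through to
 * the generic handling. The mydev_* names and MYDEV_PRIVATE_CMD are
 * hypothetical.
 */
static int mydev_ioctl(struct inode *inode, struct file *file,
                       unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case MYDEV_PRIVATE_CMD:
                return mydev_handle_private(inode->i_bdev->bd_disk, arg);
        default:
                return -ENOTTY;         /* let the block layer handle it */
        }
}

static struct block_device_operations mydev_fops = {
        .owner  = THIS_MODULE,
        .ioctl  = mydev_ioctl,
};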
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
new file mode 100644 (file)
index 0000000..2747741
--- /dev/null
@@ -0,0 +1,3613 @@
+/*
+ *  linux/drivers/block/ll_rw_blk.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
+ * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
+ * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
+ * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July 2000
+ * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
+ */
+
+/*
+ * This handles all read/write requests to block devices
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/backing-dev.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/kernel_stat.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>     /* for max_pfn/max_low_pfn */
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/writeback.h>
+
+/*
+ * for max sense size
+ */
+#include <scsi/scsi_cmnd.h>
+
+static void blk_unplug_work(void *data);
+static void blk_unplug_timeout(unsigned long data);
+static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
+
+/*
+ * For the allocated request tables
+ */
+static kmem_cache_t *request_cachep;
+
+/*
+ * For queue allocation
+ */
+static kmem_cache_t *requestq_cachep;
+
+/*
+ * For io context allocations
+ */
+static kmem_cache_t *iocontext_cachep;
+
+static wait_queue_head_t congestion_wqh[2] = {
+               __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
+               __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
+       };
+
+/*
+ * Controlling structure to kblockd
+ */
+static struct workqueue_struct *kblockd_workqueue; 
+
+unsigned long blk_max_low_pfn, blk_max_pfn;
+
+EXPORT_SYMBOL(blk_max_low_pfn);
+EXPORT_SYMBOL(blk_max_pfn);
+
+/* Amount of time in which a process may batch requests */
+#define BLK_BATCH_TIME (HZ/50UL)
+
+/* Number of requests a "batching" process may submit */
+#define BLK_BATCH_REQ  32
+
+/*
+ * Return the threshold (number of used requests) at which the queue is
+ * considered to be congested.  It includes a little hysteresis to keep the
+ * context switch rate down.
+ */
+static inline int queue_congestion_on_threshold(struct request_queue *q)
+{
+       return q->nr_congestion_on;
+}
+
+/*
+ * The threshold at which a queue is considered to be uncongested
+ */
+static inline int queue_congestion_off_threshold(struct request_queue *q)
+{
+       return q->nr_congestion_off;
+}
+
+static void blk_queue_congestion_threshold(struct request_queue *q)
+{
+       int nr;
+
+       nr = q->nr_requests - (q->nr_requests / 8) + 1;
+       if (nr > q->nr_requests)
+               nr = q->nr_requests;
+       q->nr_congestion_on = nr;
+
+       nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
+       if (nr < 1)
+               nr = 1;
+       q->nr_congestion_off = nr;
+}
+
+/*
+ * A queue has just exited congestion.  Note this in the global counter of
+ * congested queues, and wake up anyone who was waiting for requests to be
+ * put back.
+ */
+static void clear_queue_congested(request_queue_t *q, int rw)
+{
+       enum bdi_state bit;
+       wait_queue_head_t *wqh = &congestion_wqh[rw];
+
+       bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
+       clear_bit(bit, &q->backing_dev_info.state);
+       smp_mb__after_clear_bit();
+       if (waitqueue_active(wqh))
+               wake_up(wqh);
+}
+
+/*
+ * A queue has just entered congestion.  Flag that in the queue's VM-visible
+ * state flags and increment the global counter of congested queues.
+ */
+static void set_queue_congested(request_queue_t *q, int rw)
+{
+       enum bdi_state bit;
+
+       bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
+       set_bit(bit, &q->backing_dev_info.state);
+}
+
+/**
+ * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
+ * @bdev:      device
+ *
+ * Locates the passed device's request queue and returns the address of its
+ * backing_dev_info
+ *
+ * Will return NULL if the request queue cannot be located.
+ */
+struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
+{
+       struct backing_dev_info *ret = NULL;
+       request_queue_t *q = bdev_get_queue(bdev);
+
+       if (q)
+               ret = &q->backing_dev_info;
+       return ret;
+}
+
+EXPORT_SYMBOL(blk_get_backing_dev_info);
+
+void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
+{
+       q->activity_fn = fn;
+       q->activity_data = data;
+}
+
+EXPORT_SYMBOL(blk_queue_activity_fn);
+
+/**
+ * blk_queue_prep_rq - set a prepare_request function for queue
+ * @q:         queue
+ * @pfn:       prepare_request function
+ *
+ * It's possible for a queue to register a prepare_request callback which
+ * is invoked before the request is handed to the request_fn. The goal of
+ * the function is to prepare a request for I/O; it can be used, for
+ * instance, to build a cdb from the request data.
+ *
+ */
+void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn)
+{
+       q->prep_rq_fn = pfn;
+}
+
+EXPORT_SYMBOL(blk_queue_prep_rq);
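/*
 * Illustrative sketch of a prepare_request callback as described above:
 * build a 10-byte READ/WRITE cdb from the request before the request_fn
 * sees it. READ_10/WRITE_10 come from <scsi/scsi.h>; the mydev_ name is
 * hypothetical. It would be registered with blk_queue_prep_rq(q, mydev_prep_rq).
 */
static int mydev_prep_rq(request_queue_t *q, struct request *rq)
{
        if (!blk_fs_request(rq))
                return BLKPREP_KILL;

        memset(rq->cmd, 0, sizeof(rq->cmd));
        rq->cmd[0] = rq_data_dir(rq) == READ ? READ_10 : WRITE_10;
        rq->cmd[2] = (rq->sector >> 24) & 0xff;
        rq->cmd[3] = (rq->sector >> 16) & 0xff;
        rq->cmd[4] = (rq->sector >> 8) & 0xff;
        rq->cmd[5] = rq->sector & 0xff;
        rq->cmd[7] = (rq->nr_sectors >> 8) & 0xff;
        rq->cmd[8] = rq->nr_sectors & 0xff;

        return BLKPREP_OK;
}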
+
+/**
+ * blk_queue_merge_bvec - set a merge_bvec function for queue
+ * @q:         queue
+ * @mbfn:      merge_bvec_fn
+ *
+ * Usually queues have static limitations on the max sectors or segments that
+ * we can put in a request. Stacking drivers may have some settings that
+ * are dynamic, and thus we have to query the queue whether it is ok to
+ * add a new bio_vec to a bio at a given offset or not. If the block device
+ * has such limitations, it needs to register a merge_bvec_fn to control
+ * the size of bios sent to it. Note that a block device *must* allow a
+ * single page to be added to an empty bio. The block device driver may want
+ * to use the bio_split() function to deal with these bios. By default
+ * no merge_bvec_fn is defined for a queue, and only the fixed limits are
+ * honored.
+ */
+void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
+{
+       q->merge_bvec_fn = mbfn;
+}
+
+EXPORT_SYMBOL(blk_queue_merge_bvec);
+
+/**
+ * blk_queue_make_request - define an alternate make_request function for a device
+ * @q:  the request queue for the device to be affected
+ * @mfn: the alternate make_request function
+ *
+ * Description:
+ *    The normal way for &struct bios to be passed to a device
+ *    driver is for them to be collected into requests on a request
+ *    queue, and then to allow the device driver to select requests
+ *    off that queue when it is ready.  This works well for many block
+ *    devices. However some block devices (typically virtual devices
+ *    such as md or lvm) do not benefit from the processing on the
+ *    request queue, and are served best by having the requests passed
+ *    directly to them.  This can be achieved by providing a function
+ *    to blk_queue_make_request().
+ *
+ * Caveat:
+ *    The driver that does this *must* be able to deal appropriately
+ *    with buffers in "highmemory". This can be accomplished by either calling
+ *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
+ *    blk_queue_bounce() to create a buffer in normal memory.
+ **/
+void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
+{
+       /*
+        * set defaults
+        */
+       q->nr_requests = BLKDEV_MAX_RQ;
+       blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
+       blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
+       q->make_request_fn = mfn;
+       q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+       q->backing_dev_info.state = 0;
+       q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+       blk_queue_max_sectors(q, MAX_SECTORS);
+       blk_queue_hardsect_size(q, 512);
+       blk_queue_dma_alignment(q, 511);
+       blk_queue_congestion_threshold(q);
+       q->nr_batching = BLK_BATCH_REQ;
+
+       q->unplug_thresh = 4;           /* hmm */
+       q->unplug_delay = (3 * HZ) / 1000;      /* 3 milliseconds */
+       if (q->unplug_delay == 0)
+               q->unplug_delay = 1;
+
+       INIT_WORK(&q->unplug_work, blk_unplug_work, q);
+
+       q->unplug_timer.function = blk_unplug_timeout;
+       q->unplug_timer.data = (unsigned long)q;
+
+       /*
+        * by default assume old behaviour and bounce for any highmem page
+        */
+       blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
+
+       blk_queue_activity_fn(q, NULL, NULL);
+}
+
+EXPORT_SYMBOL(blk_queue_make_request);
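/*
 * Illustrative sketch of the alternate make_request path described above:
 * a virtual (bio-based) driver bypasses the request queue and completes
 * bios itself. The mydev_* names are hypothetical.
 */
static int mydev_make_request(request_queue_t *q, struct bio *bio)
{
        struct mydev *dev = q->queuedata;

        if (mydev_transfer_bio(dev, bio))       /* hypothetical helper */
                bio_endio(bio, bio->bi_size, -EIO);
        else
                bio_endio(bio, bio->bi_size, 0);

        return 0;
}

/*
 * setup:  q = blk_alloc_queue(GFP_KERNEL);
 *         blk_queue_make_request(q, mydev_make_request);
 */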
+
+static inline void rq_init(request_queue_t *q, struct request *rq)
+{
+       INIT_LIST_HEAD(&rq->queuelist);
+
+       rq->errors = 0;
+       rq->rq_status = RQ_ACTIVE;
+       rq->bio = rq->biotail = NULL;
+       rq->ioprio = 0;
+       rq->buffer = NULL;
+       rq->ref_count = 1;
+       rq->q = q;
+       rq->waiting = NULL;
+       rq->special = NULL;
+       rq->data_len = 0;
+       rq->data = NULL;
+       rq->nr_phys_segments = 0;
+       rq->sense = NULL;
+       rq->end_io = NULL;
+       rq->end_io_data = NULL;
+}
+
+/**
+ * blk_queue_ordered - does this queue support ordered writes
+ * @q:     the request queue
+ * @flag:  see below
+ *
+ * Description:
+ *   For journalled file systems, doing ordered writes on a commit
+ *   block instead of explicitly doing wait_on_buffer (which is bad
+ *   for performance) can be a big win. Block drivers supporting this
+ *   feature should call this function and indicate so.
+ *
+ **/
+void blk_queue_ordered(request_queue_t *q, int flag)
+{
+       switch (flag) {
+               case QUEUE_ORDERED_NONE:
+                       if (q->flush_rq)
+                               kmem_cache_free(request_cachep, q->flush_rq);
+                       q->flush_rq = NULL;
+                       q->ordered = flag;
+                       break;
+               case QUEUE_ORDERED_TAG:
+                       q->ordered = flag;
+                       break;
+               case QUEUE_ORDERED_FLUSH:
+                       q->ordered = flag;
+                       if (!q->flush_rq)
+                               q->flush_rq = kmem_cache_alloc(request_cachep,
+                                                               GFP_KERNEL);
+                       break;
+               default:
+                       printk("blk_queue_ordered: bad value %d\n", flag);
+                       break;
+       }
+}
+
+EXPORT_SYMBOL(blk_queue_ordered);
+
+/**
+ * blk_queue_issue_flush_fn - set function for issuing a flush
+ * @q:     the request queue
+ * @iff:   the function to be called issuing the flush
+ *
+ * Description:
+ *   If a driver supports issuing a flush command, the support is notified
+ *   to the block layer by defining it through this call.
+ *
+ **/
+void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff)
+{
+       q->issue_flush_fn = iff;
+}
+
+EXPORT_SYMBOL(blk_queue_issue_flush_fn);
+
+/*
+ * Cache flushing for ordered writes handling
+ */
+static void blk_pre_flush_end_io(struct request *flush_rq)
+{
+       struct request *rq = flush_rq->end_io_data;
+       request_queue_t *q = rq->q;
+
+       elv_completed_request(q, flush_rq);
+
+       rq->flags |= REQ_BAR_PREFLUSH;
+
+       if (!flush_rq->errors)
+               elv_requeue_request(q, rq);
+       else {
+               q->end_flush_fn(q, flush_rq);
+               clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
+               q->request_fn(q);
+       }
+}
+
+static void blk_post_flush_end_io(struct request *flush_rq)
+{
+       struct request *rq = flush_rq->end_io_data;
+       request_queue_t *q = rq->q;
+
+       elv_completed_request(q, flush_rq);
+
+       rq->flags |= REQ_BAR_POSTFLUSH;
+
+       q->end_flush_fn(q, flush_rq);
+       clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
+       q->request_fn(q);
+}
+
+struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq)
+{
+       struct request *flush_rq = q->flush_rq;
+
+       BUG_ON(!blk_barrier_rq(rq));
+
+       if (test_and_set_bit(QUEUE_FLAG_FLUSH, &q->queue_flags))
+               return NULL;
+
+       rq_init(q, flush_rq);
+       flush_rq->elevator_private = NULL;
+       flush_rq->flags = REQ_BAR_FLUSH;
+       flush_rq->rq_disk = rq->rq_disk;
+       flush_rq->rl = NULL;
+
+       /*
+        * prepare_flush returns 0 if no flush is needed, just mark both
+        * pre and post flush as done in that case
+        */
+       if (!q->prepare_flush_fn(q, flush_rq)) {
+               rq->flags |= REQ_BAR_PREFLUSH | REQ_BAR_POSTFLUSH;
+               clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
+               return rq;
+       }
+
+       /*
+        * some drivers dequeue requests right away, some only after io
+        * completion. make sure the request is dequeued.
+        */
+       if (!list_empty(&rq->queuelist))
+               blkdev_dequeue_request(rq);
+
+       flush_rq->end_io_data = rq;
+       flush_rq->end_io = blk_pre_flush_end_io;
+
+       __elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0);
+       return flush_rq;
+}
+
+static void blk_start_post_flush(request_queue_t *q, struct request *rq)
+{
+       struct request *flush_rq = q->flush_rq;
+
+       BUG_ON(!blk_barrier_rq(rq));
+
+       rq_init(q, flush_rq);
+       flush_rq->elevator_private = NULL;
+       flush_rq->flags = REQ_BAR_FLUSH;
+       flush_rq->rq_disk = rq->rq_disk;
+       flush_rq->rl = NULL;
+
+       if (q->prepare_flush_fn(q, flush_rq)) {
+               flush_rq->end_io_data = rq;
+               flush_rq->end_io = blk_post_flush_end_io;
+
+               __elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0);
+               q->request_fn(q);
+       }
+}
+
+static inline int blk_check_end_barrier(request_queue_t *q, struct request *rq,
+                                       int sectors)
+{
+       if (sectors > rq->nr_sectors)
+               sectors = rq->nr_sectors;
+
+       rq->nr_sectors -= sectors;
+       return rq->nr_sectors;
+}
+
+static int __blk_complete_barrier_rq(request_queue_t *q, struct request *rq,
+                                    int sectors, int queue_locked)
+{
+       if (q->ordered != QUEUE_ORDERED_FLUSH)
+               return 0;
+       if (!blk_fs_request(rq) || !blk_barrier_rq(rq))
+               return 0;
+       if (blk_barrier_postflush(rq))
+               return 0;
+
+       if (!blk_check_end_barrier(q, rq, sectors)) {
+               unsigned long flags = 0;
+
+               if (!queue_locked)
+                       spin_lock_irqsave(q->queue_lock, flags);
+
+               blk_start_post_flush(q, rq);
+
+               if (!queue_locked)
+                       spin_unlock_irqrestore(q->queue_lock, flags);
+       }
+
+       return 1;
+}
+
+/**
+ * blk_complete_barrier_rq - complete possible barrier request
+ * @q:  the request queue for the device
+ * @rq:  the request
+ * @sectors:  number of sectors to complete
+ *
+ * Description:
+ *   Used in driver end_io handling to determine whether to postpone
+ *   completion of a barrier request until a post flush has been done. This
+ *   is the unlocked variant, used if the caller doesn't already hold the
+ *   queue lock.
+ **/
+int blk_complete_barrier_rq(request_queue_t *q, struct request *rq, int sectors)
+{
+       return __blk_complete_barrier_rq(q, rq, sectors, 0);
+}
+EXPORT_SYMBOL(blk_complete_barrier_rq);
+
+/**
+ * blk_complete_barrier_rq_locked - complete possible barrier request
+ * @q:  the request queue for the device
+ * @rq:  the request
+ * @sectors:  number of sectors to complete
+ *
+ * Description:
+ *   See blk_complete_barrier_rq(). This variant must be used if the caller
+ *   holds the queue lock.
+ **/
+int blk_complete_barrier_rq_locked(request_queue_t *q, struct request *rq,
+                                  int sectors)
+{
+       return __blk_complete_barrier_rq(q, rq, sectors, 1);
+}
+EXPORT_SYMBOL(blk_complete_barrier_rq_locked);
+
+/**
+ * blk_queue_bounce_limit - set bounce buffer limit for queue
+ * @q:  the request queue for the device
+ * @dma_addr:   bus address limit
+ *
+ * Description:
+ *    Different hardware can have different requirements as to what pages
+ *    it can do I/O directly to. A low level driver can call
+ *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
+ *    buffers for doing I/O to pages residing above @dma_addr. By default
+ *    the block layer sets this to the highest numbered "low" memory page.
+ **/
+void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
+{
+       unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
+
+       /*
+        * set appropriate bounce gfp mask -- unfortunately we don't have a
+        * full 4GB zone, so we have to resort to low memory for any bounces.
+        * ISA has its own < 16MB zone.
+        */
+       if (bounce_pfn < blk_max_low_pfn) {
+               BUG_ON(dma_addr < BLK_BOUNCE_ISA);
+               init_emergency_isa_pool();
+               q->bounce_gfp = GFP_NOIO | GFP_DMA;
+       } else
+               q->bounce_gfp = GFP_NOIO;
+
+       q->bounce_pfn = bounce_pfn;
+}
+
+EXPORT_SYMBOL(blk_queue_bounce_limit);
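/*
 * Illustrative usage: an ISA-style controller limited to 24-bit DMA
 * versus one that can address all of memory (a sketch, called from a
 * hypothetical probe routine once the queue exists).
 */
static void mydev_set_dma_limits(request_queue_t *q, int isa_only)
{
        if (isa_only)
                blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
        else
                blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
}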
+
+/**
+ * blk_queue_max_sectors - set max sectors for a request for this queue
+ * @q:  the request queue for the device
+ * @max_sectors:  max sectors in the usual 512b unit
+ *
+ * Description:
+ *    Enables a low level driver to set an upper limit on the size of
+ *    received requests.
+ **/
+void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
+{
+       if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
+               max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+               printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
+       }
+
+       q->max_sectors = q->max_hw_sectors = max_sectors;
+}
+
+EXPORT_SYMBOL(blk_queue_max_sectors);
+
+/**
+ * blk_queue_max_phys_segments - set max phys segments for a request for this queue
+ * @q:  the request queue for the device
+ * @max_segments:  max number of segments
+ *
+ * Description:
+ *    Enables a low level driver to set an upper limit on the number of
+ *    physical data segments in a request.  This would be the largest sized
+ *    scatter list the driver could handle.
+ **/
+void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments)
+{
+       if (!max_segments) {
+               max_segments = 1;
+               printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+       }
+
+       q->max_phys_segments = max_segments;
+}
+
+EXPORT_SYMBOL(blk_queue_max_phys_segments);
+
+/**
+ * blk_queue_max_hw_segments - set max hw segments for a request for this queue
+ * @q:  the request queue for the device
+ * @max_segments:  max number of segments
+ *
+ * Description:
+ *    Enables a low level driver to set an upper limit on the number of
+ *    hw data segments in a request.  This would be the largest number of
+ *    address/length pairs the host adapter can actually hand to the
+ *    device at once.
+ **/
+void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments)
+{
+       if (!max_segments) {
+               max_segments = 1;
+               printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+       }
+
+       q->max_hw_segments = max_segments;
+}
+
+EXPORT_SYMBOL(blk_queue_max_hw_segments);
+
+/**
+ * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
+ * @q:  the request queue for the device
+ * @max_size:  max size of segment in bytes
+ *
+ * Description:
+ *    Enables a low level driver to set an upper limit on the size of a
+ *    coalesced segment
+ **/
+void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size)
+{
+       if (max_size < PAGE_CACHE_SIZE) {
+               max_size = PAGE_CACHE_SIZE;
+               printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
+       }
+
+       q->max_segment_size = max_size;
+}
+
+EXPORT_SYMBOL(blk_queue_max_segment_size);
+
+/**
+ * blk_queue_hardsect_size - set hardware sector size for the queue
+ * @q:  the request queue for the device
+ * @size:  the hardware sector size, in bytes
+ *
+ * Description:
+ *   This should typically be set to the lowest possible sector size
+ *   that the hardware can operate on (possibly without resorting to
+ *   internal read-modify-write operations). Usually the default
+ *   of 512 covers most hardware.
+ **/
+void blk_queue_hardsect_size(request_queue_t *q, unsigned short size)
+{
+       q->hardsect_size = size;
+}
+
+EXPORT_SYMBOL(blk_queue_hardsect_size);
+
+/*
+ * Returns the minimum that is _not_ zero, unless both are zero.
+ */
+#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min((l), (r))))
+
+/**
+ * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
+ * @t: the stacking driver (top)
+ * @b:  the underlying device (bottom)
+ **/
+void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
+{
+       /* zero is "infinity" */
+       t->max_sectors = t->max_hw_sectors =
+               min_not_zero(t->max_sectors,b->max_sectors);
+
+       t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
+       t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
+       t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
+       t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
+}
+
+EXPORT_SYMBOL(blk_queue_stack_limits);
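/*
 * Illustrative usage for a stacking driver (md/dm style): inherit the
 * limits of each underlying device as it is added to the stack. The
 * mystack_ name is hypothetical.
 */
static void mystack_add_lower_device(request_queue_t *top,
                                     struct block_device *lower)
{
        blk_queue_stack_limits(top, bdev_get_queue(lower));
}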
+
+/**
+ * blk_queue_segment_boundary - set boundary rules for segment merging
+ * @q:  the request queue for the device
+ * @mask:  the memory boundary mask
+ **/
+void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
+{
+       if (mask < PAGE_CACHE_SIZE - 1) {
+               mask = PAGE_CACHE_SIZE - 1;
+               printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
+       }
+
+       q->seg_boundary_mask = mask;
+}
+
+EXPORT_SYMBOL(blk_queue_segment_boundary);
+
+/**
+ * blk_queue_dma_alignment - set dma length and memory alignment
+ * @q:     the request queue for the device
+ * @mask:  alignment mask
+ *
+ * description:
+ *    set required memory and length alignment for direct dma transactions.
+ *    this is used when building direct io requests for the queue.
+ *
+ **/
+void blk_queue_dma_alignment(request_queue_t *q, int mask)
+{
+       q->dma_alignment = mask;
+}
+
+EXPORT_SYMBOL(blk_queue_dma_alignment);
+
+/**
+ * blk_queue_find_tag - find a request by its tag and queue
+ *
+ * @q:  The request queue for the device
+ * @tag: The tag of the request
+ *
+ * Notes:
+ *    Should be used when a device returns a tag and you want to match
+ *    it with a request.
+ *
+ *    no locks need be held.
+ **/
+struct request *blk_queue_find_tag(request_queue_t *q, int tag)
+{
+       struct blk_queue_tag *bqt = q->queue_tags;
+
+       if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
+               return NULL;
+
+       return bqt->tag_index[tag];
+}
+
+EXPORT_SYMBOL(blk_queue_find_tag);
+
+/**
+ * __blk_queue_free_tags - release tag maintenance info
+ * @q:  the request queue for the device
+ *
+ *  Notes:
+ *    blk_cleanup_queue() will take care of calling this function, if tagging
+ *    has been used. So there's no need to call this directly.
+ **/
+static void __blk_queue_free_tags(request_queue_t *q)
+{
+       struct blk_queue_tag *bqt = q->queue_tags;
+
+       if (!bqt)
+               return;
+
+       if (atomic_dec_and_test(&bqt->refcnt)) {
+               BUG_ON(bqt->busy);
+               BUG_ON(!list_empty(&bqt->busy_list));
+
+               kfree(bqt->tag_index);
+               bqt->tag_index = NULL;
+
+               kfree(bqt->tag_map);
+               bqt->tag_map = NULL;
+
+               kfree(bqt);
+       }
+
+       q->queue_tags = NULL;
+       q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
+}
+
+/**
+ * blk_queue_free_tags - release tag maintenance info
+ * @q:  the request queue for the device
+ *
+ *  Notes:
+ *     This is used to disable tagged queuing on a device, yet leave the
+ *     queue functioning.
+ **/
+void blk_queue_free_tags(request_queue_t *q)
+{
+       clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+}
+
+EXPORT_SYMBOL(blk_queue_free_tags);
+
+static int
+init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
+{
+       struct request **tag_index;
+       unsigned long *tag_map;
+       int nr_ulongs;
+
+       if (depth > q->nr_requests * 2) {
+               depth = q->nr_requests * 2;
+               printk(KERN_ERR "%s: adjusted depth to %d\n",
+                               __FUNCTION__, depth);
+       }
+
+       tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);
+       if (!tag_index)
+               goto fail;
+
+       nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
+       tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
+       if (!tag_map)
+               goto fail;
+
+       memset(tag_index, 0, depth * sizeof(struct request *));
+       memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
+       tags->real_max_depth = depth;
+       tags->max_depth = depth;
+       tags->tag_index = tag_index;
+       tags->tag_map = tag_map;
+
+       return 0;
+fail:
+       kfree(tag_index);
+       return -ENOMEM;
+}
+
+/**
+ * blk_queue_init_tags - initialize the queue tag info
+ * @q:  the request queue for the device
+ * @depth:  the maximum queue depth supported
+ * @tags: the tag to use
+ **/
+int blk_queue_init_tags(request_queue_t *q, int depth,
+                       struct blk_queue_tag *tags)
+{
+       int rc;
+
+       BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
+
+       if (!tags && !q->queue_tags) {
+               tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
+               if (!tags)
+                       goto fail;
+
+               if (init_tag_map(q, tags, depth))
+                       goto fail;
+
+               INIT_LIST_HEAD(&tags->busy_list);
+               tags->busy = 0;
+               atomic_set(&tags->refcnt, 1);
+       } else if (q->queue_tags) {
+               if ((rc = blk_queue_resize_tags(q, depth)))
+                       return rc;
+               set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+               return 0;
+       } else
+               atomic_inc(&tags->refcnt);
+
+       /*
+        * assign it, all done
+        */
+       q->queue_tags = tags;
+       q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+       return 0;
+fail:
+       kfree(tags);
+       return -ENOMEM;
+}
+
+EXPORT_SYMBOL(blk_queue_init_tags);
+
+/**
+ * blk_queue_resize_tags - change the queueing depth
+ * @q:  the request queue for the device
+ * @new_depth: the new max command queueing depth
+ *
+ *  Notes:
+ *    Must be called with the queue lock held.
+ **/
+int blk_queue_resize_tags(request_queue_t *q, int new_depth)
+{
+       struct blk_queue_tag *bqt = q->queue_tags;
+       struct request **tag_index;
+       unsigned long *tag_map;
+       int max_depth, nr_ulongs;
+
+       if (!bqt)
+               return -ENXIO;
+
+       /*
+        * if we already have a large enough real_max_depth, just
+        * adjust max_depth.  *NOTE* as requests with tag value
+        * between new_depth and real_max_depth can be in-flight, tag
+        * map can not be shrunk blindly here.
+        */
+       if (new_depth <= bqt->real_max_depth) {
+               bqt->max_depth = new_depth;
+               return 0;
+       }
+
+       /*
+        * save the old state info, so we can copy it back
+        */
+       tag_index = bqt->tag_index;
+       tag_map = bqt->tag_map;
+       max_depth = bqt->real_max_depth;
+
+       if (init_tag_map(q, bqt, new_depth))
+               return -ENOMEM;
+
+       memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
+       nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
+       memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
+
+       kfree(tag_index);
+       kfree(tag_map);
+       return 0;
+}
+
+EXPORT_SYMBOL(blk_queue_resize_tags);
+
+/**
+ * blk_queue_end_tag - end tag operations for a request
+ * @q:  the request queue for the device
+ * @rq: the request that has completed
+ *
+ *  Description:
+ *    Typically called when end_that_request_first() returns 0, meaning
+ *    all transfers have been done for a request. It's important to call
+ *    this function before end_that_request_last(), as that will put the
+ *    request back on the free list thus corrupting the internal tag list.
+ *
+ *  Notes:
+ *   queue lock must be held.
+ **/
+void blk_queue_end_tag(request_queue_t *q, struct request *rq)
+{
+       struct blk_queue_tag *bqt = q->queue_tags;
+       int tag = rq->tag;
+
+       BUG_ON(tag == -1);
+
+       if (unlikely(tag >= bqt->real_max_depth))
+               /*
+                * This can happen after tag depth has been reduced.
+                * FIXME: how about a warning or info message here?
+                */
+               return;
+
+       if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
+               printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
+                      __FUNCTION__, tag);
+               return;
+       }
+
+       list_del_init(&rq->queuelist);
+       rq->flags &= ~REQ_QUEUED;
+       rq->tag = -1;
+
+       if (unlikely(bqt->tag_index[tag] == NULL))
+               printk(KERN_ERR "%s: tag %d is missing\n",
+                      __FUNCTION__, tag);
+
+       bqt->tag_index[tag] = NULL;
+       bqt->busy--;
+}
+
+EXPORT_SYMBOL(blk_queue_end_tag);
+
+/**
+ * blk_queue_start_tag - find a free tag and assign it
+ * @q:  the request queue for the device
+ * @rq:  the block request that needs tagging
+ *
+ *  Description:
+ *    This can either be used as a stand-alone helper, or possibly be
+ *    assigned as the queue &prep_rq_fn (in which case &struct request
+ *    automagically gets a tag assigned). Note that this function
+ *    assumes that any type of request can be queued! If this is not
+ *    true for your device, you must check the request type before
+ *    calling this function.  The request will also be removed from
+ *    the request queue, so it is the driver's responsibility to re-add
+ *    it if it should need to be restarted for some reason.
+ *
+ *  Notes:
+ *   queue lock must be held.
+ **/
+int blk_queue_start_tag(request_queue_t *q, struct request *rq)
+{
+       struct blk_queue_tag *bqt = q->queue_tags;
+       int tag;
+
+       if (unlikely((rq->flags & REQ_QUEUED))) {
+               printk(KERN_ERR 
+                      "%s: request %p for device [%s] already tagged %d",
+                      __FUNCTION__, rq,
+                      rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
+               BUG();
+       }
+
+       tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
+       if (tag >= bqt->max_depth)
+               return 1;
+
+       __set_bit(tag, bqt->tag_map);
+
+       rq->flags |= REQ_QUEUED;
+       rq->tag = tag;
+       bqt->tag_index[tag] = rq;
+       blkdev_dequeue_request(rq);
+       list_add(&rq->queuelist, &bqt->busy_list);
+       bqt->busy++;
+       return 0;
+}
+
+EXPORT_SYMBOL(blk_queue_start_tag);
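/*
 * Illustrative sketch of tagged queueing in a driver, assuming
 * blk_queue_init_tags(q, depth, NULL) was called at setup time:
 * blk_queue_start_tag() assigns a tag and dequeues the request, and
 * blk_queue_end_tag() runs before end_that_request_last(), as the notes
 * above require. The mytag_* names are hypothetical; both paths run with
 * the queue lock held.
 */
static void mytag_request_fn(request_queue_t *q)
{
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                if (blk_queue_start_tag(q, rq))
                        break;                  /* out of tags, retry later */
                mytag_issue_to_hw(rq);          /* hypothetical helper */
        }
}

static void mytag_complete(request_queue_t *q, struct request *rq)
{
        end_that_request_first(rq, 1, rq->hard_nr_sectors);
        blk_queue_end_tag(q, rq);
        end_that_request_last(rq);
}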
+
+/**
+ * blk_queue_invalidate_tags - invalidate all pending tags
+ * @q:  the request queue for the device
+ *
+ *  Description:
+ *   Hardware conditions may dictate a need to stop all pending requests.
+ *   In this case, we will safely clear the block side of the tag queue and
+ *   re-add all requests to the request queue in the right order.
+ *
+ *  Notes:
+ *   queue lock must be held.
+ **/
+void blk_queue_invalidate_tags(request_queue_t *q)
+{
+       struct blk_queue_tag *bqt = q->queue_tags;
+       struct list_head *tmp, *n;
+       struct request *rq;
+
+       list_for_each_safe(tmp, n, &bqt->busy_list) {
+               rq = list_entry_rq(tmp);
+
+               if (rq->tag == -1) {
+                       printk(KERN_ERR
+                              "%s: bad tag found on list\n", __FUNCTION__);
+                       list_del_init(&rq->queuelist);
+                       rq->flags &= ~REQ_QUEUED;
+               } else
+                       blk_queue_end_tag(q, rq);
+
+               rq->flags &= ~REQ_STARTED;
+               __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
+       }
+}
+
+EXPORT_SYMBOL(blk_queue_invalidate_tags);
+
+static char *rq_flags[] = {
+       "REQ_RW",
+       "REQ_FAILFAST",
+       "REQ_SORTED",
+       "REQ_SOFTBARRIER",
+       "REQ_HARDBARRIER",
+       "REQ_CMD",
+       "REQ_NOMERGE",
+       "REQ_STARTED",
+       "REQ_DONTPREP",
+       "REQ_QUEUED",
+       "REQ_ELVPRIV",
+       "REQ_PC",
+       "REQ_BLOCK_PC",
+       "REQ_SENSE",
+       "REQ_FAILED",
+       "REQ_QUIET",
+       "REQ_SPECIAL",
+       "REQ_DRIVE_CMD",
+       "REQ_DRIVE_TASK",
+       "REQ_DRIVE_TASKFILE",
+       "REQ_PREEMPT",
+       "REQ_PM_SUSPEND",
+       "REQ_PM_RESUME",
+       "REQ_PM_SHUTDOWN",
+};
+
+void blk_dump_rq_flags(struct request *rq, char *msg)
+{
+       int bit;
+
+       printk("%s: dev %s: flags = ", msg,
+               rq->rq_disk ? rq->rq_disk->disk_name : "?");
+       bit = 0;
+       do {
+               if (rq->flags & (1 << bit))
+                       printk("%s ", rq_flags[bit]);
+               bit++;
+       } while (bit < __REQ_NR_BITS);
+
+       printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
+                                                      rq->nr_sectors,
+                                                      rq->current_nr_sectors);
+       printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
+
+       if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
+               printk("cdb: ");
+               for (bit = 0; bit < sizeof(rq->cmd); bit++)
+                       printk("%02x ", rq->cmd[bit]);
+               printk("\n");
+       }
+}
+
+EXPORT_SYMBOL(blk_dump_rq_flags);
+
+void blk_recount_segments(request_queue_t *q, struct bio *bio)
+{
+       struct bio_vec *bv, *bvprv = NULL;
+       int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
+       int high, highprv = 1;
+
+       if (unlikely(!bio->bi_io_vec))
+               return;
+
+       cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+       hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0;
+       bio_for_each_segment(bv, bio, i) {
+               /*
+                * the trick here is making sure that a high page is never
+                * considered part of another segment, since that might
+                * change with the bounce page.
+                */
+               high = page_to_pfn(bv->bv_page) >= q->bounce_pfn;
+               if (high || highprv)
+                       goto new_hw_segment;
+               if (cluster) {
+                       if (seg_size + bv->bv_len > q->max_segment_size)
+                               goto new_segment;
+                       if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+                               goto new_segment;
+                       if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+                               goto new_segment;
+                       if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
+                               goto new_hw_segment;
+
+                       seg_size += bv->bv_len;
+                       hw_seg_size += bv->bv_len;
+                       bvprv = bv;
+                       continue;
+               }
+new_segment:
+               if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
+                   !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) {
+                       hw_seg_size += bv->bv_len;
+               } else {
+new_hw_segment:
+                       if (hw_seg_size > bio->bi_hw_front_size)
+                               bio->bi_hw_front_size = hw_seg_size;
+                       hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
+                       nr_hw_segs++;
+               }
+
+               nr_phys_segs++;
+               bvprv = bv;
+               seg_size = bv->bv_len;
+               highprv = high;
+       }
+       if (hw_seg_size > bio->bi_hw_back_size)
+               bio->bi_hw_back_size = hw_seg_size;
+       if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size)
+               bio->bi_hw_front_size = hw_seg_size;
+       bio->bi_phys_segments = nr_phys_segs;
+       bio->bi_hw_segments = nr_hw_segs;
+       bio->bi_flags |= (1 << BIO_SEG_VALID);
+}
+
+
+static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
+                                  struct bio *nxt)
+{
+       if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+               return 0;
+
+       if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+               return 0;
+       if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+               return 0;
+
+       /*
+        * bio and nxt are contiguous in memory, check if the queue allows
+        * these two to be merged into one
+        */
+       if (BIO_SEG_BOUNDARY(q, bio, nxt))
+               return 1;
+
+       return 0;
+}
+
+static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
+                                struct bio *nxt)
+{
+       if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+               blk_recount_segments(q, bio);
+       if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
+               blk_recount_segments(q, nxt);
+       if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
+           BIOVEC_VIRT_OVERSIZE(bio->bi_hw_front_size + bio->bi_hw_back_size))
+               return 0;
+       if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+               return 0;
+
+       return 1;
+}
+
+/*
+ * map a request to scatterlist, return number of sg entries setup. Caller
+ * must make sure sg can hold rq->nr_phys_segments entries
+ */
+int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
+{
+       struct bio_vec *bvec, *bvprv;
+       struct bio *bio;
+       int nsegs, i, cluster;
+
+       nsegs = 0;
+       cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+
+       /*
+        * for each bio in rq
+        */
+       bvprv = NULL;
+       rq_for_each_bio(bio, rq) {
+               /*
+                * for each segment in bio
+                */
+               bio_for_each_segment(bvec, bio, i) {
+                       int nbytes = bvec->bv_len;
+
+                       if (bvprv && cluster) {
+                               if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
+                                       goto new_segment;
+
+                               if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
+                                       goto new_segment;
+                               if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
+                                       goto new_segment;
+
+                               sg[nsegs - 1].length += nbytes;
+                       } else {
+new_segment:
+                               memset(&sg[nsegs],0,sizeof(struct scatterlist));
+                               sg[nsegs].page = bvec->bv_page;
+                               sg[nsegs].length = nbytes;
+                               sg[nsegs].offset = bvec->bv_offset;
+
+                               nsegs++;
+                       }
+                       bvprv = bvec;
+               } /* segments in bio */
+       } /* bios in rq */
+
+       return nsegs;
+}
+
+EXPORT_SYMBOL(blk_rq_map_sg);
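/*
 * Illustrative usage: build a scatterlist for DMA from a request. The
 * sg table must have room for rq->nr_phys_segments entries, as noted
 * above. The mydev_ name is hypothetical.
 */
static int mydev_build_sg(request_queue_t *q, struct request *rq,
                          struct scatterlist *sg_table)
{
        int nents = blk_rq_map_sg(q, rq, sg_table);

        /* nents <= rq->nr_phys_segments; hand sg_table/nents to the HBA */
        return nents;
}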
+
+/*
+ * the standard queue merge functions, can be overridden with device
+ * specific ones if so desired
+ */
+
+static inline int ll_new_mergeable(request_queue_t *q,
+                                  struct request *req,
+                                  struct bio *bio)
+{
+       int nr_phys_segs = bio_phys_segments(q, bio);
+
+       if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+               req->flags |= REQ_NOMERGE;
+               if (req == q->last_merge)
+                       q->last_merge = NULL;
+               return 0;
+       }
+
+       /*
+        * A hw segment is just getting larger, bump just the phys
+        * counter.
+        */
+       req->nr_phys_segments += nr_phys_segs;
+       return 1;
+}
+
+static inline int ll_new_hw_segment(request_queue_t *q,
+                                   struct request *req,
+                                   struct bio *bio)
+{
+       int nr_hw_segs = bio_hw_segments(q, bio);
+       int nr_phys_segs = bio_phys_segments(q, bio);
+
+       if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
+           || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+               req->flags |= REQ_NOMERGE;
+               if (req == q->last_merge)
+                       q->last_merge = NULL;
+               return 0;
+       }
+
+       /*
+        * This will form the start of a new hw segment.  Bump both
+        * counters.
+        */
+       req->nr_hw_segments += nr_hw_segs;
+       req->nr_phys_segments += nr_phys_segs;
+       return 1;
+}
+
+static int ll_back_merge_fn(request_queue_t *q, struct request *req, 
+                           struct bio *bio)
+{
+       int len;
+
+       if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+               req->flags |= REQ_NOMERGE;
+               if (req == q->last_merge)
+                       q->last_merge = NULL;
+               return 0;
+       }
+       if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
+               blk_recount_segments(q, req->biotail);
+       if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+               blk_recount_segments(q, bio);
+       len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
+       if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
+           !BIOVEC_VIRT_OVERSIZE(len)) {
+               int mergeable =  ll_new_mergeable(q, req, bio);
+
+               if (mergeable) {
+                       if (req->nr_hw_segments == 1)
+                               req->bio->bi_hw_front_size = len;
+                       if (bio->bi_hw_segments == 1)
+                               bio->bi_hw_back_size = len;
+               }
+               return mergeable;
+       }
+
+       return ll_new_hw_segment(q, req, bio);
+}
+
+static int ll_front_merge_fn(request_queue_t *q, struct request *req, 
+                            struct bio *bio)
+{
+       int len;
+
+       if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+               req->flags |= REQ_NOMERGE;
+               if (req == q->last_merge)
+                       q->last_merge = NULL;
+               return 0;
+       }
+       len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
+       if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+               blk_recount_segments(q, bio);
+       if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
+               blk_recount_segments(q, req->bio);
+       if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
+           !BIOVEC_VIRT_OVERSIZE(len)) {
+               int mergeable =  ll_new_mergeable(q, req, bio);
+
+               if (mergeable) {
+                       if (bio->bi_hw_segments == 1)
+                               bio->bi_hw_front_size = len;
+                       if (req->nr_hw_segments == 1)
+                               req->biotail->bi_hw_back_size = len;
+               }
+               return mergeable;
+       }
+
+       return ll_new_hw_segment(q, req, bio);
+}
+
+static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
+                               struct request *next)
+{
+       int total_phys_segments;
+       int total_hw_segments;
+
+       /*
+        * First check if either of the requests is a re-queued
+        * request.  We can't merge them if they are.
+        */
+       if (req->special || next->special)
+               return 0;
+
+       /*
+        * Will it become too large?
+        */
+       if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
+               return 0;
+
+       total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
+       if (blk_phys_contig_segment(q, req->biotail, next->bio))
+               total_phys_segments--;
+
+       if (total_phys_segments > q->max_phys_segments)
+               return 0;
+
+       total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
+       if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
+               int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
+               /*
+                * propagate the combined length to the end of the requests
+                */
+               if (req->nr_hw_segments == 1)
+                       req->bio->bi_hw_front_size = len;
+               if (next->nr_hw_segments == 1)
+                       next->biotail->bi_hw_back_size = len;
+               total_hw_segments--;
+       }
+
+       if (total_hw_segments > q->max_hw_segments)
+               return 0;
+
+       /* Merge is OK... */
+       req->nr_phys_segments = total_phys_segments;
+       req->nr_hw_segments = total_hw_segments;
+       return 1;
+}
+
+/*
+ * "plug" the device if there are no outstanding requests: this will
+ * force the transfer to start only after we have put all the requests
+ * on the list.
+ *
+ * This is called with interrupts off and no requests on the queue and
+ * with the queue lock held.
+ */
+void blk_plug_device(request_queue_t *q)
+{
+       WARN_ON(!irqs_disabled());
+
+       /*
+        * don't plug a stopped queue, it must be paired with blk_start_queue()
+        * which will restart the queueing
+        */
+       if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
+               return;
+
+       if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+               mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
+}
+
+EXPORT_SYMBOL(blk_plug_device);
+
+/*
+ * remove the queue from the plugged list, if present. called with
+ * queue lock held and interrupts disabled.
+ */
+int blk_remove_plug(request_queue_t *q)
+{
+       WARN_ON(!irqs_disabled());
+
+       if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+               return 0;
+
+       del_timer(&q->unplug_timer);
+       return 1;
+}
+
+EXPORT_SYMBOL(blk_remove_plug);
+
+/*
+ * remove the plug and let it rip..
+ */
+void __generic_unplug_device(request_queue_t *q)
+{
+       if (unlikely(test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)))
+               return;
+
+       if (!blk_remove_plug(q))
+               return;
+
+       q->request_fn(q);
+}
+EXPORT_SYMBOL(__generic_unplug_device);
+
+/**
+ * generic_unplug_device - fire a request queue
+ * @q:    The &request_queue_t in question
+ *
+ * Description:
+ *   Linux uses plugging to build bigger requests queues before letting
+ *   the device have at them. If a queue is plugged, the I/O scheduler
+ *   is still adding and merging requests on the queue. Once the queue
+ *   gets unplugged, the request_fn defined for the queue is invoked and
+ *   transfers started.
+ **/
+void generic_unplug_device(request_queue_t *q)
+{
+       spin_lock_irq(q->queue_lock);
+       __generic_unplug_device(q);
+       spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(generic_unplug_device);
+
+static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
+                                  struct page *page)
+{
+       request_queue_t *q = bdi->unplug_io_data;
+
+       /*
+        * devices don't necessarily have an ->unplug_fn defined
+        */
+       if (q->unplug_fn)
+               q->unplug_fn(q);
+}
+
+static void blk_unplug_work(void *data)
+{
+       request_queue_t *q = data;
+
+       q->unplug_fn(q);
+}
+
+static void blk_unplug_timeout(unsigned long data)
+{
+       request_queue_t *q = (request_queue_t *)data;
+
+       kblockd_schedule_work(&q->unplug_work);
+}
+
+/**
+ * blk_start_queue - restart a previously stopped queue
+ * @q:    The &request_queue_t in question
+ *
+ * Description:
+ *   blk_start_queue() will clear the stop flag on the queue, and call
+ *   the request_fn for the queue if it was in a stopped state when
+ *   entered. Also see blk_stop_queue(). Queue lock must be held.
+ **/
+void blk_start_queue(request_queue_t *q)
+{
+       clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+
+       /*
+        * one level of recursion is ok and is much faster than kicking
+        * the unplug handling
+        */
+       if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+               q->request_fn(q);
+               clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+       } else {
+               blk_plug_device(q);
+               kblockd_schedule_work(&q->unplug_work);
+       }
+}
+
+EXPORT_SYMBOL(blk_start_queue);
+
+/**
+ * blk_stop_queue - stop a queue
+ * @q:    The &request_queue_t in question
+ *
+ * Description:
+ *   The Linux block layer assumes that a block driver will consume all
+ *   entries on the request queue when the request_fn strategy is called.
+ *   Often this will not happen, because of hardware limitations (queue
+ *   depth settings). If a device driver gets a 'queue full' response,
+ *   or if it simply chooses not to queue more I/O at one point, it can
+ *   call this function to prevent the request_fn from being called until
+ *   the driver has signalled it's ready to go again. This happens by calling
+ *   blk_start_queue() to restart queue operations. Queue lock must be held.
+ **/
+void blk_stop_queue(request_queue_t *q)
+{
+       blk_remove_plug(q);
+       set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+}
+EXPORT_SYMBOL(blk_stop_queue);
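/*
 * Illustrative sketch of the stop/start protocol described above: stop
 * the queue when the hardware reports "queue full" and restart it from
 * the completion interrupt once a slot frees up. The myhba_* names are
 * hypothetical; both paths hold the queue lock.
 */
static void myhba_request_fn(request_queue_t *q)
{
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                if (myhba_controller_full()) {  /* hypothetical helper */
                        blk_stop_queue(q);
                        break;
                }
                blkdev_dequeue_request(rq);
                myhba_issue(rq);                /* hypothetical helper */
        }
}

static void myhba_done_intr(request_queue_t *q)
{
        blk_start_queue(q);             /* re-invokes myhba_request_fn */
}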
+
+/**
+ * blk_sync_queue - cancel any pending callbacks on a queue
+ * @q: the queue
+ *
+ * Description:
+ *     The block layer may perform asynchronous callback activity
+ *     on a queue, such as calling the unplug function after a timeout.
+ *     A block device may call blk_sync_queue to ensure that any
+ *     such activity is cancelled, thus allowing it to release resources
+ *     that the callbacks might use. The caller must already have made sure
+ *     that its ->make_request_fn will not re-add plugging prior to calling
+ *     this function.
+ *
+ */
+void blk_sync_queue(struct request_queue *q)
+{
+       del_timer_sync(&q->unplug_timer);
+       kblockd_flush();
+}
+EXPORT_SYMBOL(blk_sync_queue);
+
+/**
+ * blk_run_queue - run a single device queue
+ * @q: The queue to run
+ */
+void blk_run_queue(struct request_queue *q)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       blk_remove_plug(q);
+       if (!elv_queue_empty(q))
+               q->request_fn(q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_run_queue);
+
+/**
+ * blk_cleanup_queue - release a &request_queue_t when it is no longer needed
+ * @q:    the request queue to be released
+ *
+ * Description:
+ *     blk_cleanup_queue is the pair to blk_init_queue() or
+ *     blk_queue_make_request().  It should be called when a request queue is
+ *     being released; typically when a block device is being de-registered.
+ *     Currently, its primary task is to free all the &struct request
+ *     structures that were allocated to the queue and the queue itself.
+ *
+ * Caveat:
+ *     Hopefully the low level driver will have finished any
+ *     outstanding requests first...
+ **/
+void blk_cleanup_queue(request_queue_t * q)
+{
+       struct request_list *rl = &q->rq;
+
+       if (!atomic_dec_and_test(&q->refcnt))
+               return;
+
+       if (q->elevator)
+               elevator_exit(q->elevator);
+
+       blk_sync_queue(q);
+
+       if (rl->rq_pool)
+               mempool_destroy(rl->rq_pool);
+
+       if (q->queue_tags)
+               __blk_queue_free_tags(q);
+
+       blk_queue_ordered(q, QUEUE_ORDERED_NONE);
+
+       kmem_cache_free(requestq_cachep, q);
+}
+
+EXPORT_SYMBOL(blk_cleanup_queue);
+
+static int blk_init_free_list(request_queue_t *q)
+{
+       struct request_list *rl = &q->rq;
+
+       rl->count[READ] = rl->count[WRITE] = 0;
+       rl->starved[READ] = rl->starved[WRITE] = 0;
+       rl->elvpriv = 0;
+       init_waitqueue_head(&rl->wait[READ]);
+       init_waitqueue_head(&rl->wait[WRITE]);
+
+       rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
+                               mempool_free_slab, request_cachep, q->node);
+
+       if (!rl->rq_pool)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static int __make_request(request_queue_t *, struct bio *);
+
+request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
+{
+       return blk_alloc_queue_node(gfp_mask, -1);
+}
+EXPORT_SYMBOL(blk_alloc_queue);
+
+request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+{
+       request_queue_t *q;
+
+       q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
+       if (!q)
+               return NULL;
+
+       memset(q, 0, sizeof(*q));
+       init_timer(&q->unplug_timer);
+       atomic_set(&q->refcnt, 1);
+
+       q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
+       q->backing_dev_info.unplug_io_data = q;
+
+       return q;
+}
+EXPORT_SYMBOL(blk_alloc_queue_node);
+
+/**
+ * blk_init_queue  - prepare a request queue for use with a block device
+ * @rfn:  The function to be called to process requests that have been
+ *        placed on the queue.
+ * @lock: Request queue spin lock
+ *
+ * Description:
+ *    If a block device wishes to use the standard request handling procedures,
+ *    which sorts requests and coalesces adjacent requests, then it must
+ *    call blk_init_queue().  The function @rfn will be called when there
+ *    are requests on the queue that need to be processed.  If the device
+ *    supports plugging, then @rfn may not be called immediately when requests
+ *    are available on the queue, but may be called at some time later instead.
+ *    Plugged queues are generally unplugged when a buffer belonging to one
+ *    of the requests on the queue is needed, or due to memory pressure.
+ *
+ *    @rfn is not required, or even expected, to remove all requests off the
+ *    queue, but only as many as it can handle at a time.  If it does leave
+ *    requests on the queue, it is responsible for arranging that the requests
+ *    get dealt with eventually.
+ *
+ *    The queue spin lock must be held while manipulating the requests on the
+ *    request queue.
+ *
+ *    Function returns a pointer to the initialized request queue, or NULL if
+ *    it didn't succeed.
+ *
+ * Note:
+ *    blk_init_queue() must be paired with a blk_cleanup_queue() call
+ *    when the block device is deactivated (such as at module unload).
+ **/
+
+request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
+{
+       return blk_init_queue_node(rfn, lock, -1);
+}
+EXPORT_SYMBOL(blk_init_queue);
+
+request_queue_t *
+blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
+{
+       request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+
+       if (!q)
+               return NULL;
+
+       q->node = node_id;
+       if (blk_init_free_list(q))
+               goto out_init;
+
+       /*
+        * if caller didn't supply a lock, they get per-queue locking with
+        * our embedded lock
+        */
+       if (!lock) {
+               spin_lock_init(&q->__queue_lock);
+               lock = &q->__queue_lock;
+       }
+
+       q->request_fn           = rfn;
+       q->back_merge_fn        = ll_back_merge_fn;
+       q->front_merge_fn       = ll_front_merge_fn;
+       q->merge_requests_fn    = ll_merge_requests_fn;
+       q->prep_rq_fn           = NULL;
+       q->unplug_fn            = generic_unplug_device;
+       q->queue_flags          = (1 << QUEUE_FLAG_CLUSTER);
+       q->queue_lock           = lock;
+
+       blk_queue_segment_boundary(q, 0xffffffff);
+
+       blk_queue_make_request(q, __make_request);
+       blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
+
+       blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
+       blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
+
+       /*
+        * all done
+        */
+       if (!elevator_init(q, NULL)) {
+               blk_queue_congestion_threshold(q);
+               return q;
+       }
+
+       blk_cleanup_queue(q);
+out_init:
+       kmem_cache_free(requestq_cachep, q);
+       return NULL;
+}
+EXPORT_SYMBOL(blk_init_queue_node);
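+
+/*
+ * Editorial sketch (illustrative only, wrapped in #if 0 so it is not part
+ * of this change): the blk_init_queue()/blk_cleanup_queue() pairing the
+ * comment above describes.  "mydev_request", "mydev_lock", "mydev_queue"
+ * and the init/exit hooks are hypothetical names.
+ */
+#if 0
+static DEFINE_SPINLOCK(mydev_lock);
+static request_queue_t *mydev_queue;
+
+static void mydev_request(request_queue_t *q);        /* driver strategy fn */
+
+static int __init mydev_init(void)
+{
+       /* the queue uses mydev_lock; mydev_request is called with it held */
+       mydev_queue = blk_init_queue(mydev_request, &mydev_lock);
+       if (!mydev_queue)
+               return -ENOMEM;
+
+       blk_queue_max_sectors(mydev_queue, 128);
+       return 0;
+}
+
+static void __exit mydev_exit(void)
+{
+       /* releases the request pool and the queue itself */
+       blk_cleanup_queue(mydev_queue);
+}
+#endif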
+
+int blk_get_queue(request_queue_t *q)
+{
+       if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+               atomic_inc(&q->refcnt);
+               return 0;
+       }
+
+       return 1;
+}
+
+EXPORT_SYMBOL(blk_get_queue);
+
+static inline void blk_free_request(request_queue_t *q, struct request *rq)
+{
+       if (rq->flags & REQ_ELVPRIV)
+               elv_put_request(q, rq);
+       mempool_free(rq, q->rq.rq_pool);
+}
+
+static inline struct request *
+blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
+                 int priv, gfp_t gfp_mask)
+{
+       struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+
+       if (!rq)
+               return NULL;
+
+       /*
+        * first three bits are identical in rq->flags and bio->bi_rw,
+        * see bio.h and blkdev.h
+        */
+       rq->flags = rw;
+
+       if (priv) {
+               if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
+                       mempool_free(rq, q->rq.rq_pool);
+                       return NULL;
+               }
+               rq->flags |= REQ_ELVPRIV;
+       }
+
+       return rq;
+}
+
+/*
+ * ioc_batching returns true if the ioc is a valid batching context and
+ * should be given priority access to a request.
+ */
+static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
+{
+       if (!ioc)
+               return 0;
+
+       /*
+        * Make sure the process is able to allocate at least 1 request
+        * even if the batch times out, otherwise we could theoretically
+        * lose wakeups.
+        */
+       return ioc->nr_batch_requests == q->nr_batching ||
+               (ioc->nr_batch_requests > 0
+               && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
+}
+
+/*
+ * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
+ * will cause the process to be a "batcher" on all queues in the system. This
+ * is the behaviour we want though - once it gets a wakeup it should be given
+ * a nice run.
+ */
+static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
+{
+       if (!ioc || ioc_batching(q, ioc))
+               return;
+
+       ioc->nr_batch_requests = q->nr_batching;
+       ioc->last_waited = jiffies;
+}
+
+static void __freed_request(request_queue_t *q, int rw)
+{
+       struct request_list *rl = &q->rq;
+
+       if (rl->count[rw] < queue_congestion_off_threshold(q))
+               clear_queue_congested(q, rw);
+
+       if (rl->count[rw] + 1 <= q->nr_requests) {
+               if (waitqueue_active(&rl->wait[rw]))
+                       wake_up(&rl->wait[rw]);
+
+               blk_clear_queue_full(q, rw);
+       }
+}
+
+/*
+ * A request has just been released.  Account for it, update the full and
+ * congestion status, wake up any waiters.   Called under q->queue_lock.
+ */
+static void freed_request(request_queue_t *q, int rw, int priv)
+{
+       struct request_list *rl = &q->rq;
+
+       rl->count[rw]--;
+       if (priv)
+               rl->elvpriv--;
+
+       __freed_request(q, rw);
+
+       if (unlikely(rl->starved[rw ^ 1]))
+               __freed_request(q, rw ^ 1);
+}
+
+#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
+/*
+ * Get a free request, queue_lock must be held.
+ * Returns NULL on failure, with queue_lock held.
+ * Returns !NULL on success, with queue_lock *not held*.
+ */
+static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
+                                  gfp_t gfp_mask)
+{
+       struct request *rq = NULL;
+       struct request_list *rl = &q->rq;
+       struct io_context *ioc = current_io_context(GFP_ATOMIC);
+       int priv;
+
+       if (rl->count[rw]+1 >= q->nr_requests) {
+               /*
+                * The queue will fill after this allocation, so set it as
+                * full, and mark this process as "batching". This process
+                * will be allowed to complete a batch of requests, others
+                * will be blocked.
+                */
+               if (!blk_queue_full(q, rw)) {
+                       ioc_set_batching(q, ioc);
+                       blk_set_queue_full(q, rw);
+               }
+       }
+
+       switch (elv_may_queue(q, rw, bio)) {
+               case ELV_MQUEUE_NO:
+                       goto rq_starved;
+               case ELV_MQUEUE_MAY:
+                       break;
+               case ELV_MQUEUE_MUST:
+                       goto get_rq;
+       }
+
+       if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) {
+               /*
+                * The queue is full and the allocating process is not a
+                * "batcher", and not exempted by the IO scheduler
+                */
+               goto out;
+       }
+
+get_rq:
+       /*
+        * Only allow batching queuers to allocate up to 50% over the defined
+        * limit of requests, otherwise we could have thousands of requests
+        * allocated with any setting of ->nr_requests
+        */
+       if (rl->count[rw] >= (3 * q->nr_requests / 2))
+               goto out;
+
+       rl->count[rw]++;
+       rl->starved[rw] = 0;
+       if (rl->count[rw] >= queue_congestion_on_threshold(q))
+               set_queue_congested(q, rw);
+
+       priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+       if (priv)
+               rl->elvpriv++;
+
+       spin_unlock_irq(q->queue_lock);
+
+       rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
+       if (!rq) {
+               /*
+                * Allocation failed presumably due to memory. Undo anything
+                * we might have messed up.
+                *
+                * Allocating task should really be put onto the front of the
+                * wait queue, but this is pretty rare.
+                */
+               spin_lock_irq(q->queue_lock);
+               freed_request(q, rw, priv);
+
+               /*
+                * in the very unlikely event that allocation failed and no
+                * requests for this direction were pending, mark us starved
+                * so that freeing of a request in the other direction will
+                * notice us. Another possible fix would be to split the
+                * rq mempool into READ and WRITE
+                */
+rq_starved:
+               if (unlikely(rl->count[rw] == 0))
+                       rl->starved[rw] = 1;
+
+               goto out;
+       }
+
+       if (ioc_batching(q, ioc))
+               ioc->nr_batch_requests--;
+
+       rq_init(q, rq);
+       rq->rl = rl;
+out:
+       return rq;
+}
+
+/*
+ * No available requests for this queue, unplug the device and wait for some
+ * requests to become available.
+ *
+ * Called with q->queue_lock held, and returns with it unlocked.
+ */
+static struct request *get_request_wait(request_queue_t *q, int rw,
+                                       struct bio *bio)
+{
+       struct request *rq;
+
+       rq = get_request(q, rw, bio, GFP_NOIO);
+       while (!rq) {
+               DEFINE_WAIT(wait);
+               struct request_list *rl = &q->rq;
+
+               prepare_to_wait_exclusive(&rl->wait[rw], &wait,
+                               TASK_UNINTERRUPTIBLE);
+
+               rq = get_request(q, rw, bio, GFP_NOIO);
+
+               if (!rq) {
+                       struct io_context *ioc;
+
+                       __generic_unplug_device(q);
+                       spin_unlock_irq(q->queue_lock);
+                       io_schedule();
+
+                       /*
+                        * After sleeping, we become a "batching" process and
+                        * will be able to allocate at least one request, and
+                        * up to a big batch of them for a small period of time.
+                        * See ioc_batching, ioc_set_batching
+                        */
+                       ioc = current_io_context(GFP_NOIO);
+                       ioc_set_batching(q, ioc);
+
+                       spin_lock_irq(q->queue_lock);
+               }
+               finish_wait(&rl->wait[rw], &wait);
+       }
+
+       return rq;
+}
+
+struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
+{
+       struct request *rq;
+
+       BUG_ON(rw != READ && rw != WRITE);
+
+       spin_lock_irq(q->queue_lock);
+       if (gfp_mask & __GFP_WAIT) {
+               rq = get_request_wait(q, rw, NULL);
+       } else {
+               rq = get_request(q, rw, NULL, gfp_mask);
+               if (!rq)
+                       spin_unlock_irq(q->queue_lock);
+       }
+       /* q->queue_lock is unlocked at this point */
+
+       return rq;
+}
+EXPORT_SYMBOL(blk_get_request);
+
+/**
+ * blk_requeue_request - put a request back on queue
+ * @q:         request queue where request should be inserted
+ * @rq:                request to be inserted
+ *
+ * Description:
+ *    Drivers often keep queueing requests until the hardware cannot accept
+ *    more, when that condition happens we need to put the request back
+ *    on the queue. Must be called with queue lock held.
+ */
+void blk_requeue_request(request_queue_t *q, struct request *rq)
+{
+       if (blk_rq_tagged(rq))
+               blk_queue_end_tag(q, rq);
+
+       elv_requeue_request(q, rq);
+}
+
+EXPORT_SYMBOL(blk_requeue_request);
+
+/**
+ * blk_insert_request - insert a special request in to a request queue
+ * @q:         request queue where request should be inserted
+ * @rq:                request to be inserted
+ * @at_head:   insert request at head or tail of queue
+ * @data:      private data
+ *
+ * Description:
+ *    Many block devices need to execute commands asynchronously, so they don't
+ *    block the whole kernel from preemption during request execution.  This is
+ *    accomplished normally by inserting artificial requests tagged as
+ *    REQ_SPECIAL into the corresponding request queue, and letting them be
+ *    scheduled for actual execution by the request queue.
+ *
+ *    We have the option of inserting at the head or the tail of the queue.
+ *    Typically we use the tail for new ioctls and so forth.  We use the head
+ *    of the queue for things like a QUEUE_FULL message from a device, or a
+ *    host that is unable to accept a particular command.
+ */
+void blk_insert_request(request_queue_t *q, struct request *rq,
+                       int at_head, void *data)
+{
+       int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+       unsigned long flags;
+
+       /*
+        * tell I/O scheduler that this isn't a regular read/write (ie it
+        * must not attempt merges on this) and that it acts as a soft
+        * barrier
+        */
+       rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER;
+
+       rq->special = data;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+
+       /*
+        * If command is tagged, release the tag
+        */
+       if (blk_rq_tagged(rq))
+               blk_queue_end_tag(q, rq);
+
+       drive_stat_acct(rq, rq->nr_sectors, 1);
+       __elv_add_request(q, rq, where, 0);
+
+       if (blk_queue_plugged(q))
+               __generic_unplug_device(q);
+       else
+               q->request_fn(q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+EXPORT_SYMBOL(blk_insert_request);
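+
+/*
+ * Editorial sketch (illustrative only, #if 0, not part of this change):
+ * pushing a REQ_SPECIAL command to a driver's own request_fn with
+ * blk_insert_request().  "mydev_special_cmd" and the command data are
+ * hypothetical; the driver's request_fn is expected to recognise
+ * REQ_SPECIAL, act on rq->special and complete the request itself.
+ */
+#if 0
+static int mydev_special_cmd(request_queue_t *q, void *cmd)
+{
+       struct request *rq;
+
+       rq = blk_get_request(q, WRITE, __GFP_WAIT);
+       if (!rq)
+               return -ENOMEM;
+
+       /* queued at the head, ahead of normal fs requests */
+       blk_insert_request(q, rq, 1, cmd);
+       return 0;
+}
+#endif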
+
+/**
+ * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
+ * @q:         request queue where request should be inserted
+ * @rq:                request structure to fill
+ * @ubuf:      the user buffer
+ * @len:       length of user data
+ *
+ * Description:
+ *    Data will be mapped directly for zero copy io, if possible. Otherwise
+ *    a kernel bounce buffer is used.
+ *
+ *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+ *    still in process context.
+ *
+ *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
+ *    before being submitted to the device, as pages mapped may be out of
+ *    reach. It's the caller's responsibility to make sure this happens. The
+ *    original bio must be passed back in to blk_rq_unmap_user() for proper
+ *    unmapping.
+ */
+int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
+                   unsigned int len)
+{
+       unsigned long uaddr;
+       struct bio *bio;
+       int reading;
+
+       if (len > (q->max_sectors << 9))
+               return -EINVAL;
+       if (!len || !ubuf)
+               return -EINVAL;
+
+       reading = rq_data_dir(rq) == READ;
+
+       /*
+        * if alignment requirement is satisfied, map in user pages for
+        * direct dma. else, set up kernel bounce buffers
+        */
+       uaddr = (unsigned long) ubuf;
+       if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+               bio = bio_map_user(q, NULL, uaddr, len, reading);
+       else
+               bio = bio_copy_user(q, uaddr, len, reading);
+
+       if (!IS_ERR(bio)) {
+               rq->bio = rq->biotail = bio;
+               blk_rq_bio_prep(q, rq, bio);
+
+               rq->buffer = rq->data = NULL;
+               rq->data_len = len;
+               return 0;
+       }
+
+       /*
+        * bio is the err-ptr
+        */
+       return PTR_ERR(bio);
+}
+
+EXPORT_SYMBOL(blk_rq_map_user);
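+
+/*
+ * Editorial sketch (illustrative only, #if 0, not part of this change):
+ * the typical REQ_BLOCK_PC flow built on blk_rq_map_user() and
+ * blk_rq_unmap_user(), similar in spirit to the SG_IO path, for a device
+ * whose driver understands packet commands.  "mydev_sg_io", "cdb",
+ * "cdb_len" and "disk" are hypothetical; error handling is abbreviated.
+ */
+#if 0
+static int mydev_sg_io(request_queue_t *q, struct gendisk *disk,
+                      unsigned char *cdb, unsigned int cdb_len,
+                      void __user *ubuf, unsigned int len, int writing)
+{
+       struct request *rq;
+       struct bio *bio;
+       int err;
+
+       rq = blk_get_request(q, writing ? WRITE : READ, __GFP_WAIT);
+       if (!rq)
+               return -ENOMEM;
+
+       err = blk_rq_map_user(q, rq, ubuf, len);
+       if (err) {
+               blk_put_request(rq);
+               return err;
+       }
+
+       rq->flags |= REQ_BLOCK_PC;
+       memcpy(rq->cmd, cdb, cdb_len);
+       rq->cmd_len = cdb_len;
+       rq->timeout = 60 * HZ;
+
+       /* keep the original bio around; rq->bio may change on completion */
+       bio = rq->bio;
+       err = blk_execute_rq(q, disk, rq, 0);
+
+       blk_rq_unmap_user(bio, len);
+       blk_put_request(rq);
+       return err;
+}
+#endif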
+
+/**
+ * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
+ * @q:         request queue where request should be inserted
+ * @rq:                request to map data to
+ * @iov:       pointer to the iovec
+ * @iov_count: number of elements in the iovec
+ *
+ * Description:
+ *    Data will be mapped directly for zero copy io, if possible. Otherwise
+ *    a kernel bounce buffer is used.
+ *
+ *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+ *    still in process context.
+ *
+ *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
+ *    before being submitted to the device, as pages mapped may be out of
+ *    reach. It's the caller's responsibility to make sure this happens. The
+ *    original bio must be passed back in to blk_rq_unmap_user() for proper
+ *    unmapping.
+ */
+int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
+                       struct sg_iovec *iov, int iov_count)
+{
+       struct bio *bio;
+
+       if (!iov || iov_count <= 0)
+               return -EINVAL;
+
+       /* unlike bio_map_user(), we don't allow misaligned data.  If the
+        * user is using sg, they're expected to know the alignment constraints
+        * and respect them accordingly */
+       bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
+       if (IS_ERR(bio))
+               return PTR_ERR(bio);
+
+       rq->bio = rq->biotail = bio;
+       blk_rq_bio_prep(q, rq, bio);
+       rq->buffer = rq->data = NULL;
+       rq->data_len = bio->bi_size;
+       return 0;
+}
+
+EXPORT_SYMBOL(blk_rq_map_user_iov);
+
+/**
+ * blk_rq_unmap_user - unmap a request with user data
+ * @bio:       bio to be unmapped
+ * @ulen:      length of user buffer
+ *
+ * Description:
+ *    Unmap a bio previously mapped by blk_rq_map_user().
+ */
+int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
+{
+       int ret = 0;
+
+       if (bio) {
+               if (bio_flagged(bio, BIO_USER_MAPPED))
+                       bio_unmap_user(bio);
+               else
+                       ret = bio_uncopy_user(bio);
+       }
+
+       return ret;
+}
+
+EXPORT_SYMBOL(blk_rq_unmap_user);
+
+/**
+ * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
+ * @q:         request queue where request should be inserted
+ * @rq:                request to fill
+ * @kbuf:      the kernel buffer
+ * @len:       length of kernel data
+ * @gfp_mask:  memory allocation flags
+ */
+int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
+                   unsigned int len, gfp_t gfp_mask)
+{
+       struct bio *bio;
+
+       if (len > (q->max_sectors << 9))
+               return -EINVAL;
+       if (!len || !kbuf)
+               return -EINVAL;
+
+       bio = bio_map_kern(q, kbuf, len, gfp_mask);
+       if (IS_ERR(bio))
+               return PTR_ERR(bio);
+
+       if (rq_data_dir(rq) == WRITE)
+               bio->bi_rw |= (1 << BIO_RW);
+
+       rq->bio = rq->biotail = bio;
+       blk_rq_bio_prep(q, rq, bio);
+
+       rq->buffer = rq->data = NULL;
+       rq->data_len = len;
+       return 0;
+}
+
+EXPORT_SYMBOL(blk_rq_map_kern);
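+
+/*
+ * Editorial sketch (illustrative only, #if 0, not part of this change):
+ * the same REQ_BLOCK_PC idea with a kernel buffer, using blk_rq_map_kern()
+ * instead of the user-mapping helpers.  "mydev_kern_cmd" and "buf" are
+ * hypothetical names.
+ */
+#if 0
+static int mydev_kern_cmd(request_queue_t *q, struct gendisk *disk,
+                         unsigned char *cdb, unsigned int cdb_len,
+                         void *buf, unsigned int len)
+{
+       struct request *rq;
+       int err;
+
+       rq = blk_get_request(q, READ, __GFP_WAIT);
+       if (!rq)
+               return -ENOMEM;
+
+       err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
+       if (!err) {
+               rq->flags |= REQ_BLOCK_PC;
+               memcpy(rq->cmd, cdb, cdb_len);
+               rq->cmd_len = cdb_len;
+               rq->timeout = 30 * HZ;
+               err = blk_execute_rq(q, disk, rq, 0);
+       }
+
+       blk_put_request(rq);
+       return err;
+}
+#endif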
+
+/**
+ * blk_execute_rq_nowait - insert a request into queue for execution
+ * @q:         queue to insert the request in
+ * @bd_disk:   matching gendisk
+ * @rq:                request to insert
+ * @at_head:    insert request at head or tail of queue
+ * @done:      I/O completion handler
+ *
+ * Description:
+ *    Insert a fully prepared request into the io scheduler queue for
+ *    execution, at the head or tail depending on @at_head.  Don't wait
+ *    for completion.
+ */
+void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
+                          struct request *rq, int at_head,
+                          void (*done)(struct request *))
+{
+       int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+
+       rq->rq_disk = bd_disk;
+       rq->flags |= REQ_NOMERGE;
+       rq->end_io = done;
+       elv_add_request(q, rq, where, 1);
+       generic_unplug_device(q);
+}
+
+/**
+ * blk_execute_rq - insert a request into queue for execution
+ * @q:         queue to insert the request in
+ * @bd_disk:   matching gendisk
+ * @rq:                request to insert
+ * @at_head:    insert request at head or tail of queue
+ *
+ * Description:
+ *    Insert a fully prepared request into the io scheduler queue for
+ *    execution, at the head or tail depending on @at_head, and wait for
+ *    completion.
+ */
+int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
+                  struct request *rq, int at_head)
+{
+       DECLARE_COMPLETION(wait);
+       char sense[SCSI_SENSE_BUFFERSIZE];
+       int err = 0;
+
+       /*
+        * we need an extra reference to the request, so we can look at
+        * it after io completion
+        */
+       rq->ref_count++;
+
+       if (!rq->sense) {
+               memset(sense, 0, sizeof(sense));
+               rq->sense = sense;
+               rq->sense_len = 0;
+       }
+
+       rq->waiting = &wait;
+       blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
+       wait_for_completion(&wait);
+       rq->waiting = NULL;
+
+       if (rq->errors)
+               err = -EIO;
+
+       return err;
+}
+
+EXPORT_SYMBOL(blk_execute_rq);
+
+/**
+ * blkdev_issue_flush - queue a flush
+ * @bdev:      blockdev to issue flush for
+ * @error_sector:      error sector
+ *
+ * Description:
+ *    Issue a flush for the block device in question. Caller can supply
+ *    room for storing the error offset in case of a flush error, if they
+ *    wish to.  Caller must run wait_for_completion() on its own.
+ */
+int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
+{
+       request_queue_t *q;
+
+       if (bdev->bd_disk == NULL)
+               return -ENXIO;
+
+       q = bdev_get_queue(bdev);
+       if (!q)
+               return -ENXIO;
+       if (!q->issue_flush_fn)
+               return -EOPNOTSUPP;
+
+       return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
+}
+
+EXPORT_SYMBOL(blkdev_issue_flush);
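+
+/*
+ * Editorial sketch (illustrative only, #if 0, not part of this change):
+ * flushing a device's write cache before relying on data being on stable
+ * storage, as an fsync-like path might do.  "mydev_flush_cache" is a
+ * hypothetical name; "bdev" is assumed to be an open struct block_device.
+ */
+#if 0
+static int mydev_flush_cache(struct block_device *bdev)
+{
+       sector_t error_sector;
+       int err;
+
+       err = blkdev_issue_flush(bdev, &error_sector);
+       if (err == -EOPNOTSUPP)
+               err = 0;        /* device has no flush method, nothing to do */
+       return err;
+}
+#endif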
+
+static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
+{
+       int rw = rq_data_dir(rq);
+
+       if (!blk_fs_request(rq) || !rq->rq_disk)
+               return;
+
+       if (!new_io) {
+               __disk_stat_inc(rq->rq_disk, merges[rw]);
+       } else {
+               disk_round_stats(rq->rq_disk);
+               rq->rq_disk->in_flight++;
+       }
+}
+
+/*
+ * add-request adds a request to the linked list.
+ * queue lock is held and interrupts disabled, as we muck with the
+ * request queue list.
+ */
+static inline void add_request(request_queue_t * q, struct request * req)
+{
+       drive_stat_acct(req, req->nr_sectors, 1);
+
+       if (q->activity_fn)
+               q->activity_fn(q->activity_data, rq_data_dir(req));
+
+       /*
+        * elevator indicated where it wants this request to be
+        * inserted at elevator_merge time
+        */
+       __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
+}
+/*
+ * disk_round_stats()  - Round off the performance stats on a struct
+ * disk_stats.
+ *
+ * The average IO queue length and utilisation statistics are maintained
+ * by observing the current state of the queue length and the amount of
+ * time it has been in this state for.
+ *
+ * Normally, that accounting is done on IO completion, but that can result
+ * in more than a second's worth of IO being accounted for within any one
+ * second, leading to >100% utilisation.  To deal with that, we call this
+ * function to do a round-off before returning the results when reading
+ * /proc/diskstats.  This accounts immediately for all queue usage up to
+ * the current jiffies and restarts the counters again.
+ */
+void disk_round_stats(struct gendisk *disk)
+{
+       unsigned long now = jiffies;
+
+       if (now == disk->stamp)
+               return;
+
+       if (disk->in_flight) {
+               __disk_stat_add(disk, time_in_queue,
+                               disk->in_flight * (now - disk->stamp));
+               __disk_stat_add(disk, io_ticks, (now - disk->stamp));
+       }
+       disk->stamp = now;
+}
+
+/*
+ * queue lock must be held
+ */
+static void __blk_put_request(request_queue_t *q, struct request *req)
+{
+       struct request_list *rl = req->rl;
+
+       if (unlikely(!q))
+               return;
+       if (unlikely(--req->ref_count))
+               return;
+
+       elv_completed_request(q, req);
+
+       req->rq_status = RQ_INACTIVE;
+       req->rl = NULL;
+
+       /*
+        * Request may not have originated from ll_rw_blk. If not,
+        * it didn't come out of our reserved rq pools
+        */
+       if (rl) {
+               int rw = rq_data_dir(req);
+               int priv = req->flags & REQ_ELVPRIV;
+
+               BUG_ON(!list_empty(&req->queuelist));
+
+               blk_free_request(q, req);
+               freed_request(q, rw, priv);
+       }
+}
+
+void blk_put_request(struct request *req)
+{
+       unsigned long flags;
+       request_queue_t *q = req->q;
+
+       /*
+        * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
+        * following if (q) test.
+        */
+       if (q) {
+               spin_lock_irqsave(q->queue_lock, flags);
+               __blk_put_request(q, req);
+               spin_unlock_irqrestore(q->queue_lock, flags);
+       }
+}
+
+EXPORT_SYMBOL(blk_put_request);
+
+/**
+ * blk_end_sync_rq - executes a completion event on a request
+ * @rq: request to complete
+ */
+void blk_end_sync_rq(struct request *rq)
+{
+       struct completion *waiting = rq->waiting;
+
+       rq->waiting = NULL;
+       __blk_put_request(rq->q, rq);
+
+       /*
+        * complete last, if this is a stack request the process (and thus
+        * the rq pointer) could be invalid right after this complete()
+        */
+       complete(waiting);
+}
+EXPORT_SYMBOL(blk_end_sync_rq);
+
+/**
+ * blk_congestion_wait - wait for a queue to become uncongested
+ * @rw: READ or WRITE
+ * @timeout: timeout in jiffies
+ *
+ * Waits for up to @timeout jiffies for a queue (any queue) to exit congestion.
+ * If no queues are congested then just wait for the next request to be
+ * returned.
+ */
+long blk_congestion_wait(int rw, long timeout)
+{
+       long ret;
+       DEFINE_WAIT(wait);
+       wait_queue_head_t *wqh = &congestion_wqh[rw];
+
+       prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+       ret = io_schedule_timeout(timeout);
+       finish_wait(wqh, &wait);
+       return ret;
+}
+
+EXPORT_SYMBOL(blk_congestion_wait);
+
+/*
+ * Has to be called with the request spinlock acquired
+ */
+static int attempt_merge(request_queue_t *q, struct request *req,
+                         struct request *next)
+{
+       if (!rq_mergeable(req) || !rq_mergeable(next))
+               return 0;
+
+       /*
+        * not contiguous
+        */
+       if (req->sector + req->nr_sectors != next->sector)
+               return 0;
+
+       if (rq_data_dir(req) != rq_data_dir(next)
+           || req->rq_disk != next->rq_disk
+           || next->waiting || next->special)
+               return 0;
+
+       /*
+        * If we are allowed to merge, then append bio list
+        * from next to rq and release next. merge_requests_fn
+        * will have updated segment counts, update sector
+        * counts here.
+        */
+       if (!q->merge_requests_fn(q, req, next))
+               return 0;
+
+       /*
+        * At this point we have either done a back merge
+        * or front merge. We need the smaller start_time of
+        * the merged requests to be the current request
+        * for accounting purposes.
+        */
+       if (time_after(req->start_time, next->start_time))
+               req->start_time = next->start_time;
+
+       req->biotail->bi_next = next->bio;
+       req->biotail = next->biotail;
+
+       req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
+
+       elv_merge_requests(q, req, next);
+
+       if (req->rq_disk) {
+               disk_round_stats(req->rq_disk);
+               req->rq_disk->in_flight--;
+       }
+
+       req->ioprio = ioprio_best(req->ioprio, next->ioprio);
+
+       __blk_put_request(q, next);
+       return 1;
+}
+
+static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
+{
+       struct request *next = elv_latter_request(q, rq);
+
+       if (next)
+               return attempt_merge(q, rq, next);
+
+       return 0;
+}
+
+static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
+{
+       struct request *prev = elv_former_request(q, rq);
+
+       if (prev)
+               return attempt_merge(q, prev, rq);
+
+       return 0;
+}
+
+/**
+ * blk_attempt_remerge  - attempt to remerge active head with next request
+ * @q:    The &request_queue_t belonging to the device
+ * @rq:   The head request (usually)
+ *
+ * Description:
+ *    For head-active devices, the queue can easily be unplugged so quickly
+ *    that proper merging is not done on the front request. This may hurt
+ *    performance greatly for some devices. The block layer cannot safely
+ *    do merging on that first request for these queues, but the driver can
+ *    call this function and make it happen anyway. Only the driver knows
+ *    when it is safe to do so.
+ **/
+void blk_attempt_remerge(request_queue_t *q, struct request *rq)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       attempt_back_merge(q, rq);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+EXPORT_SYMBOL(blk_attempt_remerge);
+
+static int __make_request(request_queue_t *q, struct bio *bio)
+{
+       struct request *req;
+       int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync;
+       unsigned short prio;
+       sector_t sector;
+
+       sector = bio->bi_sector;
+       nr_sectors = bio_sectors(bio);
+       cur_nr_sectors = bio_cur_sectors(bio);
+       prio = bio_prio(bio);
+
+       rw = bio_data_dir(bio);
+       sync = bio_sync(bio);
+
+       /*
+        * low level driver can indicate that it wants pages above a
+        * certain limit bounced to low memory (ie for highmem, or even
+        * ISA dma in theory)
+        */
+       blk_queue_bounce(q, &bio);
+
+       spin_lock_prefetch(q->queue_lock);
+
+       barrier = bio_barrier(bio);
+       if (unlikely(barrier) && (q->ordered == QUEUE_ORDERED_NONE)) {
+               err = -EOPNOTSUPP;
+               goto end_io;
+       }
+
+       spin_lock_irq(q->queue_lock);
+
+       if (unlikely(barrier) || elv_queue_empty(q))
+               goto get_rq;
+
+       el_ret = elv_merge(q, &req, bio);
+       switch (el_ret) {
+               case ELEVATOR_BACK_MERGE:
+                       BUG_ON(!rq_mergeable(req));
+
+                       if (!q->back_merge_fn(q, req, bio))
+                               break;
+
+                       req->biotail->bi_next = bio;
+                       req->biotail = bio;
+                       req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+                       req->ioprio = ioprio_best(req->ioprio, prio);
+                       drive_stat_acct(req, nr_sectors, 0);
+                       if (!attempt_back_merge(q, req))
+                               elv_merged_request(q, req);
+                       goto out;
+
+               case ELEVATOR_FRONT_MERGE:
+                       BUG_ON(!rq_mergeable(req));
+
+                       if (!q->front_merge_fn(q, req, bio))
+                               break;
+
+                       bio->bi_next = req->bio;
+                       req->bio = bio;
+
+                       /*
+                        * may not be valid. if the low level driver said
+                        * it didn't need a bounce buffer then it better
+                        * not touch req->buffer either...
+                        */
+                       req->buffer = bio_data(bio);
+                       req->current_nr_sectors = cur_nr_sectors;
+                       req->hard_cur_sectors = cur_nr_sectors;
+                       req->sector = req->hard_sector = sector;
+                       req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+                       req->ioprio = ioprio_best(req->ioprio, prio);
+                       drive_stat_acct(req, nr_sectors, 0);
+                       if (!attempt_front_merge(q, req))
+                               elv_merged_request(q, req);
+                       goto out;
+
+               /* ELV_NO_MERGE: elevator says don't/can't merge. */
+               default:
+                       ;
+       }
+
+get_rq:
+       /*
+        * Grab a free request. This may sleep but cannot fail.
+        * Returns with the queue unlocked.
+        */
+       req = get_request_wait(q, rw, bio);
+
+       /*
+        * After dropping the lock and possibly sleeping here, our request
+        * may now be mergeable after it had proven unmergeable (above).
+        * We don't worry about that case for efficiency. It won't happen
+        * often, and the elevators are able to handle it.
+        */
+
+       req->flags |= REQ_CMD;
+
+       /*
+        * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
+        */
+       if (bio_rw_ahead(bio) || bio_failfast(bio))
+               req->flags |= REQ_FAILFAST;
+
+       /*
+        * REQ_BARRIER implies no merging, but let's make it explicit
+        */
+       if (unlikely(barrier))
+               req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+
+       req->errors = 0;
+       req->hard_sector = req->sector = sector;
+       req->hard_nr_sectors = req->nr_sectors = nr_sectors;
+       req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
+       req->nr_phys_segments = bio_phys_segments(q, bio);
+       req->nr_hw_segments = bio_hw_segments(q, bio);
+       req->buffer = bio_data(bio);    /* see ->buffer comment above */
+       req->waiting = NULL;
+       req->bio = req->biotail = bio;
+       req->ioprio = prio;
+       req->rq_disk = bio->bi_bdev->bd_disk;
+       req->start_time = jiffies;
+
+       spin_lock_irq(q->queue_lock);
+       if (elv_queue_empty(q))
+               blk_plug_device(q);
+       add_request(q, req);
+out:
+       if (sync)
+               __generic_unplug_device(q);
+
+       spin_unlock_irq(q->queue_lock);
+       return 0;
+
+end_io:
+       bio_endio(bio, nr_sectors << 9, err);
+       return 0;
+}
+
+/*
+ * If bio->bi_bdev is a partition, remap the location
+ */
+static inline void blk_partition_remap(struct bio *bio)
+{
+       struct block_device *bdev = bio->bi_bdev;
+
+       if (bdev != bdev->bd_contains) {
+               struct hd_struct *p = bdev->bd_part;
+               const int rw = bio_data_dir(bio);
+
+               p->sectors[rw] += bio_sectors(bio);
+               p->ios[rw]++;
+
+               bio->bi_sector += p->start_sect;
+               bio->bi_bdev = bdev->bd_contains;
+       }
+}
+
+static void handle_bad_sector(struct bio *bio)
+{
+       char b[BDEVNAME_SIZE];
+
+       printk(KERN_INFO "attempt to access beyond end of device\n");
+       printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
+                       bdevname(bio->bi_bdev, b),
+                       bio->bi_rw,
+                       (unsigned long long)bio->bi_sector + bio_sectors(bio),
+                       (long long)(bio->bi_bdev->bd_inode->i_size >> 9));
+
+       set_bit(BIO_EOF, &bio->bi_flags);
+}
+
+/**
+ * generic_make_request: hand a buffer to its device driver for I/O
+ * @bio:  The bio describing the location in memory and on the device.
+ *
+ * generic_make_request() is used to make I/O requests of block
+ * devices. It is passed a &struct bio, which describes the I/O that needs
+ * to be done.
+ *
+ * generic_make_request() does not return any status.  The
+ * success/failure status of the request, along with notification of
+ * completion, is delivered asynchronously through the bio->bi_end_io
+ * function described (one day) elsewhere.
+ *
+ * The caller of generic_make_request must make sure that bi_io_vec
+ * are set to describe the memory buffer, and that bi_bdev and bi_sector are
+ * set to describe the device address, and the
+ * bi_end_io and optionally bi_private are set to describe how
+ * completion notification should be signaled.
+ *
+ * generic_make_request and the drivers it calls may use bi_next if this
+ * bio happens to be merged with someone else, and may change bi_bdev and
+ * bi_sector for remaps as it sees fit.  So the values of these fields
+ * should NOT be depended on after the call to generic_make_request.
+ */
+void generic_make_request(struct bio *bio)
+{
+       request_queue_t *q;
+       sector_t maxsector;
+       int ret, nr_sectors = bio_sectors(bio);
+
+       might_sleep();
+       /* Test device or partition size, when known. */
+       maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+       if (maxsector) {
+               sector_t sector = bio->bi_sector;
+
+               if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+                       /*
+                        * This may well happen - the kernel calls bread()
+                        * without checking the size of the device, e.g., when
+                        * mounting a device.
+                        */
+                       handle_bad_sector(bio);
+                       goto end_io;
+               }
+       }
+
+       /*
+        * Resolve the mapping until finished. (drivers are
+        * still free to implement/resolve their own stacking
+        * by explicitly returning 0)
+        *
+        * NOTE: we don't repeat the blk_size check for each new device.
+        * Stacking drivers are expected to know what they are doing.
+        */
+       do {
+               char b[BDEVNAME_SIZE];
+
+               q = bdev_get_queue(bio->bi_bdev);
+               if (!q) {
+                       printk(KERN_ERR
+                              "generic_make_request: Trying to access "
+                               "nonexistent block-device %s (%Lu)\n",
+                               bdevname(bio->bi_bdev, b),
+                               (long long) bio->bi_sector);
+end_io:
+                       bio_endio(bio, bio->bi_size, -EIO);
+                       break;
+               }
+
+               if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
+                       printk("bio too big device %s (%u > %u)\n", 
+                               bdevname(bio->bi_bdev, b),
+                               bio_sectors(bio),
+                               q->max_hw_sectors);
+                       goto end_io;
+               }
+
+               if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+                       goto end_io;
+
+               /*
+                * If this device has partitions, remap block n
+                * of partition p to block n+start(p) of the disk.
+                */
+               blk_partition_remap(bio);
+
+               ret = q->make_request_fn(q, bio);
+       } while (ret);
+}
+
+EXPORT_SYMBOL(generic_make_request);
+
+/**
+ * submit_bio: submit a bio to the block device layer for I/O
+ * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
+ * @bio: The &struct bio which describes the I/O
+ *
+ * submit_bio() is very similar in purpose to generic_make_request(), and
+ * uses that function to do most of the work. Both are fairly rough
+ * interfaces; @bio must be set up and ready for I/O.
+ *
+ */
+void submit_bio(int rw, struct bio *bio)
+{
+       int count = bio_sectors(bio);
+
+       BIO_BUG_ON(!bio->bi_size);
+       BIO_BUG_ON(!bio->bi_io_vec);
+       bio->bi_rw |= rw;
+       if (rw & WRITE)
+               mod_page_state(pgpgout, count);
+       else
+               mod_page_state(pgpgin, count);
+
+       if (unlikely(block_dump)) {
+               char b[BDEVNAME_SIZE];
+               printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
+                       current->comm, current->pid,
+                       (rw & WRITE) ? "WRITE" : "READ",
+                       (unsigned long long)bio->bi_sector,
+                       bdevname(bio->bi_bdev,b));
+       }
+
+       generic_make_request(bio);
+}
+
+EXPORT_SYMBOL(submit_bio);
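+
+/*
+ * Editorial sketch (illustrative only, #if 0, not part of this change):
+ * building a one-page bio by hand and pushing it through submit_bio() /
+ * generic_make_request(), waiting for completion.  "mydev_read_page" and
+ * "mydev_end_io" are hypothetical; note the bi_end_io convention of
+ * returning 1 while bi_size is still non-zero (partial completion), and
+ * that the bio_add_page() return value check is omitted for brevity.
+ */
+#if 0
+static int mydev_end_io(struct bio *bio, unsigned int bytes_done, int err)
+{
+       if (bio->bi_size)
+               return 1;       /* not fully completed yet */
+
+       complete((struct completion *) bio->bi_private);
+       return 0;
+}
+
+static int mydev_read_page(struct block_device *bdev, sector_t sector,
+                          struct page *page)
+{
+       struct bio *bio = bio_alloc(GFP_NOIO, 1);
+       DECLARE_COMPLETION(done);
+       int err = 0;
+
+       if (!bio)
+               return -ENOMEM;
+
+       bio->bi_bdev = bdev;
+       bio->bi_sector = sector;
+       bio->bi_end_io = mydev_end_io;
+       bio->bi_private = &done;
+       bio_add_page(bio, page, PAGE_SIZE, 0);
+
+       submit_bio(READ, bio);
+       wait_for_completion(&done);
+
+       if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+               err = -EIO;
+       bio_put(bio);
+       return err;
+}
+#endif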
+
+static void blk_recalc_rq_segments(struct request *rq)
+{
+       struct bio *bio, *prevbio = NULL;
+       int nr_phys_segs, nr_hw_segs;
+       unsigned int phys_size, hw_size;
+       request_queue_t *q = rq->q;
+
+       if (!rq->bio)
+               return;
+
+       phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
+       rq_for_each_bio(bio, rq) {
+               /* Force bio hw/phys segs to be recalculated. */
+               bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+
+               nr_phys_segs += bio_phys_segments(q, bio);
+               nr_hw_segs += bio_hw_segments(q, bio);
+               if (prevbio) {
+                       int pseg = phys_size + prevbio->bi_size + bio->bi_size;
+                       int hseg = hw_size + prevbio->bi_size + bio->bi_size;
+
+                       if (blk_phys_contig_segment(q, prevbio, bio) &&
+                           pseg <= q->max_segment_size) {
+                               nr_phys_segs--;
+                               phys_size += prevbio->bi_size + bio->bi_size;
+                       } else
+                               phys_size = 0;
+
+                       if (blk_hw_contig_segment(q, prevbio, bio) &&
+                           hseg <= q->max_segment_size) {
+                               nr_hw_segs--;
+                               hw_size += prevbio->bi_size + bio->bi_size;
+                       } else
+                               hw_size = 0;
+               }
+               prevbio = bio;
+       }
+
+       rq->nr_phys_segments = nr_phys_segs;
+       rq->nr_hw_segments = nr_hw_segs;
+}
+
+static void blk_recalc_rq_sectors(struct request *rq, int nsect)
+{
+       if (blk_fs_request(rq)) {
+               rq->hard_sector += nsect;
+               rq->hard_nr_sectors -= nsect;
+
+               /*
+                * Move the I/O submission pointers ahead if required.
+                */
+               if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
+                   (rq->sector <= rq->hard_sector)) {
+                       rq->sector = rq->hard_sector;
+                       rq->nr_sectors = rq->hard_nr_sectors;
+                       rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
+                       rq->current_nr_sectors = rq->hard_cur_sectors;
+                       rq->buffer = bio_data(rq->bio);
+               }
+
+               /*
+                * if total number of sectors is less than the first segment
+                * size, something has gone terribly wrong
+                */
+               if (rq->nr_sectors < rq->current_nr_sectors) {
+                       printk("blk: request botched\n");
+                       rq->nr_sectors = rq->current_nr_sectors;
+               }
+       }
+}
+
+static int __end_that_request_first(struct request *req, int uptodate,
+                                   int nr_bytes)
+{
+       int total_bytes, bio_nbytes, error, next_idx = 0;
+       struct bio *bio;
+
+       /*
+        * extend uptodate bool to allow < 0 value to be direct io error
+        */
+       error = 0;
+       if (end_io_error(uptodate))
+               error = !uptodate ? -EIO : uptodate;
+
+       /*
+        * for a REQ_BLOCK_PC request, we want to carry any eventual
+        * sense key with us all the way through
+        */
+       if (!blk_pc_request(req))
+               req->errors = 0;
+
+       if (!uptodate) {
+               if (blk_fs_request(req) && !(req->flags & REQ_QUIET))
+                       printk("end_request: I/O error, dev %s, sector %llu\n",
+                               req->rq_disk ? req->rq_disk->disk_name : "?",
+                               (unsigned long long)req->sector);
+       }
+
+       if (blk_fs_request(req) && req->rq_disk) {
+               const int rw = rq_data_dir(req);
+
+               __disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
+       }
+
+       total_bytes = bio_nbytes = 0;
+       while ((bio = req->bio) != NULL) {
+               int nbytes;
+
+               if (nr_bytes >= bio->bi_size) {
+                       req->bio = bio->bi_next;
+                       nbytes = bio->bi_size;
+                       bio_endio(bio, nbytes, error);
+                       next_idx = 0;
+                       bio_nbytes = 0;
+               } else {
+                       int idx = bio->bi_idx + next_idx;
+
+                       if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
+                               blk_dump_rq_flags(req, "__end_that");
+                               printk("%s: bio idx %d >= vcnt %d\n",
+                                               __FUNCTION__,
+                                               bio->bi_idx, bio->bi_vcnt);
+                               break;
+                       }
+
+                       nbytes = bio_iovec_idx(bio, idx)->bv_len;
+                       BIO_BUG_ON(nbytes > bio->bi_size);
+
+                       /*
+                        * not a complete bvec done
+                        */
+                       if (unlikely(nbytes > nr_bytes)) {
+                               bio_nbytes += nr_bytes;
+                               total_bytes += nr_bytes;
+                               break;
+                       }
+
+                       /*
+                        * advance to the next vector
+                        */
+                       next_idx++;
+                       bio_nbytes += nbytes;
+               }
+
+               total_bytes += nbytes;
+               nr_bytes -= nbytes;
+
+               if ((bio = req->bio)) {
+                       /*
+                        * end more in this run, or just return 'not-done'
+                        */
+                       if (unlikely(nr_bytes <= 0))
+                               break;
+               }
+       }
+
+       /*
+        * completely done
+        */
+       if (!req->bio)
+               return 0;
+
+       /*
+        * if the request wasn't completed, update state
+        */
+       if (bio_nbytes) {
+               bio_endio(bio, bio_nbytes, error);
+               bio->bi_idx += next_idx;
+               bio_iovec(bio)->bv_offset += nr_bytes;
+               bio_iovec(bio)->bv_len -= nr_bytes;
+       }
+
+       blk_recalc_rq_sectors(req, total_bytes >> 9);
+       blk_recalc_rq_segments(req);
+       return 1;
+}
+
+/**
+ * end_that_request_first - end I/O on a request
+ * @req:      the request being processed
+ * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
+ * @nr_sectors: number of sectors to end I/O on
+ *
+ * Description:
+ *     Ends I/O on a number of sectors attached to @req, and sets it up
+ *     for the next range of segments (if any) in the cluster.
+ *
+ * Return:
+ *     0 - we are done with this request, call end_that_request_last()
+ *     1 - still buffers pending for this request
+ **/
+int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
+{
+       return __end_that_request_first(req, uptodate, nr_sectors << 9);
+}
+
+EXPORT_SYMBOL(end_that_request_first);
+
+/**
+ * end_that_request_chunk - end I/O on a request
+ * @req:      the request being processed
+ * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @req, and sets it up
+ *     for the next range of segments (if any). Like end_that_request_first(),
+ *     but deals with bytes instead of sectors.
+ *
+ * Return:
+ *     0 - we are done with this request, call end_that_request_last()
+ *     1 - still buffers pending for this request
+ **/
+int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
+{
+       return __end_that_request_first(req, uptodate, nr_bytes);
+}
+
+EXPORT_SYMBOL(end_that_request_chunk);
+
+/*
+ * queue lock must be held
+ */
+void end_that_request_last(struct request *req)
+{
+       struct gendisk *disk = req->rq_disk;
+
+       if (unlikely(laptop_mode) && blk_fs_request(req))
+               laptop_io_completion();
+
+       if (disk && blk_fs_request(req)) {
+               unsigned long duration = jiffies - req->start_time;
+               const int rw = rq_data_dir(req);
+
+               __disk_stat_inc(disk, ios[rw]);
+               __disk_stat_add(disk, ticks[rw], duration);
+               disk_round_stats(disk);
+               disk->in_flight--;
+       }
+       if (req->end_io)
+               req->end_io(req);
+       else
+               __blk_put_request(req->q, req);
+}
+
+EXPORT_SYMBOL(end_that_request_last);
+
+void end_request(struct request *req, int uptodate)
+{
+       if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
+               add_disk_randomness(req->rq_disk);
+               blkdev_dequeue_request(req);
+               end_that_request_last(req);
+       }
+}
+
+EXPORT_SYMBOL(end_request);
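+
+/*
+ * Editorial sketch (illustrative only, #if 0, not part of this change):
+ * the classic simple-driver request_fn loop built on elv_next_request()
+ * and end_request(), tying together the completion helpers above.
+ * "mydev_request" and "mydev_transfer" are hypothetical; the queue lock
+ * is held on entry to the request_fn.
+ */
+#if 0
+static int mydev_transfer(struct request *rq);  /* hypothetical data mover */
+
+static void mydev_request(request_queue_t *q)
+{
+       struct request *rq;
+
+       while ((rq = elv_next_request(q)) != NULL) {
+               if (!blk_fs_request(rq)) {
+                       end_request(rq, 0);
+                       continue;
+               }
+
+               /* move rq->current_nr_sectors starting at rq->sector */
+               if (mydev_transfer(rq) == 0)
+                       end_request(rq, 1);     /* segment done, advance */
+               else
+                       end_request(rq, 0);     /* I/O error on this segment */
+       }
+}
+#endif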
+
+void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
+{
+       /* first three bits are identical in rq->flags and bio->bi_rw */
+       rq->flags |= (bio->bi_rw & 7);
+
+       rq->nr_phys_segments = bio_phys_segments(q, bio);
+       rq->nr_hw_segments = bio_hw_segments(q, bio);
+       rq->current_nr_sectors = bio_cur_sectors(bio);
+       rq->hard_cur_sectors = rq->current_nr_sectors;
+       rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
+       rq->buffer = bio_data(bio);
+
+       rq->bio = rq->biotail = bio;
+}
+
+EXPORT_SYMBOL(blk_rq_bio_prep);
+
+int kblockd_schedule_work(struct work_struct *work)
+{
+       return queue_work(kblockd_workqueue, work);
+}
+
+EXPORT_SYMBOL(kblockd_schedule_work);
+
+void kblockd_flush(void)
+{
+       flush_workqueue(kblockd_workqueue);
+}
+EXPORT_SYMBOL(kblockd_flush);
+
+int __init blk_dev_init(void)
+{
+       kblockd_workqueue = create_workqueue("kblockd");
+       if (!kblockd_workqueue)
+               panic("Failed to create kblockd\n");
+
+       request_cachep = kmem_cache_create("blkdev_requests",
+                       sizeof(struct request), 0, SLAB_PANIC, NULL, NULL);
+
+       requestq_cachep = kmem_cache_create("blkdev_queue",
+                       sizeof(request_queue_t), 0, SLAB_PANIC, NULL, NULL);
+
+       iocontext_cachep = kmem_cache_create("blkdev_ioc",
+                       sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
+
+       blk_max_low_pfn = max_low_pfn;
+       blk_max_pfn = max_pfn;
+
+       return 0;
+}
+
+/*
+ * IO Context helper functions
+ */
+void put_io_context(struct io_context *ioc)
+{
+       if (ioc == NULL)
+               return;
+
+       BUG_ON(atomic_read(&ioc->refcount) == 0);
+
+       if (atomic_dec_and_test(&ioc->refcount)) {
+               if (ioc->aic && ioc->aic->dtor)
+                       ioc->aic->dtor(ioc->aic);
+               if (ioc->cic && ioc->cic->dtor)
+                       ioc->cic->dtor(ioc->cic);
+
+               kmem_cache_free(iocontext_cachep, ioc);
+       }
+}
+EXPORT_SYMBOL(put_io_context);
+
+/* Called by the exiting task */
+void exit_io_context(void)
+{
+       unsigned long flags;
+       struct io_context *ioc;
+
+       local_irq_save(flags);
+       task_lock(current);
+       ioc = current->io_context;
+       current->io_context = NULL;
+       ioc->task = NULL;
+       task_unlock(current);
+       local_irq_restore(flags);
+
+       if (ioc->aic && ioc->aic->exit)
+               ioc->aic->exit(ioc->aic);
+       if (ioc->cic && ioc->cic->exit)
+               ioc->cic->exit(ioc->cic);
+
+       put_io_context(ioc);
+}
+
+/*
+ * If the current task has no IO context then create one and initialise it.
+ * Otherwise, return its existing IO context.
+ *
+ * This returned IO context doesn't have a specifically elevated refcount,
+ * but since the current task itself holds a reference, the context can be
+ * used in general code, so long as it stays within `current` context.
+ */
+struct io_context *current_io_context(gfp_t gfp_flags)
+{
+       struct task_struct *tsk = current;
+       struct io_context *ret;
+
+       ret = tsk->io_context;
+       if (likely(ret))
+               return ret;
+
+       ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
+       if (ret) {
+               atomic_set(&ret->refcount, 1);
+               ret->task = current;
+               ret->set_ioprio = NULL;
+               ret->last_waited = jiffies; /* doesn't matter... */
+               ret->nr_batch_requests = 0; /* because this is 0 */
+               ret->aic = NULL;
+               ret->cic = NULL;
+               tsk->io_context = ret;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(current_io_context);
+
+/*
+ * If the current task has no IO context then create one and initialise it.
+ * If it does have a context, take a ref on it.
+ *
+ * This is always called in the context of the task which submitted the I/O.
+ */
+struct io_context *get_io_context(gfp_t gfp_flags)
+{
+       struct io_context *ret;
+       ret = current_io_context(gfp_flags);
+       if (likely(ret))
+               atomic_inc(&ret->refcount);
+       return ret;
+}
+EXPORT_SYMBOL(get_io_context);
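+
+/*
+ * Editorial sketch (illustrative only, #if 0, not part of this change):
+ * the reference pairing for the io_context helpers above, as a component
+ * might use when it wants to hold on to the submitting task's context
+ * beyond `current`.  "mydev_note_submitter" is a hypothetical name.
+ */
+#if 0
+static void mydev_note_submitter(struct io_context **slot)
+{
+       struct io_context *ioc = get_io_context(GFP_NOIO);
+
+       if (!ioc)
+               return;
+
+       put_io_context(*slot);          /* put_io_context(NULL) is a no-op */
+       *slot = ioc;                    /* slot now owns the reference */
+}
+#endif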
+
+void copy_io_context(struct io_context **pdst, struct io_context **psrc)
+{
+       struct io_context *src = *psrc;
+       struct io_context *dst = *pdst;
+
+       if (src) {
+               BUG_ON(atomic_read(&src->refcount) == 0);
+               atomic_inc(&src->refcount);
+               put_io_context(dst);
+               *pdst = src;
+       }
+}
+EXPORT_SYMBOL(copy_io_context);
+
+void swap_io_context(struct io_context **ioc1, struct io_context **ioc2)
+{
+       struct io_context *temp;
+       temp = *ioc1;
+       *ioc1 = *ioc2;
+       *ioc2 = temp;
+}
+EXPORT_SYMBOL(swap_io_context);
+
+/*
+ * sysfs parts below
+ */
+struct queue_sysfs_entry {
+       struct attribute attr;
+       ssize_t (*show)(struct request_queue *, char *);
+       ssize_t (*store)(struct request_queue *, const char *, size_t);
+};
+
+static ssize_t
+queue_var_show(unsigned int var, char *page)
+{
+       return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+queue_var_store(unsigned long *var, const char *page, size_t count)
+{
+       char *p = (char *) page;
+
+       *var = simple_strtoul(p, &p, 10);
+       return count;
+}
+
+static ssize_t queue_requests_show(struct request_queue *q, char *page)
+{
+       return queue_var_show(q->nr_requests, (page));
+}
+
+static ssize_t
+queue_requests_store(struct request_queue *q, const char *page, size_t count)
+{
+       struct request_list *rl = &q->rq;
+
+       int ret = queue_var_store(&q->nr_requests, page, count);
+       if (q->nr_requests < BLKDEV_MIN_RQ)
+               q->nr_requests = BLKDEV_MIN_RQ;
+       blk_queue_congestion_threshold(q);
+
+       if (rl->count[READ] >= queue_congestion_on_threshold(q))
+               set_queue_congested(q, READ);
+       else if (rl->count[READ] < queue_congestion_off_threshold(q))
+               clear_queue_congested(q, READ);
+
+       if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
+               set_queue_congested(q, WRITE);
+       else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
+               clear_queue_congested(q, WRITE);
+
+       if (rl->count[READ] >= q->nr_requests) {
+               blk_set_queue_full(q, READ);
+       } else if (rl->count[READ]+1 <= q->nr_requests) {
+               blk_clear_queue_full(q, READ);
+               wake_up(&rl->wait[READ]);
+       }
+
+       if (rl->count[WRITE] >= q->nr_requests) {
+               blk_set_queue_full(q, WRITE);
+       } else if (rl->count[WRITE]+1 <= q->nr_requests) {
+               blk_clear_queue_full(q, WRITE);
+               wake_up(&rl->wait[WRITE]);
+       }
+       return ret;
+}
+
+static ssize_t queue_ra_show(struct request_queue *q, char *page)
+{
+       int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
+
+       return queue_var_show(ra_kb, (page));
+}
+
+static ssize_t
+queue_ra_store(struct request_queue *q, const char *page, size_t count)
+{
+       unsigned long ra_kb;
+       ssize_t ret = queue_var_store(&ra_kb, page, count);
+
+       spin_lock_irq(q->queue_lock);
+       if (ra_kb > (q->max_sectors >> 1))
+               ra_kb = (q->max_sectors >> 1);
+
+       q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+       spin_unlock_irq(q->queue_lock);
+
+       return ret;
+}
+
+static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
+{
+       int max_sectors_kb = q->max_sectors >> 1;
+
+       return queue_var_show(max_sectors_kb, (page));
+}
+
+static ssize_t
+queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
+{
+       unsigned long max_sectors_kb,
+                       max_hw_sectors_kb = q->max_hw_sectors >> 1,
+                       page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
+       ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
+       int ra_kb;
+
+       if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
+               return -EINVAL;
+       /*
+        * Take the queue lock to update the readahead and max_sectors
+        * values synchronously:
+        */
+       spin_lock_irq(q->queue_lock);
+       /*
+        * Trim readahead window as well, if necessary:
+        */
+       ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
+       if (ra_kb > max_sectors_kb)
+               q->backing_dev_info.ra_pages =
+                               max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);
+
+       q->max_sectors = max_sectors_kb << 1;
+       spin_unlock_irq(q->queue_lock);
+
+       return ret;
+}
+
+static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
+{
+       int max_hw_sectors_kb = q->max_hw_sectors >> 1;
+
+       return queue_var_show(max_hw_sectors_kb, (page));
+}
+
+
+static struct queue_sysfs_entry queue_requests_entry = {
+       .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
+       .show = queue_requests_show,
+       .store = queue_requests_store,
+};
+
+static struct queue_sysfs_entry queue_ra_entry = {
+       .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
+       .show = queue_ra_show,
+       .store = queue_ra_store,
+};
+
+static struct queue_sysfs_entry queue_max_sectors_entry = {
+       .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
+       .show = queue_max_sectors_show,
+       .store = queue_max_sectors_store,
+};
+
+static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
+       .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
+       .show = queue_max_hw_sectors_show,
+};
+
+static struct queue_sysfs_entry queue_iosched_entry = {
+       .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
+       .show = elv_iosched_show,
+       .store = elv_iosched_store,
+};
+
+static struct attribute *default_attrs[] = {
+       &queue_requests_entry.attr,
+       &queue_ra_entry.attr,
+       &queue_max_hw_sectors_entry.attr,
+       &queue_max_sectors_entry.attr,
+       &queue_iosched_entry.attr,
+       NULL,
+};
+
+#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
+
+static ssize_t
+queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+       struct queue_sysfs_entry *entry = to_queue(attr);
+       struct request_queue *q;
+
+       q = container_of(kobj, struct request_queue, kobj);
+       if (!entry->show)
+               return -EIO;
+
+       return entry->show(q, page);
+}
+
+static ssize_t
+queue_attr_store(struct kobject *kobj, struct attribute *attr,
+                   const char *page, size_t length)
+{
+       struct queue_sysfs_entry *entry = to_queue(attr);
+       struct request_queue *q;
+
+       q = container_of(kobj, struct request_queue, kobj);
+       if (!entry->store)
+               return -EIO;
+
+       return entry->store(q, page, length);
+}
+
+static struct sysfs_ops queue_sysfs_ops = {
+       .show   = queue_attr_show,
+       .store  = queue_attr_store,
+};
+
+static struct kobj_type queue_ktype = {
+       .sysfs_ops      = &queue_sysfs_ops,
+       .default_attrs  = default_attrs,
+};
+
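The entries above show the whole pattern for exposing a queue field through sysfs. A hedged sketch of what one more, purely hypothetical, read-only attribute would look like, reusing the same helpers (the attribute name and the use of q->nr_congestion_on are assumptions for illustration):

	/* Hypothetical extra attribute: expose the congestion-on threshold read-only. */
	static ssize_t queue_congestion_on_show(struct request_queue *q, char *page)
	{
		return queue_var_show(q->nr_congestion_on, page);
	}

	static struct queue_sysfs_entry queue_congestion_on_entry = {
		.attr = {.name = "congestion_on_threshold", .mode = S_IRUGO },
		.show = queue_congestion_on_show,
	};

	/* ...and &queue_congestion_on_entry.attr would be added to default_attrs[]. */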
+int blk_register_queue(struct gendisk *disk)
+{
+       int ret;
+
+       request_queue_t *q = disk->queue;
+
+       if (!q || !q->request_fn)
+               return -ENXIO;
+
+       q->kobj.parent = kobject_get(&disk->kobj);
+       if (!q->kobj.parent)
+               return -EBUSY;
+
+       snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
+       q->kobj.ktype = &queue_ktype;
+
+       ret = kobject_register(&q->kobj);
+       if (ret < 0)
+               return ret;
+
+       ret = elv_register_queue(q);
+       if (ret) {
+               kobject_unregister(&q->kobj);
+               return ret;
+       }
+
+       return 0;
+}
+
+void blk_unregister_queue(struct gendisk *disk)
+{
+       request_queue_t *q = disk->queue;
+
+       if (q && q->request_fn) {
+               elv_unregister_queue(q);
+
+               kobject_unregister(&q->kobj);
+               kobject_put(&disk->kobj);
+       }
+}
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
new file mode 100644 (file)
index 0000000..e54f006
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * elevator noop
+ */
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+static void elevator_noop_add_request(request_queue_t *q, struct request *rq)
+{
+       rq->flags |= REQ_NOMERGE;
+       elv_dispatch_add_tail(q, rq);
+}
+
+static int elevator_noop_dispatch(request_queue_t *q, int force)
+{
+       return 0;
+}
+
+static struct elevator_type elevator_noop = {
+       .ops = {
+               .elevator_dispatch_fn           = elevator_noop_dispatch,
+               .elevator_add_req_fn            = elevator_noop_add_request,
+       },
+       .elevator_name = "noop",
+       .elevator_owner = THIS_MODULE,
+};
+
+static int __init noop_init(void)
+{
+       return elv_register(&elevator_noop);
+}
+
+static void __exit noop_exit(void)
+{
+       elv_unregister(&elevator_noop);
+}
+
+module_init(noop_init);
+module_exit(noop_exit);
+
+
+MODULE_AUTHOR("Jens Axboe");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("No-op IO scheduler");
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
new file mode 100644 (file)
index 0000000..382dea7
--- /dev/null
@@ -0,0 +1,589 @@
+/*
+ * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/cdrom.h>
+#include <linux/slab.h>
+#include <linux/times.h>
+#include <asm/uaccess.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_cmnd.h>
+
+/* Command group 3 is reserved and should never be used.  */
+const unsigned char scsi_command_size[8] =
+{
+       6, 10, 10, 12,
+       16, 12, 10, 10
+};
+
+EXPORT_SYMBOL(scsi_command_size);
+
+#define BLK_DEFAULT_TIMEOUT    (60 * HZ)
+
+#include <scsi/sg.h>
+
+static int sg_get_version(int __user *p)
+{
+       static int sg_version_num = 30527;
+       return put_user(sg_version_num, p);
+}
+
+static int scsi_get_idlun(request_queue_t *q, int __user *p)
+{
+       return put_user(0, p);
+}
+
+static int scsi_get_bus(request_queue_t *q, int __user *p)
+{
+       return put_user(0, p);
+}
+
+static int sg_get_timeout(request_queue_t *q)
+{
+       return q->sg_timeout / (HZ / USER_HZ);
+}
+
+static int sg_set_timeout(request_queue_t *q, int __user *p)
+{
+       int timeout, err = get_user(timeout, p);
+
+       if (!err)
+               q->sg_timeout = timeout * (HZ / USER_HZ);
+
+       return err;
+}
+
+static int sg_get_reserved_size(request_queue_t *q, int __user *p)
+{
+       return put_user(q->sg_reserved_size, p);
+}
+
+static int sg_set_reserved_size(request_queue_t *q, int __user *p)
+{
+       int size, err = get_user(size, p);
+
+       if (err)
+               return err;
+
+       if (size < 0)
+               return -EINVAL;
+       if (size > (q->max_sectors << 9))
+               size = q->max_sectors << 9;
+
+       q->sg_reserved_size = size;
+       return 0;
+}
+
+/*
+ * This always reports that we are ATAPI, even for a real SCSI drive; it is
+ * not clear this is worth doing anything about (why would you care?).
+ */
+static int sg_emulated_host(request_queue_t *q, int __user *p)
+{
+       return put_user(1, p);
+}
+
+#define CMD_READ_SAFE  0x01
+#define CMD_WRITE_SAFE 0x02
+#define CMD_WARNED     0x04
+#define safe_for_read(cmd)     [cmd] = CMD_READ_SAFE
+#define safe_for_write(cmd)    [cmd] = CMD_WRITE_SAFE
+
+static int verify_command(struct file *file, unsigned char *cmd)
+{
+       static unsigned char cmd_type[256] = {
+
+               /* Basic read-only commands */
+               safe_for_read(TEST_UNIT_READY),
+               safe_for_read(REQUEST_SENSE),
+               safe_for_read(READ_6),
+               safe_for_read(READ_10),
+               safe_for_read(READ_12),
+               safe_for_read(READ_16),
+               safe_for_read(READ_BUFFER),
+               safe_for_read(READ_DEFECT_DATA),
+               safe_for_read(READ_LONG),
+               safe_for_read(INQUIRY),
+               safe_for_read(MODE_SENSE),
+               safe_for_read(MODE_SENSE_10),
+               safe_for_read(LOG_SENSE),
+               safe_for_read(START_STOP),
+               safe_for_read(GPCMD_VERIFY_10),
+               safe_for_read(VERIFY_16),
+
+               /* Audio CD commands */
+               safe_for_read(GPCMD_PLAY_CD),
+               safe_for_read(GPCMD_PLAY_AUDIO_10),
+               safe_for_read(GPCMD_PLAY_AUDIO_MSF),
+               safe_for_read(GPCMD_PLAY_AUDIO_TI),
+               safe_for_read(GPCMD_PAUSE_RESUME),
+
+               /* CD/DVD data reading */
+               safe_for_read(GPCMD_READ_BUFFER_CAPACITY),
+               safe_for_read(GPCMD_READ_CD),
+               safe_for_read(GPCMD_READ_CD_MSF),
+               safe_for_read(GPCMD_READ_DISC_INFO),
+               safe_for_read(GPCMD_READ_CDVD_CAPACITY),
+               safe_for_read(GPCMD_READ_DVD_STRUCTURE),
+               safe_for_read(GPCMD_READ_HEADER),
+               safe_for_read(GPCMD_READ_TRACK_RZONE_INFO),
+               safe_for_read(GPCMD_READ_SUBCHANNEL),
+               safe_for_read(GPCMD_READ_TOC_PMA_ATIP),
+               safe_for_read(GPCMD_REPORT_KEY),
+               safe_for_read(GPCMD_SCAN),
+               safe_for_read(GPCMD_GET_CONFIGURATION),
+               safe_for_read(GPCMD_READ_FORMAT_CAPACITIES),
+               safe_for_read(GPCMD_GET_EVENT_STATUS_NOTIFICATION),
+               safe_for_read(GPCMD_GET_PERFORMANCE),
+               safe_for_read(GPCMD_SEEK),
+               safe_for_read(GPCMD_STOP_PLAY_SCAN),
+
+               /* Basic writing commands */
+               safe_for_write(WRITE_6),
+               safe_for_write(WRITE_10),
+               safe_for_write(WRITE_VERIFY),
+               safe_for_write(WRITE_12),
+               safe_for_write(WRITE_VERIFY_12),
+               safe_for_write(WRITE_16),
+               safe_for_write(WRITE_LONG),
+               safe_for_write(WRITE_LONG_2),
+               safe_for_write(ERASE),
+               safe_for_write(GPCMD_MODE_SELECT_10),
+               safe_for_write(MODE_SELECT),
+               safe_for_write(LOG_SELECT),
+               safe_for_write(GPCMD_BLANK),
+               safe_for_write(GPCMD_CLOSE_TRACK),
+               safe_for_write(GPCMD_FLUSH_CACHE),
+               safe_for_write(GPCMD_FORMAT_UNIT),
+               safe_for_write(GPCMD_REPAIR_RZONE_TRACK),
+               safe_for_write(GPCMD_RESERVE_RZONE_TRACK),
+               safe_for_write(GPCMD_SEND_DVD_STRUCTURE),
+               safe_for_write(GPCMD_SEND_EVENT),
+               safe_for_write(GPCMD_SEND_KEY),
+               safe_for_write(GPCMD_SEND_OPC),
+               safe_for_write(GPCMD_SEND_CUE_SHEET),
+               safe_for_write(GPCMD_SET_SPEED),
+               safe_for_write(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL),
+               safe_for_write(GPCMD_LOAD_UNLOAD),
+               safe_for_write(GPCMD_SET_STREAMING),
+       };
+       unsigned char type = cmd_type[cmd[0]];
+
+       /* Anybody who can open the device can do a read-safe command */
+       if (type & CMD_READ_SAFE)
+               return 0;
+
+       /* Write-safe commands just require a writable open.. */
+       if (type & CMD_WRITE_SAFE) {
+               if (file->f_mode & FMODE_WRITE)
+                       return 0;
+       }
+
+       /* And root can do any command.. */
+       if (capable(CAP_SYS_RAWIO))
+               return 0;
+
+       if (!type) {
+               cmd_type[cmd[0]] = CMD_WARNED;
+               printk(KERN_WARNING "scsi: unknown opcode 0x%02x\n", cmd[0]);
+       }
+
+       /* Otherwise fail it with an "Operation not permitted" */
+       return -EPERM;
+}
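To make the table concrete: INQUIRY and TEST_UNIT_READY are tagged safe_for_read, so any process that can open the device may issue them; WRITE_10 is tagged safe_for_write, so it additionally needs a descriptor opened for writing; anything not in the table falls through to the CAP_SYS_RAWIO check, and an unprivileged caller gets -EPERM (after a one-time warning for unknown opcodes).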
+
+static int sg_io(struct file *file, request_queue_t *q,
+               struct gendisk *bd_disk, struct sg_io_hdr *hdr)
+{
+       unsigned long start_time;
+       int writing = 0, ret = 0;
+       struct request *rq;
+       struct bio *bio;
+       char sense[SCSI_SENSE_BUFFERSIZE];
+       unsigned char cmd[BLK_MAX_CDB];
+
+       if (hdr->interface_id != 'S')
+               return -EINVAL;
+       if (hdr->cmd_len > BLK_MAX_CDB)
+               return -EINVAL;
+       if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
+               return -EFAULT;
+       if (verify_command(file, cmd))
+               return -EPERM;
+
+       if (hdr->dxfer_len > (q->max_sectors << 9))
+               return -EIO;
+
+       if (hdr->dxfer_len)
+               switch (hdr->dxfer_direction) {
+               default:
+                       return -EINVAL;
+               case SG_DXFER_TO_FROM_DEV:
+               case SG_DXFER_TO_DEV:
+                       writing = 1;
+                       break;
+               case SG_DXFER_FROM_DEV:
+                       break;
+               }
+
+       rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
+       if (!rq)
+               return -ENOMEM;
+
+       if (hdr->iovec_count) {
+               const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
+               struct sg_iovec *iov;
+
+               iov = kmalloc(size, GFP_KERNEL);
+               if (!iov) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               if (copy_from_user(iov, hdr->dxferp, size)) {
+                       kfree(iov);
+                       ret = -EFAULT;
+                       goto out;
+               }
+
+               ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count);
+               kfree(iov);
+       } else if (hdr->dxfer_len)
+               ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
+
+       if (ret)
+               goto out;
+
+       /*
+        * fill in request structure
+        */
+       rq->cmd_len = hdr->cmd_len;
+       memcpy(rq->cmd, cmd, hdr->cmd_len);
+       if (sizeof(rq->cmd) != hdr->cmd_len)
+               memset(rq->cmd + hdr->cmd_len, 0, sizeof(rq->cmd) - hdr->cmd_len);
+
+       memset(sense, 0, sizeof(sense));
+       rq->sense = sense;
+       rq->sense_len = 0;
+
+       rq->flags |= REQ_BLOCK_PC;
+       bio = rq->bio;
+
+       /*
+        * bounce this after holding a reference to the original bio, it's
+        * needed for proper unmapping
+        */
+       if (rq->bio)
+               blk_queue_bounce(q, &rq->bio);
+
+       rq->timeout = (hdr->timeout * HZ) / 1000;
+       if (!rq->timeout)
+               rq->timeout = q->sg_timeout;
+       if (!rq->timeout)
+               rq->timeout = BLK_DEFAULT_TIMEOUT;
+
+       start_time = jiffies;
+
+       /* Ignore the return value.  All information is passed back to the
+        * caller (if the caller doesn't check it, that is their problem).
+        * N.B. a non-zero SCSI status is _not_ necessarily an error.
+        */
+       blk_execute_rq(q, bd_disk, rq, 0);
+
+       /* write to all output members */
+       hdr->status = 0xff & rq->errors;
+       hdr->masked_status = status_byte(rq->errors);
+       hdr->msg_status = msg_byte(rq->errors);
+       hdr->host_status = host_byte(rq->errors);
+       hdr->driver_status = driver_byte(rq->errors);
+       hdr->info = 0;
+       if (hdr->masked_status || hdr->host_status || hdr->driver_status)
+               hdr->info |= SG_INFO_CHECK;
+       hdr->resid = rq->data_len;
+       hdr->duration = ((jiffies - start_time) * 1000) / HZ;
+       hdr->sb_len_wr = 0;
+
+       if (rq->sense_len && hdr->sbp) {
+               int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);
+
+               if (!copy_to_user(hdr->sbp, rq->sense, len))
+                       hdr->sb_len_wr = len;
+       }
+
+       if (blk_rq_unmap_user(bio, hdr->dxfer_len))
+               ret = -EFAULT;
+
+       /* may not have succeeded, but output values written to control
+        * structure (struct sg_io_hdr).  */
+out:
+       blk_put_request(rq);
+       return ret;
+}
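sg_io() is driven from userspace through the SG_IO case of scsi_cmd_ioctl() below. A minimal userspace sketch issuing a 6-byte INQUIRY (which is in the read-safe table above, so a read-only open suffices); the device path and transfer size are only illustrative:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <scsi/sg.h>

	static int do_inquiry(const char *dev)
	{
		unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY, 96 bytes */
		unsigned char data[96], sense[32];
		struct sg_io_hdr hdr;
		int fd, ret;

		fd = open(dev, O_RDONLY | O_NONBLOCK);
		if (fd < 0)
			return -1;

		memset(&hdr, 0, sizeof(hdr));
		hdr.interface_id    = 'S';		/* checked at the top of sg_io() */
		hdr.cmd_len         = sizeof(cdb);
		hdr.cmdp            = cdb;
		hdr.dxfer_direction = SG_DXFER_FROM_DEV;
		hdr.dxfer_len       = sizeof(data);
		hdr.dxferp          = data;
		hdr.mx_sb_len       = sizeof(sense);
		hdr.sbp             = sense;
		hdr.timeout         = 5000;		/* milliseconds, see rq->timeout above */

		ret = ioctl(fd, SG_IO, &hdr);
		close(fd);
		return ret;
	}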
+
+#define OMAX_SB_LEN 16          /* For backward compatibility */
+
+static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
+                        struct gendisk *bd_disk, Scsi_Ioctl_Command __user *sic)
+{
+       struct request *rq;
+       int err;
+       unsigned int in_len, out_len, bytes, opcode, cmdlen;
+       char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
+
+       /*
+        * get the in and out lengths, verify they don't exceed a page worth of data
+        */
+       if (get_user(in_len, &sic->inlen))
+               return -EFAULT;
+       if (get_user(out_len, &sic->outlen))
+               return -EFAULT;
+       if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
+               return -EINVAL;
+       if (get_user(opcode, sic->data))
+               return -EFAULT;
+
+       bytes = max(in_len, out_len);
+       if (bytes) {
+               buffer = kmalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN);
+               if (!buffer)
+                       return -ENOMEM;
+
+               memset(buffer, 0, bytes);
+       }
+
+       rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
+
+       cmdlen = COMMAND_SIZE(opcode);
+
+       /*
+        * get command and data to send to device, if any
+        */
+       err = -EFAULT;
+       rq->cmd_len = cmdlen;
+       if (copy_from_user(rq->cmd, sic->data, cmdlen))
+               goto error;
+
+       if (copy_from_user(buffer, sic->data + cmdlen, in_len))
+               goto error;
+
+       err = verify_command(file, rq->cmd);
+       if (err)
+               goto error;
+
+       switch (opcode) {
+               case SEND_DIAGNOSTIC:
+               case FORMAT_UNIT:
+                       rq->timeout = FORMAT_UNIT_TIMEOUT;
+                       break;
+               case START_STOP:
+                       rq->timeout = START_STOP_TIMEOUT;
+                       break;
+               case MOVE_MEDIUM:
+                       rq->timeout = MOVE_MEDIUM_TIMEOUT;
+                       break;
+               case READ_ELEMENT_STATUS:
+                       rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
+                       break;
+               case READ_DEFECT_DATA:
+                       rq->timeout = READ_DEFECT_DATA_TIMEOUT;
+                       break;
+               default:
+                       rq->timeout = BLK_DEFAULT_TIMEOUT;
+                       break;
+       }
+
+       memset(sense, 0, sizeof(sense));
+       rq->sense = sense;
+       rq->sense_len = 0;
+
+       rq->data = buffer;
+       rq->data_len = bytes;
+       rq->flags |= REQ_BLOCK_PC;
+
+       blk_execute_rq(q, bd_disk, rq, 0);
+       err = rq->errors & 0xff;        /* only 8 bit SCSI status */
+       if (err) {
+               if (rq->sense_len && rq->sense) {
+                       bytes = (OMAX_SB_LEN > rq->sense_len) ?
+                               rq->sense_len : OMAX_SB_LEN;
+                       if (copy_to_user(sic->data, rq->sense, bytes))
+                               err = -EFAULT;
+               }
+       } else {
+               if (copy_to_user(sic->data, buffer, out_len))
+                       err = -EFAULT;
+       }
+       
+error:
+       kfree(buffer);
+       blk_put_request(rq);
+       return err;
+}
+
+int scsi_cmd_ioctl(struct file *file, struct gendisk *bd_disk, unsigned int cmd, void __user *arg)
+{
+       request_queue_t *q;
+       struct request *rq;
+       int close = 0, err;
+
+       q = bd_disk->queue;
+       if (!q)
+               return -ENXIO;
+
+       if (blk_get_queue(q))
+               return -ENXIO;
+
+       switch (cmd) {
+               /*
+                * new sgv3 interface
+                */
+               case SG_GET_VERSION_NUM:
+                       err = sg_get_version(arg);
+                       break;
+               case SCSI_IOCTL_GET_IDLUN:
+                       err = scsi_get_idlun(q, arg);
+                       break;
+               case SCSI_IOCTL_GET_BUS_NUMBER:
+                       err = scsi_get_bus(q, arg);
+                       break;
+               case SG_SET_TIMEOUT:
+                       err = sg_set_timeout(q, arg);
+                       break;
+               case SG_GET_TIMEOUT:
+                       err = sg_get_timeout(q);
+                       break;
+               case SG_GET_RESERVED_SIZE:
+                       err = sg_get_reserved_size(q, arg);
+                       break;
+               case SG_SET_RESERVED_SIZE:
+                       err = sg_set_reserved_size(q, arg);
+                       break;
+               case SG_EMULATED_HOST:
+                       err = sg_emulated_host(q, arg);
+                       break;
+               case SG_IO: {
+                       struct sg_io_hdr hdr;
+
+                       err = -EFAULT;
+                       if (copy_from_user(&hdr, arg, sizeof(hdr)))
+                               break;
+                       err = sg_io(file, q, bd_disk, &hdr);
+                       if (err == -EFAULT)
+                               break;
+
+                       if (copy_to_user(arg, &hdr, sizeof(hdr)))
+                               err = -EFAULT;
+                       break;
+               }
+               case CDROM_SEND_PACKET: {
+                       struct cdrom_generic_command cgc;
+                       struct sg_io_hdr hdr;
+
+                       err = -EFAULT;
+                       if (copy_from_user(&cgc, arg, sizeof(cgc)))
+                               break;
+                       cgc.timeout = clock_t_to_jiffies(cgc.timeout);
+                       memset(&hdr, 0, sizeof(hdr));
+                       hdr.interface_id = 'S';
+                       hdr.cmd_len = sizeof(cgc.cmd);
+                       hdr.dxfer_len = cgc.buflen;
+                       err = 0;
+                       switch (cgc.data_direction) {
+                               case CGC_DATA_UNKNOWN:
+                                       hdr.dxfer_direction = SG_DXFER_UNKNOWN;
+                                       break;
+                               case CGC_DATA_WRITE:
+                                       hdr.dxfer_direction = SG_DXFER_TO_DEV;
+                                       break;
+                               case CGC_DATA_READ:
+                                       hdr.dxfer_direction = SG_DXFER_FROM_DEV;
+                                       break;
+                               case CGC_DATA_NONE:
+                                       hdr.dxfer_direction = SG_DXFER_NONE;
+                                       break;
+                               default:
+                                       err = -EINVAL;
+                       }
+                       if (err)
+                               break;
+
+                       hdr.dxferp = cgc.buffer;
+                       hdr.sbp = cgc.sense;
+                       if (hdr.sbp)
+                               hdr.mx_sb_len = sizeof(struct request_sense);
+                       hdr.timeout = cgc.timeout;
+                       hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
+                       hdr.cmd_len = sizeof(cgc.cmd);
+
+                       err = sg_io(file, q, bd_disk, &hdr);
+                       if (err == -EFAULT)
+                               break;
+
+                       if (hdr.status)
+                               err = -EIO;
+
+                       cgc.stat = err;
+                       cgc.buflen = hdr.resid;
+                       if (copy_to_user(arg, &cgc, sizeof(cgc)))
+                               err = -EFAULT;
+
+                       break;
+               }
+
+               /*
+                * old junk scsi send command ioctl
+                */
+               case SCSI_IOCTL_SEND_COMMAND:
+                       printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm);
+                       err = -EINVAL;
+                       if (!arg)
+                               break;
+
+                       err = sg_scsi_ioctl(file, q, bd_disk, arg);
+                       break;
+               case CDROMCLOSETRAY:
+                       close = 1;
+               case CDROMEJECT:
+                       rq = blk_get_request(q, WRITE, __GFP_WAIT);
+                       rq->flags |= REQ_BLOCK_PC;
+                       rq->data = NULL;
+                       rq->data_len = 0;
+                       rq->timeout = BLK_DEFAULT_TIMEOUT;
+                       memset(rq->cmd, 0, sizeof(rq->cmd));
+                       rq->cmd[0] = GPCMD_START_STOP_UNIT;
+                       rq->cmd[4] = 0x02 + (close != 0);
+                       rq->cmd_len = 6;
+                       err = blk_execute_rq(q, bd_disk, rq, 0);
+                       blk_put_request(rq);
+                       break;
+               default:
+                       err = -ENOTTY;
+       }
+
+       blk_put_queue(q);
+       return err;
+}
+
+EXPORT_SYMBOL(scsi_cmd_ioctl);
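The CDROMEJECT/CDROMCLOSETRAY cases above are equally easy to reach from userspace; a small sketch, assuming a /dev/cdrom node (the handler turns these into START STOP UNIT with cmd[4] = 0x02 for eject and 0x03 for close, i.e. load/eject bit set, start bit clear or set):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/cdrom.h>

	/* Userspace sketch: eject the tray of an assumed /dev/cdrom device node. */
	static int eject_tray(void)
	{
		int fd = open("/dev/cdrom", O_RDONLY | O_NONBLOCK);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, CDROMEJECT, 0);
		close(fd);
		return ret;
	}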
index 51b0af1cebee15f0406ceaf4517538bb9a39edb3..7b1cd93892be4e271d5f94ef60982607020efa76 100644 (file)
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -409,16 +409,6 @@ config BLK_DEV_INITRD
          for details.
 
 
-#XXX - it makes sense to enable this only for 32-bit subarch's, not for x86_64
-#for instance.
-config LBD
-       bool "Support for Large Block Devices"
-       depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML
-       help
-         Say Y here if you want to attach large (bigger than 2TB) discs to
-         your machine, or if you want to have a raid or loopback device
-         bigger than 2TB.  Otherwise say N.
-
 config CDROM_PKTCDVD
        tristate "Packet writing on CD/DVD media"
        depends on !UML
@@ -455,8 +445,6 @@ config CDROM_PKTCDVD_WCACHE
 
 source "drivers/s390/block/Kconfig"
 
-source "drivers/block/Kconfig.iosched"
-
 config ATA_OVER_ETH
        tristate "ATA over Ethernet support"
        depends on NET
diff --git a/drivers/block/Kconfig.iosched b/drivers/block/Kconfig.iosched
deleted file mode 100644 (file)
index 5b90d2f..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-
-menu "IO Schedulers"
-
-config IOSCHED_NOOP
-       bool
-       default y
-       ---help---
-         The no-op I/O scheduler is a minimal scheduler that does basic merging
-         and sorting. Its main uses include non-disk based block devices like
-         memory devices, and specialised software or hardware environments
-         that do their own scheduling and require only minimal assistance from
-         the kernel.
-
-config IOSCHED_AS
-       tristate "Anticipatory I/O scheduler"
-       default y
-       ---help---
-         The anticipatory I/O scheduler is the default disk scheduler. It is
-         generally a good choice for most environments, but is quite large and
-         complex when compared to the deadline I/O scheduler, it can also be
-         slower in some cases especially some database loads.
-
-config IOSCHED_DEADLINE
-       tristate "Deadline I/O scheduler"
-       default y
-       ---help---
-         The deadline I/O scheduler is simple and compact, and is often as
-         good as the anticipatory I/O scheduler, and in some database
-         workloads, better. In the case of a single process performing I/O to
-         a disk at any one time, its behaviour is almost identical to the
-         anticipatory I/O scheduler and so is a good choice.
-
-config IOSCHED_CFQ
-       tristate "CFQ I/O scheduler"
-       default y
-       ---help---
-         The CFQ I/O scheduler tries to distribute bandwidth equally
-         among all processes in the system. It should provide a fair
-         working environment, suitable for desktop systems.
-
-choice
-       prompt "Default I/O scheduler"
-       default DEFAULT_AS
-       help
-         Select the I/O scheduler which will be used by default for all
-         block devices.
-
-       config DEFAULT_AS
-               bool "Anticipatory" if IOSCHED_AS
-
-       config DEFAULT_DEADLINE
-               bool "Deadline" if IOSCHED_DEADLINE
-
-       config DEFAULT_CFQ
-               bool "CFQ" if IOSCHED_CFQ
-
-       config DEFAULT_NOOP
-               bool "No-op"
-
-endchoice
-
-config DEFAULT_IOSCHED
-       string
-       default "anticipatory" if DEFAULT_AS
-       default "deadline" if DEFAULT_DEADLINE
-       default "cfq" if DEFAULT_CFQ
-       default "noop" if DEFAULT_NOOP
-
-endmenu
index 1cf09a1c065be0e3dc29df117a2f73693c4c202c..3ec1f8df87b16d92cbbd4b0ab01fe065a4c3731a 100644 (file)
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -4,21 +4,7 @@
 # 12 June 2000, Christoph Hellwig <hch@infradead.org>
 # Rewritten to use lists instead of if-statements.
 # 
-# Note : at this point, these files are compiled on all systems.
-# In the future, some of these should be built conditionally.
-#
-
-#
-# NOTE that ll_rw_blk.c must come early in linkage order - it starts the
-# kblockd threads
-#
-
-obj-y  := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o
 
-obj-$(CONFIG_IOSCHED_NOOP)     += noop-iosched.o
-obj-$(CONFIG_IOSCHED_AS)       += as-iosched.o
-obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
-obj-$(CONFIG_IOSCHED_CFQ)      += cfq-iosched.o
 obj-$(CONFIG_MAC_FLOPPY)       += swim3.o
 obj-$(CONFIG_BLK_DEV_FD)       += floppy.o
 obj-$(CONFIG_BLK_DEV_FD98)     += floppy98.o
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
deleted file mode 100644 (file)
index c6744ff..0000000
+++ /dev/null
@@ -1,1985 +0,0 @@
-/*
- *  linux/drivers/block/as-iosched.c
- *
- *  Anticipatory & deadline i/o scheduler.
- *
- *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
- *                     Nick Piggin <piggin@cyberone.com.au>
- *
- */
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/blkdev.h>
-#include <linux/elevator.h>
-#include <linux/bio.h>
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/compiler.h>
-#include <linux/hash.h>
-#include <linux/rbtree.h>
-#include <linux/interrupt.h>
-
-#define REQ_SYNC       1
-#define REQ_ASYNC      0
-
-/*
- * See Documentation/block/as-iosched.txt
- */
-
-/*
- * max time before a read is submitted.
- */
-#define default_read_expire (HZ / 8)
-
-/*
- * ditto for writes, these limits are not hard, even
- * if the disk is capable of satisfying them.
- */
-#define default_write_expire (HZ / 4)
-
-/*
- * read_batch_expire describes how long we will allow a stream of reads to
- * persist before looking to see whether it is time to switch over to writes.
- */
-#define default_read_batch_expire (HZ / 2)
-
-/*
- * write_batch_expire describes how long we want a stream of writes to run for.
- * This is not a hard limit, but a target we set for the auto-tuning thingy.
- * See, the problem is: we can send a lot of writes to disk cache / TCQ in
- * a short amount of time...
- */
-#define default_write_batch_expire (HZ / 8)
-
-/*
- * max time we may wait to anticipate a read (default around 6ms)
- */
-#define default_antic_expire ((HZ / 150) ? HZ / 150 : 1)
-
-/*
- * Keep track of up to 20ms thinktimes. We can go as big as we like here,
- * however huge values tend to interfere and not decay fast enough. A program
- * might be in a non-io phase of operation. Waiting on user input for example,
- * or doing a lengthy computation. A small penalty can be justified there, and
- * will still catch out those processes that constantly have large thinktimes.
- */
-#define MAX_THINKTIME (HZ/50UL)
-
-/* Bits in as_io_context.state */
-enum as_io_states {
-       AS_TASK_RUNNING=0,      /* Process has not exited */
-       AS_TASK_IOSTARTED,      /* Process has started some IO */
-       AS_TASK_IORUNNING,      /* Process has completed some IO */
-};
-
-enum anticipation_status {
-       ANTIC_OFF=0,            /* Not anticipating (normal operation)  */
-       ANTIC_WAIT_REQ,         /* The last read has not yet completed  */
-       ANTIC_WAIT_NEXT,        /* Currently anticipating a request vs
-                                  last read (which has completed) */
-       ANTIC_FINISHED,         /* Anticipating but have found a candidate
-                                * or timed out */
-};
-
-struct as_data {
-       /*
-        * run time data
-        */
-
-       struct request_queue *q;        /* the "owner" queue */
-
-       /*
-        * requests (as_rq s) are present on both sort_list and fifo_list
-        */
-       struct rb_root sort_list[2];
-       struct list_head fifo_list[2];
-
-       struct as_rq *next_arq[2];      /* next in sort order */
-       sector_t last_sector[2];        /* last REQ_SYNC & REQ_ASYNC sectors */
-       struct list_head *hash;         /* request hash */
-
-       unsigned long exit_prob;        /* probability a task will exit while
-                                          being waited on */
-       unsigned long new_ttime_total;  /* mean thinktime on new proc */
-       unsigned long new_ttime_mean;
-       u64 new_seek_total;             /* mean seek on new proc */
-       sector_t new_seek_mean;
-
-       unsigned long current_batch_expires;
-       unsigned long last_check_fifo[2];
-       int changed_batch;              /* 1: waiting for old batch to end */
-       int new_batch;                  /* 1: waiting on first read complete */
-       int batch_data_dir;             /* current batch REQ_SYNC / REQ_ASYNC */
-       int write_batch_count;          /* max # of reqs in a write batch */
-       int current_write_count;        /* how many requests left this batch */
-       int write_batch_idled;          /* has the write batch gone idle? */
-       mempool_t *arq_pool;
-
-       enum anticipation_status antic_status;
-       unsigned long antic_start;      /* jiffies: when it started */
-       struct timer_list antic_timer;  /* anticipatory scheduling timer */
-       struct work_struct antic_work;  /* Deferred unplugging */
-       struct io_context *io_context;  /* Identify the expected process */
-       int ioc_finished; /* IO associated with io_context is finished */
-       int nr_dispatched;
-
-       /*
-        * settings that change how the i/o scheduler behaves
-        */
-       unsigned long fifo_expire[2];
-       unsigned long batch_expire[2];
-       unsigned long antic_expire;
-};
-
-#define list_entry_fifo(ptr)   list_entry((ptr), struct as_rq, fifo)
-
-/*
- * per-request data.
- */
-enum arq_state {
-       AS_RQ_NEW=0,            /* New - not referenced and not on any lists */
-       AS_RQ_QUEUED,           /* In the request queue. It belongs to the
-                                  scheduler */
-       AS_RQ_DISPATCHED,       /* On the dispatch list. It belongs to the
-                                  driver now */
-       AS_RQ_PRESCHED,         /* Debug poisoning for requests being used */
-       AS_RQ_REMOVED,
-       AS_RQ_MERGED,
-       AS_RQ_POSTSCHED,        /* when they shouldn't be */
-};
-
-struct as_rq {
-       /*
-        * rbtree index, key is the starting offset
-        */
-       struct rb_node rb_node;
-       sector_t rb_key;
-
-       struct request *request;
-
-       struct io_context *io_context;  /* The submitting task */
-
-       /*
-        * request hash, key is the ending offset (for back merge lookup)
-        */
-       struct list_head hash;
-       unsigned int on_hash;
-
-       /*
-        * expire fifo
-        */
-       struct list_head fifo;
-       unsigned long expires;
-
-       unsigned int is_sync;
-       enum arq_state state;
-};
-
-#define RQ_DATA(rq)    ((struct as_rq *) (rq)->elevator_private)
-
-static kmem_cache_t *arq_pool;
-
-/*
- * IO Context helper functions
- */
-
-/* Called to deallocate the as_io_context */
-static void free_as_io_context(struct as_io_context *aic)
-{
-       kfree(aic);
-}
-
-/* Called when the task exits */
-static void exit_as_io_context(struct as_io_context *aic)
-{
-       WARN_ON(!test_bit(AS_TASK_RUNNING, &aic->state));
-       clear_bit(AS_TASK_RUNNING, &aic->state);
-}
-
-static struct as_io_context *alloc_as_io_context(void)
-{
-       struct as_io_context *ret;
-
-       ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
-       if (ret) {
-               ret->dtor = free_as_io_context;
-               ret->exit = exit_as_io_context;
-               ret->state = 1 << AS_TASK_RUNNING;
-               atomic_set(&ret->nr_queued, 0);
-               atomic_set(&ret->nr_dispatched, 0);
-               spin_lock_init(&ret->lock);
-               ret->ttime_total = 0;
-               ret->ttime_samples = 0;
-               ret->ttime_mean = 0;
-               ret->seek_total = 0;
-               ret->seek_samples = 0;
-               ret->seek_mean = 0;
-       }
-
-       return ret;
-}
-
-/*
- * If the current task has no AS IO context then create one and initialise it.
- * Then take a ref on the task's io context and return it.
- */
-static struct io_context *as_get_io_context(void)
-{
-       struct io_context *ioc = get_io_context(GFP_ATOMIC);
-       if (ioc && !ioc->aic) {
-               ioc->aic = alloc_as_io_context();
-               if (!ioc->aic) {
-                       put_io_context(ioc);
-                       ioc = NULL;
-               }
-       }
-       return ioc;
-}
-
-static void as_put_io_context(struct as_rq *arq)
-{
-       struct as_io_context *aic;
-
-       if (unlikely(!arq->io_context))
-               return;
-
-       aic = arq->io_context->aic;
-
-       if (arq->is_sync == REQ_SYNC && aic) {
-               spin_lock(&aic->lock);
-               set_bit(AS_TASK_IORUNNING, &aic->state);
-               aic->last_end_request = jiffies;
-               spin_unlock(&aic->lock);
-       }
-
-       put_io_context(arq->io_context);
-}
-
-/*
- * the back merge hash support functions
- */
-static const int as_hash_shift = 6;
-#define AS_HASH_BLOCK(sec)     ((sec) >> 3)
-#define AS_HASH_FN(sec)                (hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
-#define AS_HASH_ENTRIES                (1 << as_hash_shift)
-#define rq_hash_key(rq)                ((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr)   list_entry((ptr), struct as_rq, hash)
-
-static inline void __as_del_arq_hash(struct as_rq *arq)
-{
-       arq->on_hash = 0;
-       list_del_init(&arq->hash);
-}
-
-static inline void as_del_arq_hash(struct as_rq *arq)
-{
-       if (arq->on_hash)
-               __as_del_arq_hash(arq);
-}
-
-static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
-{
-       struct request *rq = arq->request;
-
-       BUG_ON(arq->on_hash);
-
-       arq->on_hash = 1;
-       list_add(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
-}
-
-/*
- * move hot entry to front of chain
- */
-static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
-{
-       struct request *rq = arq->request;
-       struct list_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
-
-       if (!arq->on_hash) {
-               WARN_ON(1);
-               return;
-       }
-
-       if (arq->hash.prev != head) {
-               list_del(&arq->hash);
-               list_add(&arq->hash, head);
-       }
-}
-
-static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
-{
-       struct list_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
-       struct list_head *entry, *next = hash_list->next;
-
-       while ((entry = next) != hash_list) {
-               struct as_rq *arq = list_entry_hash(entry);
-               struct request *__rq = arq->request;
-
-               next = entry->next;
-
-               BUG_ON(!arq->on_hash);
-
-               if (!rq_mergeable(__rq)) {
-                       as_del_arq_hash(arq);
-                       continue;
-               }
-
-               if (rq_hash_key(__rq) == offset)
-                       return __rq;
-       }
-
-       return NULL;
-}
-
-/*
- * rb tree support functions
- */
-#define RB_NONE                (2)
-#define RB_EMPTY(root) ((root)->rb_node == NULL)
-#define ON_RB(node)    ((node)->rb_color != RB_NONE)
-#define RB_CLEAR(node) ((node)->rb_color = RB_NONE)
-#define rb_entry_arq(node)     rb_entry((node), struct as_rq, rb_node)
-#define ARQ_RB_ROOT(ad, arq)   (&(ad)->sort_list[(arq)->is_sync])
-#define rq_rb_key(rq)          (rq)->sector
-
-/*
- * as_find_first_arq finds the first (lowest sector numbered) request
- * for the specified data_dir. Used to sweep back to the start of the disk
- * (1-way elevator) after we process the last (highest sector) request.
- */
-static struct as_rq *as_find_first_arq(struct as_data *ad, int data_dir)
-{
-       struct rb_node *n = ad->sort_list[data_dir].rb_node;
-
-       if (n == NULL)
-               return NULL;
-
-       for (;;) {
-               if (n->rb_left == NULL)
-                       return rb_entry_arq(n);
-
-               n = n->rb_left;
-       }
-}
-
-/*
- * Add the request to the rb tree if it is unique.  If there is an alias (an
- * existing request against the same sector), which can happen when using
- * direct IO, then return the alias.
- */
-static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
-{
-       struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
-       struct rb_node *parent = NULL;
-       struct as_rq *__arq;
-       struct request *rq = arq->request;
-
-       arq->rb_key = rq_rb_key(rq);
-
-       while (*p) {
-               parent = *p;
-               __arq = rb_entry_arq(parent);
-
-               if (arq->rb_key < __arq->rb_key)
-                       p = &(*p)->rb_left;
-               else if (arq->rb_key > __arq->rb_key)
-                       p = &(*p)->rb_right;
-               else
-                       return __arq;
-       }
-
-       rb_link_node(&arq->rb_node, parent, p);
-       rb_insert_color(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
-
-       return NULL;
-}
-
-static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
-{
-       if (!ON_RB(&arq->rb_node)) {
-               WARN_ON(1);
-               return;
-       }
-
-       rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
-       RB_CLEAR(&arq->rb_node);
-}
-
-static struct request *
-as_find_arq_rb(struct as_data *ad, sector_t sector, int data_dir)
-{
-       struct rb_node *n = ad->sort_list[data_dir].rb_node;
-       struct as_rq *arq;
-
-       while (n) {
-               arq = rb_entry_arq(n);
-
-               if (sector < arq->rb_key)
-                       n = n->rb_left;
-               else if (sector > arq->rb_key)
-                       n = n->rb_right;
-               else
-                       return arq->request;
-       }
-
-       return NULL;
-}
-
-/*
- * IO Scheduler proper
- */
-
-#define MAXBACK (1024 * 1024)  /*
-                                * Maximum distance the disk will go backward
-                                * for a request.
-                                */
-
-#define BACK_PENALTY   2
-
-/*
- * as_choose_req selects the preferred one of two requests of the same data_dir
- * ignoring time - e.g. timeouts, which is the job of as_dispatch_request
- */
-static struct as_rq *
-as_choose_req(struct as_data *ad, struct as_rq *arq1, struct as_rq *arq2)
-{
-       int data_dir;
-       sector_t last, s1, s2, d1, d2;
-       int r1_wrap=0, r2_wrap=0;       /* requests are behind the disk head */
-       const sector_t maxback = MAXBACK;
-
-       if (arq1 == NULL || arq1 == arq2)
-               return arq2;
-       if (arq2 == NULL)
-               return arq1;
-
-       data_dir = arq1->is_sync;
-
-       last = ad->last_sector[data_dir];
-       s1 = arq1->request->sector;
-       s2 = arq2->request->sector;
-
-       BUG_ON(data_dir != arq2->is_sync);
-
-       /*
-        * Strict one way elevator _except_ in the case where we allow
-        * short backward seeks which are biased as twice the cost of a
-        * similar forward seek.
-        */
-       if (s1 >= last)
-               d1 = s1 - last;
-       else if (s1+maxback >= last)
-               d1 = (last - s1)*BACK_PENALTY;
-       else {
-               r1_wrap = 1;
-               d1 = 0; /* shut up, gcc */
-       }
-
-       if (s2 >= last)
-               d2 = s2 - last;
-       else if (s2+maxback >= last)
-               d2 = (last - s2)*BACK_PENALTY;
-       else {
-               r2_wrap = 1;
-               d2 = 0;
-       }
-
-       /* Found required data */
-       if (!r1_wrap && r2_wrap)
-               return arq1;
-       else if (!r2_wrap && r1_wrap)
-               return arq2;
-       else if (r1_wrap && r2_wrap) {
-               /* both behind the head */
-               if (s1 <= s2)
-                       return arq1;
-               else
-                       return arq2;
-       }
-
-       /* Both requests in front of the head */
-       if (d1 < d2)
-               return arq1;
-       else if (d2 < d1)
-               return arq2;
-       else {
-               if (s1 >= s2)
-                       return arq1;
-               else
-                       return arq2;
-       }
-}
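A worked example of the back-seek penalty: with last = 1000 and two sync requests at sectors 1100 and 960, d1 = 100 while d2 = (1000 - 960) * BACK_PENALTY = 80, so the short backward seek to 960 still wins; a request more than MAXBACK sectors behind the head is treated as wrapped and loses to any request in front of it.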
-
-/*
- * as_find_next_arq finds the next request after @last in elevator order.
- * This, together with as_choose_req, forms the basis for how the scheduler chooses
- * what request to process next. Anticipation works on top of this.
- */
-static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *last)
-{
-       const int data_dir = last->is_sync;
-       struct as_rq *ret;
-       struct rb_node *rbnext = rb_next(&last->rb_node);
-       struct rb_node *rbprev = rb_prev(&last->rb_node);
-       struct as_rq *arq_next, *arq_prev;
-
-       BUG_ON(!ON_RB(&last->rb_node));
-
-       if (rbprev)
-               arq_prev = rb_entry_arq(rbprev);
-       else
-               arq_prev = NULL;
-
-       if (rbnext)
-               arq_next = rb_entry_arq(rbnext);
-       else {
-               arq_next = as_find_first_arq(ad, data_dir);
-               if (arq_next == last)
-                       arq_next = NULL;
-       }
-
-       ret = as_choose_req(ad, arq_next, arq_prev);
-
-       return ret;
-}
-
-/*
- * anticipatory scheduling functions follow
- */
-
-/*
- * as_antic_expired tells us when we have anticipated too long.
- * The funny "absolute difference" math on the elapsed time is to handle
- * jiffy wraps, and disks which have been idle for 0x80000000 jiffies.
- */
-static int as_antic_expired(struct as_data *ad)
-{
-       long delta_jif;
-
-       delta_jif = jiffies - ad->antic_start;
-       if (unlikely(delta_jif < 0))
-               delta_jif = -delta_jif;
-       if (delta_jif < ad->antic_expire)
-               return 0;
-
-       return 1;
-}
-
-/*
- * as_antic_waitnext starts anticipating that a nice request will soon be
- * submitted. See also as_antic_waitreq
- */
-static void as_antic_waitnext(struct as_data *ad)
-{
-       unsigned long timeout;
-
-       BUG_ON(ad->antic_status != ANTIC_OFF
-                       && ad->antic_status != ANTIC_WAIT_REQ);
-
-       timeout = ad->antic_start + ad->antic_expire;
-
-       mod_timer(&ad->antic_timer, timeout);
-
-       ad->antic_status = ANTIC_WAIT_NEXT;
-}
-
-/*
- * as_antic_waitreq starts anticipating. We don't start timing the anticipation
- * until the request that we're anticipating on has finished. This means we
- * are timing from when the candidate process wakes up hopefully.
- */
-static void as_antic_waitreq(struct as_data *ad)
-{
-       BUG_ON(ad->antic_status == ANTIC_FINISHED);
-       if (ad->antic_status == ANTIC_OFF) {
-               if (!ad->io_context || ad->ioc_finished)
-                       as_antic_waitnext(ad);
-               else
-                       ad->antic_status = ANTIC_WAIT_REQ;
-       }
-}
-
-/*
- * This is called directly by the functions in this file to stop anticipation.
- * We kill the timer and schedule a call to the request_fn asap.
- */
-static void as_antic_stop(struct as_data *ad)
-{
-       int status = ad->antic_status;
-
-       if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
-               if (status == ANTIC_WAIT_NEXT)
-                       del_timer(&ad->antic_timer);
-               ad->antic_status = ANTIC_FINISHED;
-               /* see as_work_handler */
-               kblockd_schedule_work(&ad->antic_work);
-       }
-}
-
-/*
- * as_antic_timeout is the timer function set by as_antic_waitnext.
- */
-static void as_antic_timeout(unsigned long data)
-{
-       struct request_queue *q = (struct request_queue *)data;
-       struct as_data *ad = q->elevator->elevator_data;
-       unsigned long flags;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-       if (ad->antic_status == ANTIC_WAIT_REQ
-                       || ad->antic_status == ANTIC_WAIT_NEXT) {
-               struct as_io_context *aic = ad->io_context->aic;
-
-               ad->antic_status = ANTIC_FINISHED;
-               kblockd_schedule_work(&ad->antic_work);
-
-               if (aic->ttime_samples == 0) {
-                       /* process anticipated on has exited or timed out */
-                       ad->exit_prob = (7*ad->exit_prob + 256)/8;
-               }
-       }
-       spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-/*
- * as_close_req decides if one request is considered "close" to the
- * previous one issued.
- */
-static int as_close_req(struct as_data *ad, struct as_rq *arq)
-{
-       unsigned long delay;    /* milliseconds */
-       sector_t last = ad->last_sector[ad->batch_data_dir];
-       sector_t next = arq->request->sector;
-       sector_t delta; /* acceptable close offset (in sectors) */
-
-       if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished)
-               delay = 0;
-       else
-               delay = ((jiffies - ad->antic_start) * 1000) / HZ;
-
-       if (delay <= 1)
-               delta = 64;
-       else if (delay <= 20 && delay <= ad->antic_expire)
-               delta = 64 << (delay-1);
-       else
-               return 1;
-
-       return (last - (delta>>1) <= next) && (next <= last + delta);
-}
-
-/*
- * as_can_break_anticipation returns true if we have been anticipating this
- * request.
- *
- * It also returns true if the process against which we are anticipating
- * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to
- * dispatch it ASAP, because we know that application will not be submitting
- * any new reads.
- *
- * If the task which has submitted the request has exited, break anticipation.
- *
- * If this task has queued some other IO, do not enter anticipation.
- */
-static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
-{
-       struct io_context *ioc;
-       struct as_io_context *aic;
-       sector_t s;
-
-       ioc = ad->io_context;
-       BUG_ON(!ioc);
-
-       if (arq && ioc == arq->io_context) {
-               /* request from same process */
-               return 1;
-       }
-
-       if (ad->ioc_finished && as_antic_expired(ad)) {
-               /*
-                * In this situation status should really be FINISHED,
-                * however the timer hasn't had the chance to run yet.
-                */
-               return 1;
-       }
-
-       aic = ioc->aic;
-       if (!aic)
-               return 0;
-
-       if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
-               /* process anticipated on has exited */
-               if (aic->ttime_samples == 0)
-                       ad->exit_prob = (7*ad->exit_prob + 256)/8;
-               return 1;
-       }
-
-       if (atomic_read(&aic->nr_queued) > 0) {
-               /* process has more requests queued */
-               return 1;
-       }
-
-       if (atomic_read(&aic->nr_dispatched) > 0) {
-               /* process has more requests dispatched */
-               return 1;
-       }
-
-       if (arq && arq->is_sync == REQ_SYNC && as_close_req(ad, arq)) {
-               /*
-                * Found a close request that is not one of ours.
-                *
-                * This makes close requests from another process reset
-                * our thinktime delay. This is generally useful when there are
-                * two or more cooperating processes working in the same
-                * area.
-                */
-               spin_lock(&aic->lock);
-               aic->last_end_request = jiffies;
-               spin_unlock(&aic->lock);
-               return 1;
-       }
-
-
-       if (aic->ttime_samples == 0) {
-               if (ad->new_ttime_mean > ad->antic_expire)
-                       return 1;
-               if (ad->exit_prob > 128)
-                       return 1;
-       } else if (aic->ttime_mean > ad->antic_expire) {
-               /* the process thinks too much between requests */
-               return 1;
-       }
-
-       if (!arq)
-               return 0;
-
-       if (ad->last_sector[REQ_SYNC] < arq->request->sector)
-               s = arq->request->sector - ad->last_sector[REQ_SYNC];
-       else
-               s = ad->last_sector[REQ_SYNC] - arq->request->sector;
-
-       if (aic->seek_samples == 0) {
-               /*
-                * Process has just started IO. Use past statistics to
-                * gauge the likelihood of success
-                */
-               if (ad->new_seek_mean > s) {
-                       /* this request is better than what we're expecting */
-                       return 1;
-               }
-
-       } else {
-               if (aic->seek_mean > s) {
-                       /* this request is better than what we're expecting */
-                       return 1;
-               }
-       }
-
-       return 0;
-}
-
-/*
- * as_can_anticipate indicates whether we should either run arq
- * or keep anticipating a better request.
- */
-static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
-{
-       if (!ad->io_context)
-               /*
-                * Last request submitted was a write
-                */
-               return 0;
-
-       if (ad->antic_status == ANTIC_FINISHED)
-               /*
-                * Don't restart if we have just finished. Run the next request
-                */
-               return 0;
-
-       if (as_can_break_anticipation(ad, arq))
-               /*
-                * This request is a good candidate. Don't keep anticipating,
-                * run it.
-                */
-               return 0;
-
-       /*
-        * OK from here, we haven't finished, and don't have a decent request!
-        * Status is either ANTIC_OFF so start waiting,
-        * ANTIC_WAIT_REQ so continue waiting for request to finish
-        * or ANTIC_WAIT_NEXT so continue waiting for an acceptable request.
-        *
-        */
-
-       return 1;
-}
-
-static void as_update_thinktime(struct as_data *ad, struct as_io_context *aic, unsigned long ttime)
-{
-       /* fixed point: 1.0 == 1<<8 */
-       if (aic->ttime_samples == 0) {
-               ad->new_ttime_total = (7*ad->new_ttime_total + 256*ttime) / 8;
-               ad->new_ttime_mean = ad->new_ttime_total / 256;
-
-               ad->exit_prob = (7*ad->exit_prob)/8;
-       }
-       aic->ttime_samples = (7*aic->ttime_samples + 256) / 8;
-       aic->ttime_total = (7*aic->ttime_total + 256*ttime) / 8;
-       aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
-}
-
-static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic, sector_t sdist)
-{
-       u64 total;
-
-       if (aic->seek_samples == 0) {
-               ad->new_seek_total = (7*ad->new_seek_total + 256*(u64)sdist)/8;
-               ad->new_seek_mean = ad->new_seek_total / 256;
-       }
-
-       /*
-        * Don't allow the seek distance to get too large from the
-        * odd fragment, pagein, etc
-        */
-       if (aic->seek_samples <= 60) /* second&third seek */
-               sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*1024);
-       else
-               sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*64);
-
-       aic->seek_samples = (7*aic->seek_samples + 256) / 8;
-       aic->seek_total = (7*aic->seek_total + (u64)256*sdist) / 8;
-       total = aic->seek_total + (aic->seek_samples/2);
-       do_div(total, aic->seek_samples);
-       aic->seek_mean = (sector_t)total;
-}
-
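Both as_update_thinktime and as_update_seekdist keep the same kind of statistic: a fixed-point (1.0 == 1<<8) running total where each update retains 7/8 of the old value, so history decays geometrically and the sample count saturates toward 256. A minimal userspace sketch of that pattern, with illustrative names:

#include <stdio.h>

/*
 * Fixed point: 1.0 == 1 << 8. Each update keeps 7/8 of the running
 * total and folds in the new sample, so old history decays
 * geometrically; the sample count saturates toward 256 (1.0).
 */
struct decaying_mean {
        unsigned long samples;  /* fixed-point sample count */
        unsigned long total;    /* fixed-point running total */
};

static unsigned long decaying_update(struct decaying_mean *m,
                                     unsigned long val)
{
        m->samples = (7 * m->samples + 256) / 8;
        m->total = (7 * m->total + 256 * val) / 8;
        /* round to nearest, as the +128 and +samples/2 terms above do */
        return (m->total + m->samples / 2) / m->samples;
}

int main(void)
{
        struct decaying_mean m = { 0, 0 };
        unsigned long v[] = { 10, 10, 10, 100, 10, 10 };

        for (int i = 0; i < 6; i++)
                printf("mean=%lu\n", decaying_update(&m, v[i]));
        return 0;
}
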
-/*
- * as_update_iohist keeps a decaying histogram of IO thinktimes, and
- * updates @aic->ttime_mean based on that. It is called when a new
- * request is queued.
- */
-static void as_update_iohist(struct as_data *ad, struct as_io_context *aic, struct request *rq)
-{
-       struct as_rq *arq = RQ_DATA(rq);
-       int data_dir = arq->is_sync;
-       unsigned long thinktime;
-       sector_t seek_dist;
-
-       if (aic == NULL)
-               return;
-
-       if (data_dir == REQ_SYNC) {
-               unsigned long in_flight = atomic_read(&aic->nr_queued)
-                                       + atomic_read(&aic->nr_dispatched);
-               spin_lock(&aic->lock);
-               if (test_bit(AS_TASK_IORUNNING, &aic->state) ||
-                       test_bit(AS_TASK_IOSTARTED, &aic->state)) {
-                       /* Calculate read -> read thinktime */
-                       if (test_bit(AS_TASK_IORUNNING, &aic->state)
-                                                       && in_flight == 0) {
-                               thinktime = jiffies - aic->last_end_request;
-                               thinktime = min(thinktime, MAX_THINKTIME-1);
-                       } else
-                               thinktime = 0;
-                       as_update_thinktime(ad, aic, thinktime);
-
-                       /* Calculate read -> read seek distance */
-                       if (aic->last_request_pos < rq->sector)
-                               seek_dist = rq->sector - aic->last_request_pos;
-                       else
-                               seek_dist = aic->last_request_pos - rq->sector;
-                       as_update_seekdist(ad, aic, seek_dist);
-               }
-               aic->last_request_pos = rq->sector + rq->nr_sectors;
-               set_bit(AS_TASK_IOSTARTED, &aic->state);
-               spin_unlock(&aic->lock);
-       }
-}
-
-/*
- * as_update_arq must be called whenever a request (arq) is added to
- * the sort_list. This function keeps caches up to date, and checks if the
- * request might be one we are "anticipating"
- */
-static void as_update_arq(struct as_data *ad, struct as_rq *arq)
-{
-       const int data_dir = arq->is_sync;
-
-       /* keep the next_arq cache up to date */
-       ad->next_arq[data_dir] = as_choose_req(ad, arq, ad->next_arq[data_dir]);
-
-       /*
-        * have we been anticipating this request?
-        * or does it come from the same process as the one we are anticipating
-        * for?
-        */
-       if (ad->antic_status == ANTIC_WAIT_REQ
-                       || ad->antic_status == ANTIC_WAIT_NEXT) {
-               if (as_can_break_anticipation(ad, arq))
-                       as_antic_stop(ad);
-       }
-}
-
-/*
- * Gathers timings and resizes the write batch automatically
- */
-static void update_write_batch(struct as_data *ad)
-{
-       unsigned long batch = ad->batch_expire[REQ_ASYNC];
-       long write_time;
-
-       write_time = (jiffies - ad->current_batch_expires) + batch;
-       if (write_time < 0)
-               write_time = 0;
-
-       if (write_time > batch && !ad->write_batch_idled) {
-               if (write_time > batch * 3)
-                       ad->write_batch_count /= 2;
-               else
-                       ad->write_batch_count--;
-       } else if (write_time < batch && ad->current_write_count == 0) {
-               if (batch > write_time * 3)
-                       ad->write_batch_count *= 2;
-               else
-                       ad->write_batch_count++;
-       }
-
-       if (ad->write_batch_count < 1)
-               ad->write_batch_count = 1;
-}
-
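The resizing rule above is a simple feedback loop: overruns shrink the next batch, early finishes grow it, with halving or doubling for gross misses. A standalone sketch of just that rule, dropping the idle and contention checks; names and numbers are illustrative:

#include <stdio.h>

/*
 * Shrink the next write batch when the last one overran its time
 * budget, grow it when it finished early: halve/double on gross
 * misses, step by one otherwise, never going below one request.
 */
static void resize_batch(long *batch_count, long target, long actual)
{
        if (actual > target)
                *batch_count = actual > 3 * target ? *batch_count / 2
                                                   : *batch_count - 1;
        else if (actual < target)
                *batch_count = target > 3 * actual ? *batch_count * 2
                                                   : *batch_count + 1;
        if (*batch_count < 1)
                *batch_count = 1;
}

int main(void)
{
        long count = 8;

        resize_batch(&count, 100, 450); /* gross overrun: halved */
        printf("count=%ld\n", count);   /* 4 */
        resize_batch(&count, 100, 20);  /* finished very early: doubled */
        printf("count=%ld\n", count);   /* 8 */
        return 0;
}
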
-/*
- * as_completed_request is to be called when a request has completed and
- * returned something to the requesting process, be it an error or data.
- */
-static void as_completed_request(request_queue_t *q, struct request *rq)
-{
-       struct as_data *ad = q->elevator->elevator_data;
-       struct as_rq *arq = RQ_DATA(rq);
-
-       WARN_ON(!list_empty(&rq->queuelist));
-
-       if (arq->state != AS_RQ_REMOVED) {
-               printk("arq->state %d\n", arq->state);
-               WARN_ON(1);
-               goto out;
-       }
-
-       if (ad->changed_batch && ad->nr_dispatched == 1) {
-               kblockd_schedule_work(&ad->antic_work);
-               ad->changed_batch = 0;
-
-               if (ad->batch_data_dir == REQ_SYNC)
-                       ad->new_batch = 1;
-       }
-       WARN_ON(ad->nr_dispatched == 0);
-       ad->nr_dispatched--;
-
-       /*
-        * Start counting the batch from when a request of that direction is
-        * actually serviced. This should help devices with big TCQ windows
-        * and writeback caches
-        */
-       if (ad->new_batch && ad->batch_data_dir == arq->is_sync) {
-               update_write_batch(ad);
-               ad->current_batch_expires = jiffies +
-                               ad->batch_expire[REQ_SYNC];
-               ad->new_batch = 0;
-       }
-
-       if (ad->io_context == arq->io_context && ad->io_context) {
-               ad->antic_start = jiffies;
-               ad->ioc_finished = 1;
-               if (ad->antic_status == ANTIC_WAIT_REQ) {
-                       /*
-                        * We were waiting on this request, now anticipate
-                        * the next one
-                        */
-                       as_antic_waitnext(ad);
-               }
-       }
-
-       as_put_io_context(arq);
-out:
-       arq->state = AS_RQ_POSTSCHED;
-}
-
-/*
- * as_remove_queued_request removes a request from the pre dispatch queue
- * without updating refcounts. It is expected the caller will drop the
- * reference unless it replaces the request at some part of the elevator
- * (ie. the dispatch queue)
- */
-static void as_remove_queued_request(request_queue_t *q, struct request *rq)
-{
-       struct as_rq *arq = RQ_DATA(rq);
-       const int data_dir = arq->is_sync;
-       struct as_data *ad = q->elevator->elevator_data;
-
-       WARN_ON(arq->state != AS_RQ_QUEUED);
-
-       if (arq->io_context && arq->io_context->aic) {
-               BUG_ON(!atomic_read(&arq->io_context->aic->nr_queued));
-               atomic_dec(&arq->io_context->aic->nr_queued);
-       }
-
-       /*
-        * Update the "next_arq" cache if we are about to remove its
-        * entry
-        */
-       if (ad->next_arq[data_dir] == arq)
-               ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
-
-       list_del_init(&arq->fifo);
-       as_del_arq_hash(arq);
-       as_del_arq_rb(ad, arq);
-}
-
-/*
- * as_fifo_expired returns 0 if there are no expired requests on the fifo,
- * 1 otherwise.  It is ratelimited so that we only perform the check once per
- * `fifo_expire' interval.  Otherwise a large number of expired requests
- * would create a hopeless seek storm.
- *
- * See as_antic_expired comment.
- */
-static int as_fifo_expired(struct as_data *ad, int adir)
-{
-       struct as_rq *arq;
-       long delta_jif;
-
-       delta_jif = jiffies - ad->last_check_fifo[adir];
-       if (unlikely(delta_jif < 0))
-               delta_jif = -delta_jif;
-       if (delta_jif < ad->fifo_expire[adir])
-               return 0;
-
-       ad->last_check_fifo[adir] = jiffies;
-
-       if (list_empty(&ad->fifo_list[adir]))
-               return 0;
-
-       arq = list_entry_fifo(ad->fifo_list[adir].next);
-
-       return time_after(jiffies, arq->expires);
-}
-
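A userspace sketch of that ratelimited expiry check, using a wraparound-safe comparison in the style of the kernel's time_after(); everything here is an illustrative stand-in:

#include <stdio.h>

/* Wraparound-safe "a is after b", in the style of time_after(). */
#define time_after(a, b)  ((long)((b) - (a)) < 0)

/*
 * Only look at the FIFO head once per 'expire' interval; between
 * checks report "not expired", so a burst of old requests cannot
 * force a seek storm of back-to-back FIFO dispatches.
 */
static int fifo_expired(unsigned long now, unsigned long *last_check,
                        unsigned long expire, unsigned long head_expires,
                        int fifo_empty)
{
        long delta = now - *last_check;

        if (delta < 0)
                delta = -delta;
        if ((unsigned long)delta < expire)
                return 0;       /* rate limited: checked too recently */

        *last_check = now;
        if (fifo_empty)
                return 0;
        return time_after(now, head_expires);
}

int main(void)
{
        unsigned long last = 0;

        printf("%d\n", fifo_expired(125, &last, 100, 50, 0)); /* 1: overdue */
        printf("%d\n", fifo_expired(150, &last, 100, 50, 0)); /* 0: limited */
        return 0;
}
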
-/*
- * as_batch_expired returns true if the current batch has expired. A batch
- * is a set of reads or a set of writes.
- */
-static inline int as_batch_expired(struct as_data *ad)
-{
-       if (ad->changed_batch || ad->new_batch)
-               return 0;
-
-       if (ad->batch_data_dir == REQ_SYNC)
-               /* TODO! add a check so a complete fifo gets written? */
-               return time_after(jiffies, ad->current_batch_expires);
-
-       return time_after(jiffies, ad->current_batch_expires)
-               || ad->current_write_count == 0;
-}
-
-/*
- * move an entry to dispatch queue
- */
-static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
-{
-       struct request *rq = arq->request;
-       const int data_dir = arq->is_sync;
-
-       BUG_ON(!ON_RB(&arq->rb_node));
-
-       as_antic_stop(ad);
-       ad->antic_status = ANTIC_OFF;
-
-       /*
-        * This has to be set in order to be correctly updated by
-        * as_find_next_arq
-        */
-       ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;
-
-       if (data_dir == REQ_SYNC) {
-               /* In case we have to anticipate after this */
-               copy_io_context(&ad->io_context, &arq->io_context);
-       } else {
-               if (ad->io_context) {
-                       put_io_context(ad->io_context);
-                       ad->io_context = NULL;
-               }
-
-               if (ad->current_write_count != 0)
-                       ad->current_write_count--;
-       }
-       ad->ioc_finished = 0;
-
-       ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
-
-       /*
-        * take it off the sort and fifo list, add to dispatch queue
-        */
-       while (!list_empty(&rq->queuelist)) {
-               struct request *__rq = list_entry_rq(rq->queuelist.next);
-               struct as_rq *__arq = RQ_DATA(__rq);
-
-               list_del(&__rq->queuelist);
-
-               elv_dispatch_add_tail(ad->q, __rq);
-
-               if (__arq->io_context && __arq->io_context->aic)
-                       atomic_inc(&__arq->io_context->aic->nr_dispatched);
-
-               WARN_ON(__arq->state != AS_RQ_QUEUED);
-               __arq->state = AS_RQ_DISPATCHED;
-
-               ad->nr_dispatched++;
-       }
-
-       as_remove_queued_request(ad->q, rq);
-       WARN_ON(arq->state != AS_RQ_QUEUED);
-
-       elv_dispatch_sort(ad->q, rq);
-
-       arq->state = AS_RQ_DISPATCHED;
-       if (arq->io_context && arq->io_context->aic)
-               atomic_inc(&arq->io_context->aic->nr_dispatched);
-       ad->nr_dispatched++;
-}
-
-/*
- * as_dispatch_request selects the best request according to
- * read/write expire, batch expire, etc, and moves it to the dispatch
- * queue. Returns 1 if a request was found, 0 otherwise.
- */
-static int as_dispatch_request(request_queue_t *q, int force)
-{
-       struct as_data *ad = q->elevator->elevator_data;
-       struct as_rq *arq;
-       const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
-       const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);
-
-       if (unlikely(force)) {
-               /*
-                * Forced dispatch, accounting is useless.  Reset
-                * accounting states and dump fifo_lists.  Note that
-                * batch_data_dir is reset to REQ_SYNC to avoid
-                * corrupting the write batch accounting, which
-                * occurs on the write->read transition.
-                */
-               int dispatched = 0;
-
-               ad->batch_data_dir = REQ_SYNC;
-               ad->changed_batch = 0;
-               ad->new_batch = 0;
-
-               while (ad->next_arq[REQ_SYNC]) {
-                       as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
-                       dispatched++;
-               }
-               ad->last_check_fifo[REQ_SYNC] = jiffies;
-
-               while (ad->next_arq[REQ_ASYNC]) {
-                       as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
-                       dispatched++;
-               }
-               ad->last_check_fifo[REQ_ASYNC] = jiffies;
-
-               return dispatched;
-       }
-
-       /* Signal that the write batch was uncontended, so we can't time it */
-       if (ad->batch_data_dir == REQ_ASYNC && !reads) {
-               if (ad->current_write_count == 0 || !writes)
-                       ad->write_batch_idled = 1;
-       }
-
-       if (!(reads || writes)
-               || ad->antic_status == ANTIC_WAIT_REQ
-               || ad->antic_status == ANTIC_WAIT_NEXT
-               || ad->changed_batch)
-               return 0;
-
-       if (!(reads && writes && as_batch_expired(ad))) {
-               /*
-                * batch is still running or no reads or no writes
-                */
-               arq = ad->next_arq[ad->batch_data_dir];
-
-               if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) {
-                       if (as_fifo_expired(ad, REQ_SYNC))
-                               goto fifo_expired;
-
-                       if (as_can_anticipate(ad, arq)) {
-                               as_antic_waitreq(ad);
-                               return 0;
-                       }
-               }
-
-               if (arq) {
-                       /* we have a "next request" */
-                       if (reads && !writes)
-                               ad->current_batch_expires =
-                                       jiffies + ad->batch_expire[REQ_SYNC];
-                       goto dispatch_request;
-               }
-       }
-
-       /*
-        * at this point we are not running a batch. select the appropriate
-        * data direction (read / write)
-        */
-
-       if (reads) {
-               BUG_ON(RB_EMPTY(&ad->sort_list[REQ_SYNC]));
-
-               if (writes && ad->batch_data_dir == REQ_SYNC)
-                       /*
-                        * Last batch was a read, switch to writes
-                        */
-                       goto dispatch_writes;
-
-               if (ad->batch_data_dir == REQ_ASYNC) {
-                       WARN_ON(ad->new_batch);
-                       ad->changed_batch = 1;
-               }
-               ad->batch_data_dir = REQ_SYNC;
-               arq = list_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
-               ad->last_check_fifo[ad->batch_data_dir] = jiffies;
-               goto dispatch_request;
-       }
-
-       /*
-        * the last batch was a read
-        */
-
-       if (writes) {
-dispatch_writes:
-               BUG_ON(RB_EMPTY(&ad->sort_list[REQ_ASYNC]));
-
-               if (ad->batch_data_dir == REQ_SYNC) {
-                       ad->changed_batch = 1;
-
-                       /*
-                        * new_batch might be 1 when the queue runs out of
-                        * reads. A subsequent submission of a write might
-                        * cause a change of batch before the read is finished.
-                        */
-                       ad->new_batch = 0;
-               }
-               ad->batch_data_dir = REQ_ASYNC;
-               ad->current_write_count = ad->write_batch_count;
-               ad->write_batch_idled = 0;
-               arq = ad->next_arq[ad->batch_data_dir];
-               goto dispatch_request;
-       }
-
-       BUG();
-       return 0;
-
-dispatch_request:
-       /*
-        * If a request has expired, service it.
-        */
-
-       if (as_fifo_expired(ad, ad->batch_data_dir)) {
-fifo_expired:
-               arq = list_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
-               BUG_ON(arq == NULL);
-       }
-
-       if (ad->changed_batch) {
-               WARN_ON(ad->new_batch);
-
-               if (ad->nr_dispatched)
-                       return 0;
-
-               if (ad->batch_data_dir == REQ_ASYNC)
-                       ad->current_batch_expires = jiffies +
-                                       ad->batch_expire[REQ_ASYNC];
-               else
-                       ad->new_batch = 1;
-
-               ad->changed_batch = 0;
-       }
-
-       /*
-        * arq is the selected appropriate request.
-        */
-       as_move_to_dispatch(ad, arq);
-
-       return 1;
-}
-
-/*
- * Add arq to a list behind alias
- */
-static inline void
-as_add_aliased_request(struct as_data *ad, struct as_rq *arq, struct as_rq *alias)
-{
-       struct request  *req = arq->request;
-       struct list_head *insert = alias->request->queuelist.prev;
-
-       /*
-        * Transfer list of aliases
-        */
-       while (!list_empty(&req->queuelist)) {
-               struct request *__rq = list_entry_rq(req->queuelist.next);
-               struct as_rq *__arq = RQ_DATA(__rq);
-
-               list_move_tail(&__rq->queuelist, &alias->request->queuelist);
-
-               WARN_ON(__arq->state != AS_RQ_QUEUED);
-       }
-
-       /*
-        * Another request with the same start sector on the rbtree.
-        * Link this request to that sector. They are untangled in
-        * as_move_to_dispatch
-        */
-       list_add(&arq->request->queuelist, insert);
-
-       /*
-        * Don't want to have to handle merges.
-        */
-       as_del_arq_hash(arq);
-       arq->request->flags |= REQ_NOMERGE;
-}
-
-/*
- * add arq to rbtree and fifo
- */
-static void as_add_request(request_queue_t *q, struct request *rq)
-{
-       struct as_data *ad = q->elevator->elevator_data;
-       struct as_rq *arq = RQ_DATA(rq);
-       struct as_rq *alias;
-       int data_dir;
-
-       if (arq->state != AS_RQ_PRESCHED) {
-               printk("arq->state: %d\n", arq->state);
-               WARN_ON(1);
-       }
-       arq->state = AS_RQ_NEW;
-
-       if (rq_data_dir(arq->request) == READ
-                       || current->flags&PF_SYNCWRITE)
-               arq->is_sync = 1;
-       else
-               arq->is_sync = 0;
-       data_dir = arq->is_sync;
-
-       arq->io_context = as_get_io_context();
-
-       if (arq->io_context) {
-               as_update_iohist(ad, arq->io_context->aic, arq->request);
-               atomic_inc(&arq->io_context->aic->nr_queued);
-       }
-
-       alias = as_add_arq_rb(ad, arq);
-       if (!alias) {
-               /*
-                * set expire time (only used for reads) and add to fifo list
-                */
-               arq->expires = jiffies + ad->fifo_expire[data_dir];
-               list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
-
-               if (rq_mergeable(arq->request))
-                       as_add_arq_hash(ad, arq);
-               as_update_arq(ad, arq); /* keep state machine up to date */
-
-       } else {
-               as_add_aliased_request(ad, arq, alias);
-
-               /*
-                * have we been anticipating this request?
-                * or does it come from the same process as the one we are
-                * anticipating for?
-                */
-               if (ad->antic_status == ANTIC_WAIT_REQ
-                               || ad->antic_status == ANTIC_WAIT_NEXT) {
-                       if (as_can_break_anticipation(ad, arq))
-                               as_antic_stop(ad);
-               }
-       }
-
-       arq->state = AS_RQ_QUEUED;
-}
-
-static void as_activate_request(request_queue_t *q, struct request *rq)
-{
-       struct as_rq *arq = RQ_DATA(rq);
-
-       WARN_ON(arq->state != AS_RQ_DISPATCHED);
-       arq->state = AS_RQ_REMOVED;
-       if (arq->io_context && arq->io_context->aic)
-               atomic_dec(&arq->io_context->aic->nr_dispatched);
-}
-
-static void as_deactivate_request(request_queue_t *q, struct request *rq)
-{
-       struct as_rq *arq = RQ_DATA(rq);
-
-       WARN_ON(arq->state != AS_RQ_REMOVED);
-       arq->state = AS_RQ_DISPATCHED;
-       if (arq->io_context && arq->io_context->aic)
-               atomic_inc(&arq->io_context->aic->nr_dispatched);
-}
-
-/*
- * as_queue_empty tells us if there are requests left in the device. It may
- * not be the case that a driver can get the next request even if the queue
- * is not empty - it is used in the block layer to check for plugging and
- * merging opportunities
- */
-static int as_queue_empty(request_queue_t *q)
-{
-       struct as_data *ad = q->elevator->elevator_data;
-
-       return list_empty(&ad->fifo_list[REQ_ASYNC])
-               && list_empty(&ad->fifo_list[REQ_SYNC]);
-}
-
-static struct request *
-as_former_request(request_queue_t *q, struct request *rq)
-{
-       struct as_rq *arq = RQ_DATA(rq);
-       struct rb_node *rbprev = rb_prev(&arq->rb_node);
-       struct request *ret = NULL;
-
-       if (rbprev)
-               ret = rb_entry_arq(rbprev)->request;
-
-       return ret;
-}
-
-static struct request *
-as_latter_request(request_queue_t *q, struct request *rq)
-{
-       struct as_rq *arq = RQ_DATA(rq);
-       struct rb_node *rbnext = rb_next(&arq->rb_node);
-       struct request *ret = NULL;
-
-       if (rbnext)
-               ret = rb_entry_arq(rbnext)->request;
-
-       return ret;
-}
-
-static int
-as_merge(request_queue_t *q, struct request **req, struct bio *bio)
-{
-       struct as_data *ad = q->elevator->elevator_data;
-       sector_t rb_key = bio->bi_sector + bio_sectors(bio);
-       struct request *__rq;
-       int ret;
-
-       /*
-        * see if the merge hash can satisfy a back merge
-        */
-       __rq = as_find_arq_hash(ad, bio->bi_sector);
-       if (__rq) {
-               BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
-
-               if (elv_rq_merge_ok(__rq, bio)) {
-                       ret = ELEVATOR_BACK_MERGE;
-                       goto out;
-               }
-       }
-
-       /*
-        * check for front merge
-        */
-       __rq = as_find_arq_rb(ad, rb_key, bio_data_dir(bio));
-       if (__rq) {
-               BUG_ON(rb_key != rq_rb_key(__rq));
-
-               if (elv_rq_merge_ok(__rq, bio)) {
-                       ret = ELEVATOR_FRONT_MERGE;
-                       goto out;
-               }
-       }
-
-       return ELEVATOR_NO_MERGE;
-out:
-       if (ret) {
-               if (rq_mergeable(__rq))
-                       as_hot_arq_hash(ad, RQ_DATA(__rq));
-       }
-       *req = __rq;
-       return ret;
-}
-
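Stripped of the hash and rbtree lookups, the merge classification above reduces to sector arithmetic: a bio back-merges into a request that ends where the bio starts, and front-merges into one that starts where the bio ends. A minimal sketch of that classification, with illustrative names:

#include <stdio.h>

enum { NO_MERGE, BACK_MERGE, FRONT_MERGE };

/*
 * The real code finds the candidates via the merge hash (keyed on a
 * request's end sector) and the rbtree (keyed on its start sector);
 * here we just test the two adjacency conditions directly.
 */
static int classify_merge(unsigned long long rq_sector,
                          unsigned long long rq_nr_sectors,
                          unsigned long long bio_sector,
                          unsigned long long bio_sectors)
{
        if (rq_sector + rq_nr_sectors == bio_sector)
                return BACK_MERGE;
        if (bio_sector + bio_sectors == rq_sector)
                return FRONT_MERGE;
        return NO_MERGE;
}

int main(void)
{
        printf("%d\n", classify_merge(100, 8, 108, 8)); /* 1: back merge */
        printf("%d\n", classify_merge(116, 8, 108, 8)); /* 2: front merge */
        printf("%d\n", classify_merge(100, 8, 200, 8)); /* 0: no merge */
        return 0;
}
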
-static void as_merged_request(request_queue_t *q, struct request *req)
-{
-       struct as_data *ad = q->elevator->elevator_data;
-       struct as_rq *arq = RQ_DATA(req);
-
-       /*
-        * hash always needs to be repositioned, key is end sector
-        */
-       as_del_arq_hash(arq);
-       as_add_arq_hash(ad, arq);
-
-       /*
-        * if the merge was a front merge, we need to reposition request
-        */
-       if (rq_rb_key(req) != arq->rb_key) {
-               struct as_rq *alias, *next_arq = NULL;
-
-               if (ad->next_arq[arq->is_sync] == arq)
-                       next_arq = as_find_next_arq(ad, arq);
-
-               /*
-                * Note! We should really be moving any old aliased requests
-                * off this request and trying to insert them into the rbtree. We
-                * currently don't bother. Ditto the next function.
-                */
-               as_del_arq_rb(ad, arq);
-               if ((alias = as_add_arq_rb(ad, arq))) {
-                       list_del_init(&arq->fifo);
-                       as_add_aliased_request(ad, arq, alias);
-                       if (next_arq)
-                               ad->next_arq[arq->is_sync] = next_arq;
-               }
-               /*
-                * Note! At this stage of this and the next function, our next
-                * request may not be optimal - eg the request may have "grown"
-                * behind the disk head. We currently don't bother adjusting.
-                */
-       }
-}
-
-static void
-as_merged_requests(request_queue_t *q, struct request *req,
-                        struct request *next)
-{
-       struct as_data *ad = q->elevator->elevator_data;
-       struct as_rq *arq = RQ_DATA(req);
-       struct as_rq *anext = RQ_DATA(next);
-
-       BUG_ON(!arq);
-       BUG_ON(!anext);
-
-       /*
-        * reposition arq (this is the merged request) in hash, and in rbtree
-        * in case of a front merge
-        */
-       as_del_arq_hash(arq);
-       as_add_arq_hash(ad, arq);
-
-       if (rq_rb_key(req) != arq->rb_key) {
-               struct as_rq *alias, *next_arq = NULL;
-
-               if (ad->next_arq[arq->is_sync] == arq)
-                       next_arq = as_find_next_arq(ad, arq);
-
-               as_del_arq_rb(ad, arq);
-               if ((alias = as_add_arq_rb(ad, arq))) {
-                       list_del_init(&arq->fifo);
-                       as_add_aliased_request(ad, arq, alias);
-                       if (next_arq)
-                               ad->next_arq[arq->is_sync] = next_arq;
-               }
-       }
-
-       /*
-        * if anext expires before arq, assign its expire time to arq
-        * and move into anext position (anext will be deleted) in fifo
-        */
-       if (!list_empty(&arq->fifo) && !list_empty(&anext->fifo)) {
-               if (time_before(anext->expires, arq->expires)) {
-                       list_move(&arq->fifo, &anext->fifo);
-                       arq->expires = anext->expires;
-                       /*
-                        * Don't copy here but swap, because when anext is
-                        * removed below, it must contain the unused context
-                        */
-                       swap_io_context(&arq->io_context, &anext->io_context);
-               }
-       }
-
-       /*
-        * Transfer list of aliases
-        */
-       while (!list_empty(&next->queuelist)) {
-               struct request *__rq = list_entry_rq(next->queuelist.next);
-               struct as_rq *__arq = RQ_DATA(__rq);
-
-               list_move_tail(&__rq->queuelist, &req->queuelist);
-
-               WARN_ON(__arq->state != AS_RQ_QUEUED);
-       }
-
-       /*
-        * kill knowledge of next, this one is a goner
-        */
-       as_remove_queued_request(q, next);
-       as_put_io_context(anext);
-
-       anext->state = AS_RQ_MERGED;
-}
-
-/*
- * This is executed in a "deferred" process context, by kblockd. It calls the
- * driver's request_fn so the driver can submit that request.
- *
- * IMPORTANT! This guy will reenter the elevator, so set up all queue global
- * state before calling, and don't rely on any state over calls.
- *
- * FIXME! dispatch queue is not a queue at all!
- */
-static void as_work_handler(void *data)
-{
-       struct request_queue *q = data;
-       unsigned long flags;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-       if (!as_queue_empty(q))
-               q->request_fn(q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-static void as_put_request(request_queue_t *q, struct request *rq)
-{
-       struct as_data *ad = q->elevator->elevator_data;
-       struct as_rq *arq = RQ_DATA(rq);
-
-       if (!arq) {
-               WARN_ON(1);
-               return;
-       }
-
-       if (unlikely(arq->state != AS_RQ_POSTSCHED &&
-                    arq->state != AS_RQ_PRESCHED &&
-                    arq->state != AS_RQ_MERGED)) {
-               printk("arq->state %d\n", arq->state);
-               WARN_ON(1);
-       }
-
-       mempool_free(arq, ad->arq_pool);
-       rq->elevator_private = NULL;
-}
-
-static int as_set_request(request_queue_t *q, struct request *rq,
-                         struct bio *bio, gfp_t gfp_mask)
-{
-       struct as_data *ad = q->elevator->elevator_data;
-       struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask);
-
-       if (arq) {
-               memset(arq, 0, sizeof(*arq));
-               RB_CLEAR(&arq->rb_node);
-               arq->request = rq;
-               arq->state = AS_RQ_PRESCHED;
-               arq->io_context = NULL;
-               INIT_LIST_HEAD(&arq->hash);
-               arq->on_hash = 0;
-               INIT_LIST_HEAD(&arq->fifo);
-               rq->elevator_private = arq;
-               return 0;
-       }
-
-       return 1;
-}
-
-static int as_may_queue(request_queue_t *q, int rw, struct bio *bio)
-{
-       int ret = ELV_MQUEUE_MAY;
-       struct as_data *ad = q->elevator->elevator_data;
-       struct io_context *ioc;
-       if (ad->antic_status == ANTIC_WAIT_REQ ||
-                       ad->antic_status == ANTIC_WAIT_NEXT) {
-               ioc = as_get_io_context();
-               if (ad->io_context == ioc)
-                       ret = ELV_MQUEUE_MUST;
-               put_io_context(ioc);
-       }
-
-       return ret;
-}
-
-static void as_exit_queue(elevator_t *e)
-{
-       struct as_data *ad = e->elevator_data;
-
-       del_timer_sync(&ad->antic_timer);
-       kblockd_flush();
-
-       BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC]));
-       BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC]));
-
-       mempool_destroy(ad->arq_pool);
-       put_io_context(ad->io_context);
-       kfree(ad->hash);
-       kfree(ad);
-}
-
-/*
- * initialize elevator private data (as_data), and allocate an arq for
- * each request on the free lists
- */
-static int as_init_queue(request_queue_t *q, elevator_t *e)
-{
-       struct as_data *ad;
-       int i;
-
-       if (!arq_pool)
-               return -ENOMEM;
-
-       ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
-       if (!ad)
-               return -ENOMEM;
-       memset(ad, 0, sizeof(*ad));
-
-       ad->q = q; /* Identify what queue the data belongs to */
-
-       ad->hash = kmalloc_node(sizeof(struct list_head)*AS_HASH_ENTRIES,
-                               GFP_KERNEL, q->node);
-       if (!ad->hash) {
-               kfree(ad);
-               return -ENOMEM;
-       }
-
-       ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
-                               mempool_free_slab, arq_pool, q->node);
-       if (!ad->arq_pool) {
-               kfree(ad->hash);
-               kfree(ad);
-               return -ENOMEM;
-       }
-
-       /* anticipatory scheduling helpers */
-       ad->antic_timer.function = as_antic_timeout;
-       ad->antic_timer.data = (unsigned long)q;
-       init_timer(&ad->antic_timer);
-       INIT_WORK(&ad->antic_work, as_work_handler, q);
-
-       for (i = 0; i < AS_HASH_ENTRIES; i++)
-               INIT_LIST_HEAD(&ad->hash[i]);
-
-       INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
-       INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
-       ad->sort_list[REQ_SYNC] = RB_ROOT;
-       ad->sort_list[REQ_ASYNC] = RB_ROOT;
-       ad->fifo_expire[REQ_SYNC] = default_read_expire;
-       ad->fifo_expire[REQ_ASYNC] = default_write_expire;
-       ad->antic_expire = default_antic_expire;
-       ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
-       ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;
-       e->elevator_data = ad;
-
-       ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
-       ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
-       if (ad->write_batch_count < 2)
-               ad->write_batch_count = 2;
-
-       return 0;
-}
-
-/*
- * sysfs parts below
- */
-struct as_fs_entry {
-       struct attribute attr;
-       ssize_t (*show)(struct as_data *, char *);
-       ssize_t (*store)(struct as_data *, const char *, size_t);
-};
-
-static ssize_t
-as_var_show(unsigned int var, char *page)
-{
-       return sprintf(page, "%d\n", var);
-}
-
-static ssize_t
-as_var_store(unsigned long *var, const char *page, size_t count)
-{
-       char *p = (char *) page;
-
-       *var = simple_strtoul(p, &p, 10);
-       return count;
-}
-
-static ssize_t as_est_show(struct as_data *ad, char *page)
-{
-       int pos = 0;
-
-       pos += sprintf(page+pos, "%lu %% exit probability\n", 100*ad->exit_prob/256);
-       pos += sprintf(page+pos, "%lu ms new thinktime\n", ad->new_ttime_mean);
-       pos += sprintf(page+pos, "%llu sectors new seek distance\n", (unsigned long long)ad->new_seek_mean);
-
-       return pos;
-}
-
-#define SHOW_FUNCTION(__FUNC, __VAR)                           \
-static ssize_t __FUNC(struct as_data *ad, char *page)          \
-{                                                              \
-       return as_var_show(jiffies_to_msecs((__VAR)), (page));  \
-}
-SHOW_FUNCTION(as_readexpire_show, ad->fifo_expire[REQ_SYNC]);
-SHOW_FUNCTION(as_writeexpire_show, ad->fifo_expire[REQ_ASYNC]);
-SHOW_FUNCTION(as_anticexpire_show, ad->antic_expire);
-SHOW_FUNCTION(as_read_batchexpire_show, ad->batch_expire[REQ_SYNC]);
-SHOW_FUNCTION(as_write_batchexpire_show, ad->batch_expire[REQ_ASYNC]);
-#undef SHOW_FUNCTION
-
-#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)                                \
-static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count)      \
-{                                                                      \
-       int ret = as_var_store(__PTR, (page), count);           \
-       if (*(__PTR) < (MIN))                                           \
-               *(__PTR) = (MIN);                                       \
-       else if (*(__PTR) > (MAX))                                      \
-               *(__PTR) = (MAX);                                       \
-       *(__PTR) = msecs_to_jiffies(*(__PTR));                          \
-       return ret;                                                     \
-}
-STORE_FUNCTION(as_readexpire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
-STORE_FUNCTION(as_writeexpire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
-STORE_FUNCTION(as_anticexpire_store, &ad->antic_expire, 0, INT_MAX);
-STORE_FUNCTION(as_read_batchexpire_store,
-                       &ad->batch_expire[REQ_SYNC], 0, INT_MAX);
-STORE_FUNCTION(as_write_batchexpire_store,
-                       &ad->batch_expire[REQ_ASYNC], 0, INT_MAX);
-#undef STORE_FUNCTION
-
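Note the order the macro uses: the value is parsed and clamped in milliseconds first, then converted to jiffies. A userspace sketch of that store path, assuming HZ=250 and a round-up conversion; the helper names are illustrative, not the kernel's:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define HZ 250  /* assumed tick rate for this sketch */

/* Round-up ms-to-tick conversion, standing in for msecs_to_jiffies(). */
static unsigned long ms_to_ticks(unsigned long ms)
{
        return (ms * HZ + 999) / 1000;
}

/*
 * Parse a decimal tunable, clamp it in milliseconds, then store it in
 * ticks: the same order as the STORE_FUNCTION macro above.
 */
static unsigned long store_tunable(const char *page, unsigned long min_ms,
                                   unsigned long max_ms)
{
        unsigned long val = strtoul(page, NULL, 10);

        if (val < min_ms)
                val = min_ms;
        else if (val > max_ms)
                val = max_ms;
        return ms_to_ticks(val);
}

int main(void)
{
        printf("%lu\n", store_tunable("125\n", 0, INT_MAX)); /* 32 ticks */
        printf("%lu\n", store_tunable("7", 0, INT_MAX));     /* 2 ticks */
        return 0;
}
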
-static struct as_fs_entry as_est_entry = {
-       .attr = {.name = "est_time", .mode = S_IRUGO },
-       .show = as_est_show,
-};
-static struct as_fs_entry as_readexpire_entry = {
-       .attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
-       .show = as_readexpire_show,
-       .store = as_readexpire_store,
-};
-static struct as_fs_entry as_writeexpire_entry = {
-       .attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
-       .show = as_writeexpire_show,
-       .store = as_writeexpire_store,
-};
-static struct as_fs_entry as_anticexpire_entry = {
-       .attr = {.name = "antic_expire", .mode = S_IRUGO | S_IWUSR },
-       .show = as_anticexpire_show,
-       .store = as_anticexpire_store,
-};
-static struct as_fs_entry as_read_batchexpire_entry = {
-       .attr = {.name = "read_batch_expire", .mode = S_IRUGO | S_IWUSR },
-       .show = as_read_batchexpire_show,
-       .store = as_read_batchexpire_store,
-};
-static struct as_fs_entry as_write_batchexpire_entry = {
-       .attr = {.name = "write_batch_expire", .mode = S_IRUGO | S_IWUSR },
-       .show = as_write_batchexpire_show,
-       .store = as_write_batchexpire_store,
-};
-
-static struct attribute *default_attrs[] = {
-       &as_est_entry.attr,
-       &as_readexpire_entry.attr,
-       &as_writeexpire_entry.attr,
-       &as_anticexpire_entry.attr,
-       &as_read_batchexpire_entry.attr,
-       &as_write_batchexpire_entry.attr,
-       NULL,
-};
-
-#define to_as(atr) container_of((atr), struct as_fs_entry, attr)
-
-static ssize_t
-as_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
-       elevator_t *e = container_of(kobj, elevator_t, kobj);
-       struct as_fs_entry *entry = to_as(attr);
-
-       if (!entry->show)
-               return -EIO;
-
-       return entry->show(e->elevator_data, page);
-}
-
-static ssize_t
-as_attr_store(struct kobject *kobj, struct attribute *attr,
-                   const char *page, size_t length)
-{
-       elevator_t *e = container_of(kobj, elevator_t, kobj);
-       struct as_fs_entry *entry = to_as(attr);
-
-       if (!entry->store)
-               return -EIO;
-
-       return entry->store(e->elevator_data, page, length);
-}
-
-static struct sysfs_ops as_sysfs_ops = {
-       .show   = as_attr_show,
-       .store  = as_attr_store,
-};
-
-static struct kobj_type as_ktype = {
-       .sysfs_ops      = &as_sysfs_ops,
-       .default_attrs  = default_attrs,
-};
-
-static struct elevator_type iosched_as = {
-       .ops = {
-               .elevator_merge_fn =            as_merge,
-               .elevator_merged_fn =           as_merged_request,
-               .elevator_merge_req_fn =        as_merged_requests,
-               .elevator_dispatch_fn =         as_dispatch_request,
-               .elevator_add_req_fn =          as_add_request,
-               .elevator_activate_req_fn =     as_activate_request,
-               .elevator_deactivate_req_fn =   as_deactivate_request,
-               .elevator_queue_empty_fn =      as_queue_empty,
-               .elevator_completed_req_fn =    as_completed_request,
-               .elevator_former_req_fn =       as_former_request,
-               .elevator_latter_req_fn =       as_latter_request,
-               .elevator_set_req_fn =          as_set_request,
-               .elevator_put_req_fn =          as_put_request,
-               .elevator_may_queue_fn =        as_may_queue,
-               .elevator_init_fn =             as_init_queue,
-               .elevator_exit_fn =             as_exit_queue,
-       },
-
-       .elevator_ktype = &as_ktype,
-       .elevator_name = "anticipatory",
-       .elevator_owner = THIS_MODULE,
-};
-
-static int __init as_init(void)
-{
-       int ret;
-
-       arq_pool = kmem_cache_create("as_arq", sizeof(struct as_rq),
-                                    0, 0, NULL, NULL);
-       if (!arq_pool)
-               return -ENOMEM;
-
-       ret = elv_register(&iosched_as);
-       if (!ret) {
-               /*
-                * don't allow AS to get unregistered, since we would have
-                * to browse all tasks in the system and release their
-                * as_io_context first
-                */
-               __module_get(THIS_MODULE);
-               return 0;
-       }
-
-       kmem_cache_destroy(arq_pool);
-       return ret;
-}
-
-static void __exit as_exit(void)
-{
-       elv_unregister(&iosched_as);
-       kmem_cache_destroy(arq_pool);
-}
-
-module_init(as_init);
-module_exit(as_exit);
-
-MODULE_AUTHOR("Nick Piggin");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("anticipatory IO scheduler");
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
deleted file mode 100644 (file)
index ecacca9..0000000
+++ /dev/null
@@ -1,2428 +0,0 @@
-/*
- *  linux/drivers/block/cfq-iosched.c
- *
- *  CFQ, or complete fairness queueing, disk scheduler.
- *
- *  Based on ideas from a previously unfinished io
- *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
- *
- *  Copyright (C) 2003 Jens Axboe <axboe@suse.de>
- */
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/blkdev.h>
-#include <linux/elevator.h>
-#include <linux/bio.h>
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/compiler.h>
-#include <linux/hash.h>
-#include <linux/rbtree.h>
-#include <linux/mempool.h>
-#include <linux/ioprio.h>
-#include <linux/writeback.h>
-
-/*
- * tunables
- */
-static int cfq_quantum = 4;            /* max queue in one round of service */
-static int cfq_queued = 8;             /* minimum rq allocate limit per-queue*/
-static int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
-static int cfq_back_max = 16 * 1024;   /* maximum backwards seek, in KiB */
-static int cfq_back_penalty = 2;       /* penalty of a backwards seek */
-
-static int cfq_slice_sync = HZ / 10;
-static int cfq_slice_async = HZ / 25;
-static int cfq_slice_async_rq = 2;
-static int cfq_slice_idle = HZ / 100;
-
-#define CFQ_IDLE_GRACE         (HZ / 10)
-#define CFQ_SLICE_SCALE                (5)
-
-#define CFQ_KEY_ASYNC          (0)
-#define CFQ_KEY_ANY            (0xffff)
-
-/*
- * disable queueing at the driver/hardware level
- */
-static int cfq_max_depth = 2;
-
-/*
- * for the hash of cfqq inside the cfqd
- */
-#define CFQ_QHASH_SHIFT                6
-#define CFQ_QHASH_ENTRIES      (1 << CFQ_QHASH_SHIFT)
-#define list_entry_qhash(entry)        hlist_entry((entry), struct cfq_queue, cfq_hash)
-
-/*
- * for the hash of crq inside the cfqq
- */
-#define CFQ_MHASH_SHIFT                6
-#define CFQ_MHASH_BLOCK(sec)   ((sec) >> 3)
-#define CFQ_MHASH_ENTRIES      (1 << CFQ_MHASH_SHIFT)
-#define CFQ_MHASH_FN(sec)      hash_long(CFQ_MHASH_BLOCK(sec), CFQ_MHASH_SHIFT)
-#define rq_hash_key(rq)                ((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr)   hlist_entry((ptr), struct cfq_rq, hash)
-
-#define list_entry_cfqq(ptr)   list_entry((ptr), struct cfq_queue, cfq_list)
-#define list_entry_fifo(ptr)   list_entry((ptr), struct request, queuelist)
-
-#define RQ_DATA(rq)            (rq)->elevator_private
-
-/*
- * rb-tree defines
- */
-#define RB_NONE                        (2)
-#define RB_EMPTY(node)         ((node)->rb_node == NULL)
-#define RB_CLEAR_COLOR(node)   (node)->rb_color = RB_NONE
-#define RB_CLEAR(node)         do {    \
-       (node)->rb_parent = NULL;       \
-       RB_CLEAR_COLOR((node));         \
-       (node)->rb_right = NULL;        \
-       (node)->rb_left = NULL;         \
-} while (0)
-#define RB_CLEAR_ROOT(root)    ((root)->rb_node = NULL)
-#define rb_entry_crq(node)     rb_entry((node), struct cfq_rq, rb_node)
-#define rq_rb_key(rq)          (rq)->sector
-
-static kmem_cache_t *crq_pool;
-static kmem_cache_t *cfq_pool;
-static kmem_cache_t *cfq_ioc_pool;
-
-#define CFQ_PRIO_LISTS         IOPRIO_BE_NR
-#define cfq_class_idle(cfqq)   ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
-#define cfq_class_be(cfqq)     ((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
-#define cfq_class_rt(cfqq)     ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
-
-#define ASYNC                  (0)
-#define SYNC                   (1)
-
-#define cfq_cfqq_dispatched(cfqq)      \
-       ((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC])
-
-#define cfq_cfqq_class_sync(cfqq)      ((cfqq)->key != CFQ_KEY_ASYNC)
-
-#define cfq_cfqq_sync(cfqq)            \
-       (cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])
-
-/*
- * Per block device queue structure
- */
-struct cfq_data {
-       atomic_t ref;
-       request_queue_t *queue;
-
-       /*
-        * rr list of queues with requests and the count of them
-        */
-       struct list_head rr_list[CFQ_PRIO_LISTS];
-       struct list_head busy_rr;
-       struct list_head cur_rr;
-       struct list_head idle_rr;
-       unsigned int busy_queues;
-
-       /*
-        * non-ordered list of empty cfqq's
-        */
-       struct list_head empty_list;
-
-       /*
-        * cfqq lookup hash
-        */
-       struct hlist_head *cfq_hash;
-
-       /*
-        * global crq hash for all queues
-        */
-       struct hlist_head *crq_hash;
-
-       unsigned int max_queued;
-
-       mempool_t *crq_pool;
-
-       int rq_in_driver;
-
-       /*
-        * schedule slice state info
-        */
-       /*
-        * idle window management
-        */
-       struct timer_list idle_slice_timer;
-       struct work_struct unplug_work;
-
-       struct cfq_queue *active_queue;
-       struct cfq_io_context *active_cic;
-       int cur_prio, cur_end_prio;
-       unsigned int dispatch_slice;
-
-       struct timer_list idle_class_timer;
-
-       sector_t last_sector;
-       unsigned long last_end_request;
-
-       unsigned int rq_starved;
-
-       /*
-        * tunables, see top of file
-        */
-       unsigned int cfq_quantum;
-       unsigned int cfq_queued;
-       unsigned int cfq_fifo_expire[2];
-       unsigned int cfq_back_penalty;
-       unsigned int cfq_back_max;
-       unsigned int cfq_slice[2];
-       unsigned int cfq_slice_async_rq;
-       unsigned int cfq_slice_idle;
-       unsigned int cfq_max_depth;
-};
-
-/*
- * Per process-grouping structure
- */
-struct cfq_queue {
-       /* reference count */
-       atomic_t ref;
-       /* parent cfq_data */
-       struct cfq_data *cfqd;
-       /* cfqq lookup hash */
-       struct hlist_node cfq_hash;
-       /* hash key */
-       unsigned int key;
-       /* on either rr or empty list of cfqd */
-       struct list_head cfq_list;
-       /* sorted list of pending requests */
-       struct rb_root sort_list;
-       /* if fifo isn't expired, next request to serve */
-       struct cfq_rq *next_crq;
-       /* requests queued in sort_list */
-       int queued[2];
-       /* currently allocated requests */
-       int allocated[2];
-       /* fifo list of requests in sort_list */
-       struct list_head fifo;
-
-       unsigned long slice_start;
-       unsigned long slice_end;
-       unsigned long slice_left;
-       unsigned long service_last;
-
-       /* number of requests that are on the dispatch list */
-       int on_dispatch[2];
-
-       /* io prio of this group */
-       unsigned short ioprio, org_ioprio;
-       unsigned short ioprio_class, org_ioprio_class;
-
-       /* various state flags, see below */
-       unsigned int flags;
-};
-
-struct cfq_rq {
-       struct rb_node rb_node;
-       sector_t rb_key;
-       struct request *request;
-       struct hlist_node hash;
-
-       struct cfq_queue *cfq_queue;
-       struct cfq_io_context *io_context;
-
-       unsigned int crq_flags;
-};
-
-enum cfqq_state_flags {
-       CFQ_CFQQ_FLAG_on_rr = 0,
-       CFQ_CFQQ_FLAG_wait_request,
-       CFQ_CFQQ_FLAG_must_alloc,
-       CFQ_CFQQ_FLAG_must_alloc_slice,
-       CFQ_CFQQ_FLAG_must_dispatch,
-       CFQ_CFQQ_FLAG_fifo_expire,
-       CFQ_CFQQ_FLAG_idle_window,
-       CFQ_CFQQ_FLAG_prio_changed,
-       CFQ_CFQQ_FLAG_expired,
-};
-
-#define CFQ_CFQQ_FNS(name)                                             \
-static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)                \
-{                                                                      \
-       cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);                     \
-}                                                                      \
-static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)       \
-{                                                                      \
-       cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                    \
-}                                                                      \
-static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)                \
-{                                                                      \
-       return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;        \
-}
-
-CFQ_CFQQ_FNS(on_rr);
-CFQ_CFQQ_FNS(wait_request);
-CFQ_CFQQ_FNS(must_alloc);
-CFQ_CFQQ_FNS(must_alloc_slice);
-CFQ_CFQQ_FNS(must_dispatch);
-CFQ_CFQQ_FNS(fifo_expire);
-CFQ_CFQQ_FNS(idle_window);
-CFQ_CFQQ_FNS(prio_changed);
-CFQ_CFQQ_FNS(expired);
-#undef CFQ_CFQQ_FNS
-
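A self-contained illustration of the accessor-generating macro pattern above, shrunk to a plain struct with two flags; all names here are made up for the sketch:

#include <stdio.h>

/*
 * Per-queue state packed into one flags word, with mark/clear/test
 * accessors generated per flag name, as CFQ_CFQQ_FNS does above.
 */
struct queue { unsigned int flags; };

enum { FLAG_on_rr = 0, FLAG_idle_window };

#define QUEUE_FNS(name)                                                 \
static inline void mark_##name(struct queue *q)                         \
{                                                                       \
        q->flags |= 1U << FLAG_##name;                                  \
}                                                                       \
static inline void clear_##name(struct queue *q)                        \
{                                                                       \
        q->flags &= ~(1U << FLAG_##name);                               \
}                                                                       \
static inline int test_##name(const struct queue *q)                    \
{                                                                       \
        return (q->flags & (1U << FLAG_##name)) != 0;                   \
}

QUEUE_FNS(on_rr)
QUEUE_FNS(idle_window)

int main(void)
{
        struct queue q = { 0 };

        mark_on_rr(&q);
        printf("%d %d\n", test_on_rr(&q), test_idle_window(&q)); /* 1 0 */
        clear_on_rr(&q);
        printf("%d\n", test_on_rr(&q)); /* 0 */
        return 0;
}
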
-enum cfq_rq_state_flags {
-       CFQ_CRQ_FLAG_is_sync = 0,
-};
-
-#define CFQ_CRQ_FNS(name)                                              \
-static inline void cfq_mark_crq_##name(struct cfq_rq *crq)             \
-{                                                                      \
-       crq->crq_flags |= (1 << CFQ_CRQ_FLAG_##name);                   \
-}                                                                      \
-static inline void cfq_clear_crq_##name(struct cfq_rq *crq)            \
-{                                                                      \
-       crq->crq_flags &= ~(1 << CFQ_CRQ_FLAG_##name);                  \
-}                                                                      \
-static inline int cfq_crq_##name(const struct cfq_rq *crq)             \
-{                                                                      \
-       return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0;      \
-}
-
-CFQ_CRQ_FNS(is_sync);
-#undef CFQ_CRQ_FNS
-
-static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
-static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
-static void cfq_put_cfqd(struct cfq_data *cfqd);
-
-#define process_sync(tsk)      ((tsk)->flags & PF_SYNCWRITE)
-
-/*
- * lots of deadline iosched dupes, can be abstracted later...
- */
-static inline void cfq_del_crq_hash(struct cfq_rq *crq)
-{
-       hlist_del_init(&crq->hash);
-}
-
-static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
-{
-       const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));
-
-       hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
-}
-
-static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
-{
-       struct hlist_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
-       struct hlist_node *entry, *next;
-
-       hlist_for_each_safe(entry, next, hash_list) {
-               struct cfq_rq *crq = list_entry_hash(entry);
-               struct request *__rq = crq->request;
-
-               if (!rq_mergeable(__rq)) {
-                       cfq_del_crq_hash(crq);
-                       continue;
-               }
-
-               if (rq_hash_key(__rq) == offset)
-                       return __rq;
-       }
-
-       return NULL;
-}
-
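The lookup above prunes as it goes: entries that have stopped being mergeable are unlinked during the walk rather than in a separate cleanup pass. A minimal sketch of that lazy-pruning chain walk, using illustrative types and a singly linked chain for brevity:

#include <stdio.h>

struct entry {
        unsigned long key;
        int mergeable;
        struct entry *next;
};

/* Walk the chain; unlink stale entries on sight, return a key match. */
static struct entry *find(struct entry **chain, unsigned long key)
{
        struct entry **pp = chain;

        while (*pp) {
                struct entry *e = *pp;

                if (!e->mergeable) {
                        *pp = e->next;  /* prune lazily, keep walking */
                        continue;
                }
                if (e->key == key)
                        return e;
                pp = &e->next;
        }
        return NULL;
}

int main(void)
{
        struct entry c = { 30, 1, NULL };
        struct entry b = { 20, 0, &c }; /* stale: pruned during the walk */
        struct entry a = { 10, 1, &b };
        struct entry *head = &a;

        printf("%lu\n", find(&head, 30)->key);          /* 30 */
        printf("%s\n", a.next == &c ? "b pruned" : "b kept");
        return 0;
}
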
-/*
- * schedule a run of the queue if there are requests pending and no one in
- * the driver that will restart queueing
- */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
-{
-       if (!cfqd->rq_in_driver && cfqd->busy_queues)
-               kblockd_schedule_work(&cfqd->unplug_work);
-}
-
-static int cfq_queue_empty(request_queue_t *q)
-{
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-
-       return !cfqd->busy_queues;
-}
-
-/*
- * Lifted from AS - choose which of crq1 and crq2 is best served now.
- * We choose the request that is closest to the head right now. Distances
- * behind the head are penalized and only allowed to a certain extent.
- */
-static struct cfq_rq *
-cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
-{
-       sector_t last, s1, s2, d1 = 0, d2 = 0;
-       int r1_wrap = 0, r2_wrap = 0;   /* requests are behind the disk head */
-       unsigned long back_max;
-
-       if (crq1 == NULL || crq1 == crq2)
-               return crq2;
-       if (crq2 == NULL)
-               return crq1;
-
-       if (cfq_crq_is_sync(crq1) && !cfq_crq_is_sync(crq2))
-               return crq1;
-       else if (cfq_crq_is_sync(crq2) && !cfq_crq_is_sync(crq1))
-               return crq2;
-
-       s1 = crq1->request->sector;
-       s2 = crq2->request->sector;
-
-       last = cfqd->last_sector;
-
-       /*
-        * by definition, 1KiB is 2 sectors
-        */
-       back_max = cfqd->cfq_back_max * 2;
-
-       /*
-        * Strict one way elevator _except_ in the case where we allow
-        * short backward seeks which are biased as twice the cost of a
-        * similar forward seek.
-        */
-       if (s1 >= last)
-               d1 = s1 - last;
-       else if (s1 + back_max >= last)
-               d1 = (last - s1) * cfqd->cfq_back_penalty;
-       else
-               r1_wrap = 1;
-
-       if (s2 >= last)
-               d2 = s2 - last;
-       else if (s2 + back_max >= last)
-               d2 = (last - s2) * cfqd->cfq_back_penalty;
-       else
-               r2_wrap = 1;
-
-       /* Found required data */
-       if (!r1_wrap && r2_wrap)
-               return crq1;
-       else if (!r2_wrap && r1_wrap)
-               return crq2;
-       else if (r1_wrap && r2_wrap) {
-               /* both behind the head */
-               if (s1 <= s2)
-                       return crq1;
-               else
-                       return crq2;
-       }
-
-       /* Both requests in front of the head */
-       if (d1 < d2)
-               return crq1;
-       else if (d2 < d1)
-               return crq2;
-       else {
-               if (s1 >= s2)
-                       return crq1;
-               else
-                       return crq2;
-       }
-}
-
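The chooser above can be read as "nearest effective distance wins": forward seeks cost their length, short backward seeks cost a penalty multiple of theirs, and anything further back is treated as wrapped. A simplified userspace sketch that drops the sync preference and the exact tie-breaks; names and constants are illustrative:

#include <stdio.h>

typedef unsigned long long sector_t;

#define BACK_MAX      (16 * 1024 * 2ULL) /* cfq_back_max KiB, 2 sectors/KiB */
#define BACK_PENALTY  2                  /* cost multiplier for seeking back */

/*
 * Effective distance from the head to sector s: forward seeks cost
 * their length, short backward seeks BACK_PENALTY times theirs, and
 * anything further back "wraps" (unreachable this pass).
 */
static int distance(sector_t s, sector_t head, sector_t *d)
{
        if (s >= head) {
                *d = s - head;
                return 0;
        }
        if (s + BACK_MAX >= head) {
                *d = (head - s) * BACK_PENALTY;
                return 0;
        }
        return 1;       /* wrapped */
}

static sector_t choose(sector_t s1, sector_t s2, sector_t head)
{
        sector_t d1, d2;
        int w1 = distance(s1, head, &d1);
        int w2 = distance(s2, head, &d2);

        if (w1 && w2)
                return s1 <= s2 ? s1 : s2;      /* both behind: lowest wins */
        if (w1)
                return s2;
        if (w2)
                return s1;
        return d1 <= d2 ? s1 : s2;              /* nearest effective distance */
}

int main(void)
{
        /* head at 100000: 100800 is 800 ahead, 99900 costs 100*2 = 200 */
        printf("%llu\n", choose(100800, 99900, 100000)); /* 99900 wins */
        return 0;
}
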
-/*
- * would be nice to take fifo expire time into account as well
- */
-static struct cfq_rq *
-cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                 struct cfq_rq *last)
-{
-       struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
-       struct rb_node *rbnext, *rbprev;
-
-       if (!(rbnext = rb_next(&last->rb_node))) {
-               rbnext = rb_first(&cfqq->sort_list);
-               if (rbnext == &last->rb_node)
-                       rbnext = NULL;
-       }
-
-       rbprev = rb_prev(&last->rb_node);
-
-       if (rbprev)
-               crq_prev = rb_entry_crq(rbprev);
-       if (rbnext)
-               crq_next = rb_entry_crq(rbnext);
-
-       return cfq_choose_req(cfqd, crq_next, crq_prev);
-}
-
-static void cfq_update_next_crq(struct cfq_rq *crq)
-{
-       struct cfq_queue *cfqq = crq->cfq_queue;
-
-       if (cfqq->next_crq == crq)
-               cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
-}
-
-static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
-{
-       struct cfq_data *cfqd = cfqq->cfqd;
-       struct list_head *list, *entry;
-
-       BUG_ON(!cfq_cfqq_on_rr(cfqq));
-
-       list_del(&cfqq->cfq_list);
-
-       if (cfq_class_rt(cfqq))
-               list = &cfqd->cur_rr;
-       else if (cfq_class_idle(cfqq))
-               list = &cfqd->idle_rr;
-       else {
-               /*
-                * if cfqq has requests in flight, don't allow it to be
-                * found in cfq_set_active_queue before it has finished them.
-                * this is done to increase fairness between a process that
-                * has lots of io pending and one that only issues a request
-                * sporadically or synchronously
-                */
-               if (cfq_cfqq_dispatched(cfqq))
-                       list = &cfqd->busy_rr;
-               else
-                       list = &cfqd->rr_list[cfqq->ioprio];
-       }
-
-       /*
-        * if queue was preempted, just add to front to be fair. busy_rr
-        * isn't sorted.
-        */
-       if (preempted || list == &cfqd->busy_rr) {
-               list_add(&cfqq->cfq_list, list);
-               return;
-       }
-
-       /*
-        * sort by when queue was last serviced
-        */
-       entry = list;
-       while ((entry = entry->prev) != list) {
-               struct cfq_queue *__cfqq = list_entry_cfqq(entry);
-
-               if (!__cfqq->service_last)
-                       break;
-               if (time_before(__cfqq->service_last, cfqq->service_last))
-                       break;
-       }
-
-       list_add(&cfqq->cfq_list, entry);
-}
-
-/*
- * add to busy list of queues for service, trying to be fair in ordering
- * the pending list according to last request service
- */
-static inline void
-cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
-       BUG_ON(cfq_cfqq_on_rr(cfqq));
-       cfq_mark_cfqq_on_rr(cfqq);
-       cfqd->busy_queues++;
-
-       cfq_resort_rr_list(cfqq, 0);
-}
-
-static inline void
-cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
-       BUG_ON(!cfq_cfqq_on_rr(cfqq));
-       cfq_clear_cfqq_on_rr(cfqq);
-       list_move(&cfqq->cfq_list, &cfqd->empty_list);
-
-       BUG_ON(!cfqd->busy_queues);
-       cfqd->busy_queues--;
-}
-
-/*
- * rb tree support functions
- */
-static inline void cfq_del_crq_rb(struct cfq_rq *crq)
-{
-       struct cfq_queue *cfqq = crq->cfq_queue;
-       struct cfq_data *cfqd = cfqq->cfqd;
-       const int sync = cfq_crq_is_sync(crq);
-
-       BUG_ON(!cfqq->queued[sync]);
-       cfqq->queued[sync]--;
-
-       cfq_update_next_crq(crq);
-
-       rb_erase(&crq->rb_node, &cfqq->sort_list);
-       RB_CLEAR_COLOR(&crq->rb_node);
-
-       if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
-               cfq_del_cfqq_rr(cfqd, cfqq);
-}
-
-static struct cfq_rq *
-__cfq_add_crq_rb(struct cfq_rq *crq)
-{
-       struct rb_node **p = &crq->cfq_queue->sort_list.rb_node;
-       struct rb_node *parent = NULL;
-       struct cfq_rq *__crq;
-
-       while (*p) {
-               parent = *p;
-               __crq = rb_entry_crq(parent);
-
-               if (crq->rb_key < __crq->rb_key)
-                       p = &(*p)->rb_left;
-               else if (crq->rb_key > __crq->rb_key)
-                       p = &(*p)->rb_right;
-               else
-                       return __crq;
-       }
-
-       rb_link_node(&crq->rb_node, parent, p);
-       return NULL;
-}
-
-static void cfq_add_crq_rb(struct cfq_rq *crq)
-{
-       struct cfq_queue *cfqq = crq->cfq_queue;
-       struct cfq_data *cfqd = cfqq->cfqd;
-       struct request *rq = crq->request;
-       struct cfq_rq *__alias;
-
-       crq->rb_key = rq_rb_key(rq);
-       cfqq->queued[cfq_crq_is_sync(crq)]++;
-
-       /*
-        * looks a little odd, but the first insert might return an alias.
-        * if that happens, put the alias on the dispatch list
-        */
-       while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
-               cfq_dispatch_insert(cfqd->queue, __alias);
-
-       rb_insert_color(&crq->rb_node, &cfqq->sort_list);
-
-       if (!cfq_cfqq_on_rr(cfqq))
-               cfq_add_cfqq_rr(cfqd, cfqq);
-
-       /*
-        * check if this request is a better next-serve candidate
-        */
-       cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
-}
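
The "alias" comment above is the subtle part of cfq_add_crq_rb: an insert into the per-queue sort tree may find an existing entry with the same key, and that entry is pushed straight to the dispatch list instead. Below is a stand-alone sketch of the same idiom using a plain binary search tree and invented names (struct node, insert); the kernel code uses the shared rbtree helpers, not this simplified tree.

#include <stdio.h>

struct node {
        long key;
        struct node *left, *right;
};

/* returns NULL if the node was linked, or the existing node with the same key */
static struct node *insert(struct node **root, struct node *n)
{
        struct node **p = root;

        while (*p) {
                if (n->key < (*p)->key)
                        p = &(*p)->left;
                else if (n->key > (*p)->key)
                        p = &(*p)->right;
                else
                        return *p;      /* alias: this key is already in the tree */
        }
        *p = n;
        return NULL;
}

int main(void)
{
        struct node a = { .key = 10 }, b = { .key = 20 }, c = { .key = 10 };
        struct node *root = NULL, *alias;

        insert(&root, &a);
        insert(&root, &b);
        alias = insert(&root, &c);      /* same key as a */
        printf("inserting key %ld: %s\n", c.key,
               alias ? "alias found, dispatch it instead" : "linked into the tree");
        return 0;
}
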
-
-static inline void
-cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
-{
-       rb_erase(&crq->rb_node, &cfqq->sort_list);
-       cfqq->queued[cfq_crq_is_sync(crq)]--;
-
-       cfq_add_crq_rb(crq);
-}
-
-static struct request *cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
-
-{
-       struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid, CFQ_KEY_ANY);
-       struct rb_node *n;
-
-       if (!cfqq)
-               goto out;
-
-       n = cfqq->sort_list.rb_node;
-       while (n) {
-               struct cfq_rq *crq = rb_entry_crq(n);
-
-               if (sector < crq->rb_key)
-                       n = n->rb_left;
-               else if (sector > crq->rb_key)
-                       n = n->rb_right;
-               else
-                       return crq->request;
-       }
-
-out:
-       return NULL;
-}
-
-static void cfq_activate_request(request_queue_t *q, struct request *rq)
-{
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-
-       cfqd->rq_in_driver++;
-}
-
-static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
-{
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-
-       WARN_ON(!cfqd->rq_in_driver);
-       cfqd->rq_in_driver--;
-}
-
-static void cfq_remove_request(struct request *rq)
-{
-       struct cfq_rq *crq = RQ_DATA(rq);
-
-       list_del_init(&rq->queuelist);
-       cfq_del_crq_rb(crq);
-       cfq_del_crq_hash(crq);
-}
-
-static int
-cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
-{
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-       struct request *__rq;
-       int ret;
-
-       __rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
-       if (__rq && elv_rq_merge_ok(__rq, bio)) {
-               ret = ELEVATOR_BACK_MERGE;
-               goto out;
-       }
-
-       __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio));
-       if (__rq && elv_rq_merge_ok(__rq, bio)) {
-               ret = ELEVATOR_FRONT_MERGE;
-               goto out;
-       }
-
-       return ELEVATOR_NO_MERGE;
-out:
-       *req = __rq;
-       return ret;
-}
-
-static void cfq_merged_request(request_queue_t *q, struct request *req)
-{
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-       struct cfq_rq *crq = RQ_DATA(req);
-
-       cfq_del_crq_hash(crq);
-       cfq_add_crq_hash(cfqd, crq);
-
-       if (rq_rb_key(req) != crq->rb_key) {
-               struct cfq_queue *cfqq = crq->cfq_queue;
-
-               cfq_update_next_crq(crq);
-               cfq_reposition_crq_rb(cfqq, crq);
-       }
-}
-
-static void
-cfq_merged_requests(request_queue_t *q, struct request *rq,
-                   struct request *next)
-{
-       cfq_merged_request(q, rq);
-
-       /*
-        * reposition in fifo if next is older than rq
-        */
-       if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
-           time_before(next->start_time, rq->start_time))
-               list_move(&rq->queuelist, &next->queuelist);
-
-       cfq_remove_request(next);
-}
-
-static inline void
-__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
-       if (cfqq) {
-               /*
-                * stop potential idle class queues waiting service
-                */
-               del_timer(&cfqd->idle_class_timer);
-
-               cfqq->slice_start = jiffies;
-               cfqq->slice_end = 0;
-               cfqq->slice_left = 0;
-               cfq_clear_cfqq_must_alloc_slice(cfqq);
-               cfq_clear_cfqq_fifo_expire(cfqq);
-               cfq_clear_cfqq_expired(cfqq);
-       }
-
-       cfqd->active_queue = cfqq;
-}
-
-/*
- * 0
- * 0,1
- * 0,1,2
- * 0,1,2,3
- * 0,1,2,3,4
- * 0,1,2,3,4,5
- * 0,1,2,3,4,5,6
- * 0,1,2,3,4,5,6,7
- */
-static int cfq_get_next_prio_level(struct cfq_data *cfqd)
-{
-       int prio, wrap;
-
-       prio = -1;
-       wrap = 0;
-       do {
-               int p;
-
-               for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
-                       if (!list_empty(&cfqd->rr_list[p])) {
-                               prio = p;
-                               break;
-                       }
-               }
-
-               if (prio != -1)
-                       break;
-               cfqd->cur_prio = 0;
-               if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
-                       cfqd->cur_end_prio = 0;
-                       if (wrap)
-                               break;
-                       wrap = 1;
-               }
-       } while (1);
-
-       if (unlikely(prio == -1))
-               return -1;
-
-       BUG_ON(prio >= CFQ_PRIO_LISTS);
-
-       list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);
-
-       cfqd->cur_prio = prio + 1;
-       if (cfqd->cur_prio > cfqd->cur_end_prio) {
-               cfqd->cur_end_prio = cfqd->cur_prio;
-               cfqd->cur_prio = 0;
-       }
-       if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
-               cfqd->cur_prio = 0;
-               cfqd->cur_end_prio = 0;
-       }
-
-       return prio;
-}
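
The stair-step comment above cfq_get_next_prio_level documents how the scanned priority window [cur_prio, cur_end_prio] widens by one level every time all rr_lists in the current window turn out to be empty, and wraps once it reaches the last level. The fragment below models only that widening-and-wrap behaviour in user space; NR_PRIO and the round count are made-up stand-ins, and the request lists themselves are left out.

#include <stdio.h>

#define NR_PRIO 8       /* assumed number of best-effort levels */

int main(void)
{
        int cur_prio = 0, cur_end_prio = 0;
        int round, p;

        for (round = 0; round < 10; round++) {
                printf("round %2d scans levels:", round);
                for (p = cur_prio; p <= cur_end_prio; p++)
                        printf(" %d", p);
                printf("\n");

                /* nothing found: widen the window by one level, wrap at the end */
                cur_prio = 0;
                if (++cur_end_prio == NR_PRIO)
                        cur_end_prio = 0;
        }
        return 0;
}
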
-
-static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
-{
-       struct cfq_queue *cfqq;
-
-       /*
-        * if current queue is expired but not done with its requests yet,
-        * wait for that to happen
-        */
-       if ((cfqq = cfqd->active_queue) != NULL) {
-               if (cfq_cfqq_expired(cfqq) && cfq_cfqq_dispatched(cfqq))
-                       return NULL;
-       }
-
-       /*
-        * if the current list is non-empty, grab the first entry. otherwise,
-        * get the next prio level and grab the first entry, if any were spliced
-        */
-       if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1)
-               cfqq = list_entry_cfqq(cfqd->cur_rr.next);
-
-       /*
-        * if we have idle queues and no rt or be queues had pending
-        * requests, either allow immediate service if the grace period
-        * has passed or arm the idle grace timer
-        */
-       if (!cfqq && !list_empty(&cfqd->idle_rr)) {
-               unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-
-               if (time_after_eq(jiffies, end))
-                       cfqq = list_entry_cfqq(cfqd->idle_rr.next);
-               else
-                       mod_timer(&cfqd->idle_class_timer, end);
-       }
-
-       __cfq_set_active_queue(cfqd, cfqq);
-       return cfqq;
-}
-
-/*
- * current cfqq expired its slice (or was too idle), select new one
- */
-static void
-__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                   int preempted)
-{
-       unsigned long now = jiffies;
-
-       if (cfq_cfqq_wait_request(cfqq))
-               del_timer(&cfqd->idle_slice_timer);
-
-       if (!preempted && !cfq_cfqq_dispatched(cfqq))
-               cfqq->service_last = now;
-
-       cfq_clear_cfqq_must_dispatch(cfqq);
-       cfq_clear_cfqq_wait_request(cfqq);
-
-       /*
-        * store what was left of this slice, if the queue idled out
-        * or was preempted
-        */
-       if (time_after(now, cfqq->slice_end))
-               cfqq->slice_left = now - cfqq->slice_end;
-       else
-               cfqq->slice_left = 0;
-
-       if (cfq_cfqq_on_rr(cfqq))
-               cfq_resort_rr_list(cfqq, preempted);
-
-       if (cfqq == cfqd->active_queue)
-               cfqd->active_queue = NULL;
-
-       if (cfqd->active_cic) {
-               put_io_context(cfqd->active_cic->ioc);
-               cfqd->active_cic = NULL;
-       }
-
-       cfqd->dispatch_slice = 0;
-}
-
-static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
-{
-       struct cfq_queue *cfqq = cfqd->active_queue;
-
-       if (cfqq) {
-               /*
-                * use deferred expiry if there are requests in progress, so
-                * as not to disturb the slice of the next queue
-                */
-               if (cfq_cfqq_dispatched(cfqq))
-                       cfq_mark_cfqq_expired(cfqq);
-               else
-                       __cfq_slice_expired(cfqd, cfqq, preempted);
-       }
-}
-
-static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-
-{
-       WARN_ON(!RB_EMPTY(&cfqq->sort_list));
-       WARN_ON(cfqq != cfqd->active_queue);
-
-       /*
-        * idle is disabled, either manually or by past process history
-        */
-       if (!cfqd->cfq_slice_idle)
-               return 0;
-       if (!cfq_cfqq_idle_window(cfqq))
-               return 0;
-       /*
-        * task has exited, don't wait
-        */
-       if (cfqd->active_cic && !cfqd->active_cic->ioc->task)
-               return 0;
-
-       cfq_mark_cfqq_must_dispatch(cfqq);
-       cfq_mark_cfqq_wait_request(cfqq);
-
-       if (!timer_pending(&cfqd->idle_slice_timer)) {
-               unsigned long slice_left = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
-
-               cfqd->idle_slice_timer.expires = jiffies + slice_left;
-               add_timer(&cfqd->idle_slice_timer);
-       }
-
-       return 1;
-}
-
-static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq)
-{
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-       struct cfq_queue *cfqq = crq->cfq_queue;
-
-       cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
-       cfq_remove_request(crq->request);
-       cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
-       elv_dispatch_sort(q, crq->request);
-}
-
-/*
- * return expired entry, or NULL to just start from scratch in rbtree
- */
-static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
-{
-       struct cfq_data *cfqd = cfqq->cfqd;
-       struct request *rq;
-       struct cfq_rq *crq;
-
-       if (cfq_cfqq_fifo_expire(cfqq))
-               return NULL;
-
-       if (!list_empty(&cfqq->fifo)) {
-               int fifo = cfq_cfqq_class_sync(cfqq);
-
-               crq = RQ_DATA(list_entry_fifo(cfqq->fifo.next));
-               rq = crq->request;
-               if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
-                       cfq_mark_cfqq_fifo_expire(cfqq);
-                       return crq;
-               }
-       }
-
-       return NULL;
-}
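
cfq_check_fifo above implements the deadline part of CFQ: a request is forced out in arrival order once it has waited longer than the per-class fifo_expire value, regardless of where it sits in the sector-sorted tree. A minimal stand-alone model of that test follows; the expire values and timestamps are invented for illustration and are not the defaults from this file.

#include <stdio.h>

int main(void)
{
        unsigned long now = 1000;                       /* stand-in for jiffies */
        unsigned long start_time = 760;                 /* when the request arrived */
        unsigned long fifo_expire[2] = { 125, 250 };    /* assumed async, sync limits */
        int sync;

        for (sync = 0; sync <= 1; sync++)
                printf("%s request: %s\n", sync ? "sync" : "async",
                       now > start_time + fifo_expire[sync] ?
                       "past its deadline, dispatch from fifo" :
                       "still within its deadline");
        return 0;
}
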
-
-/*
- * Scale schedule slice based on io priority. Use the sync time slice only
- * if a queue is marked sync and has sync io queued. A sync queue with async
- * io only should not get the full sync slice length.
- */
-static inline int
-cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
-       const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];
-
-       WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
-
-       return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
-}
-
-static inline void
-cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
-       cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
-}
-
-static inline int
-cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
-       const int base_rq = cfqd->cfq_slice_async_rq;
-
-       WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
-
-       return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
-}
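
To see what the two helpers above actually hand out, the sketch below evaluates both formulas in user space for every best-effort priority level. The constants used here (a 100 ms base slice, CFQ_SLICE_SCALE of 5, a base_rq of 2 and 8 BE levels) are assumptions made for the example, not values read from this listing.

#include <stdio.h>

#define CFQ_SLICE_SCALE 5       /* assumed */
#define CFQ_PRIO_LISTS  8       /* assumed number of BE levels */

int main(void)
{
        const int base_slice = 100;     /* assumed sync slice, in ms */
        const int base_rq = 2;          /* assumed cfq_slice_async_rq */
        int prio;

        for (prio = 0; prio < CFQ_PRIO_LISTS; prio++) {
                int slice = base_slice + base_slice / CFQ_SLICE_SCALE * (4 - prio);
                int maxrq = 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - prio));

                printf("ioprio %d: slice %3d ms, async maxrq %2d\n",
                       prio, slice, maxrq);
        }
        return 0;
}
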
-
-/*
- * get next queue for service
- */
-static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd, int force)
-{
-       unsigned long now = jiffies;
-       struct cfq_queue *cfqq;
-
-       cfqq = cfqd->active_queue;
-       if (!cfqq)
-               goto new_queue;
-
-       if (cfq_cfqq_expired(cfqq))
-               goto new_queue;
-
-       /*
-        * slice has expired
-        */
-       if (!cfq_cfqq_must_dispatch(cfqq) && time_after(now, cfqq->slice_end))
-               goto expire;
-
-       /*
-        * if queue has requests, dispatch one. if not, check if
-        * enough slice is left to wait for one
-        */
-       if (!RB_EMPTY(&cfqq->sort_list))
-               goto keep_queue;
-       else if (!force && cfq_cfqq_class_sync(cfqq) &&
-                time_before(now, cfqq->slice_end)) {
-               if (cfq_arm_slice_timer(cfqd, cfqq))
-                       return NULL;
-       }
-
-expire:
-       cfq_slice_expired(cfqd, 0);
-new_queue:
-       cfqq = cfq_set_active_queue(cfqd);
-keep_queue:
-       return cfqq;
-}
-
-static int
-__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                       int max_dispatch)
-{
-       int dispatched = 0;
-
-       BUG_ON(RB_EMPTY(&cfqq->sort_list));
-
-       do {
-               struct cfq_rq *crq;
-
-               /*
-                * follow expired path, else get first next available
-                */
-               if ((crq = cfq_check_fifo(cfqq)) == NULL)
-                       crq = cfqq->next_crq;
-
-               /*
-                * finally, insert request into driver dispatch list
-                */
-               cfq_dispatch_insert(cfqd->queue, crq);
-
-               cfqd->dispatch_slice++;
-               dispatched++;
-
-               if (!cfqd->active_cic) {
-                       atomic_inc(&crq->io_context->ioc->refcount);
-                       cfqd->active_cic = crq->io_context;
-               }
-
-               if (RB_EMPTY(&cfqq->sort_list))
-                       break;
-
-       } while (dispatched < max_dispatch);
-
-       /*
-        * if slice end isn't set yet, set it. if at least one request was
-        * sync, use the sync time slice value
-        */
-       if (!cfqq->slice_end)
-               cfq_set_prio_slice(cfqd, cfqq);
-
-       /*
-        * expire an async queue immediately if it has used up its slice. idle
-        * queues always expire after 1 dispatch round.
-        */
-       if ((!cfq_cfqq_sync(cfqq) &&
-           cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
-           cfq_class_idle(cfqq))
-               cfq_slice_expired(cfqd, 0);
-
-       return dispatched;
-}
-
-static int
-cfq_dispatch_requests(request_queue_t *q, int force)
-{
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-       struct cfq_queue *cfqq;
-
-       if (!cfqd->busy_queues)
-               return 0;
-
-       cfqq = cfq_select_queue(cfqd, force);
-       if (cfqq) {
-               int max_dispatch;
-
-               /*
-                * if idle window is disabled, allow queue buildup
-                */
-               if (!cfq_cfqq_idle_window(cfqq) &&
-                   cfqd->rq_in_driver >= cfqd->cfq_max_depth)
-                       return 0;
-
-               cfq_clear_cfqq_must_dispatch(cfqq);
-               cfq_clear_cfqq_wait_request(cfqq);
-               del_timer(&cfqd->idle_slice_timer);
-
-               if (!force) {
-                       max_dispatch = cfqd->cfq_quantum;
-                       if (cfq_class_idle(cfqq))
-                               max_dispatch = 1;
-               } else
-                       max_dispatch = INT_MAX;
-
-               return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
-       }
-
-       return 0;
-}
-
-/*
- * task holds one reference to the queue, dropped when task exits. each crq
- * in-flight on this queue also holds a reference, dropped when crq is freed.
- *
- * queue lock must be held here.
- */
-static void cfq_put_queue(struct cfq_queue *cfqq)
-{
-       struct cfq_data *cfqd = cfqq->cfqd;
-
-       BUG_ON(atomic_read(&cfqq->ref) <= 0);
-
-       if (!atomic_dec_and_test(&cfqq->ref))
-               return;
-
-       BUG_ON(rb_first(&cfqq->sort_list));
-       BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
-       BUG_ON(cfq_cfqq_on_rr(cfqq));
-
-       if (unlikely(cfqd->active_queue == cfqq)) {
-               __cfq_slice_expired(cfqd, cfqq, 0);
-               cfq_schedule_dispatch(cfqd);
-       }
-
-       cfq_put_cfqd(cfqq->cfqd);
-
-       /*
-        * it's on the empty list and still hashed
-        */
-       list_del(&cfqq->cfq_list);
-       hlist_del(&cfqq->cfq_hash);
-       kmem_cache_free(cfq_pool, cfqq);
-}
-
-static inline struct cfq_queue *
-__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
-                   const int hashval)
-{
-       struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
-       struct hlist_node *entry, *next;
-
-       hlist_for_each_safe(entry, next, hash_list) {
-               struct cfq_queue *__cfqq = list_entry_qhash(entry);
-               const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->ioprio_class, __cfqq->ioprio);
-
-               if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY))
-                       return __cfqq;
-       }
-
-       return NULL;
-}
-
-static struct cfq_queue *
-cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
-{
-       return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
-}
-
-static void cfq_free_io_context(struct cfq_io_context *cic)
-{
-       struct cfq_io_context *__cic;
-       struct list_head *entry, *next;
-
-       list_for_each_safe(entry, next, &cic->list) {
-               __cic = list_entry(entry, struct cfq_io_context, list);
-               kmem_cache_free(cfq_ioc_pool, __cic);
-       }
-
-       kmem_cache_free(cfq_ioc_pool, cic);
-}
-
-/*
- * Called with interrupts disabled
- */
-static void cfq_exit_single_io_context(struct cfq_io_context *cic)
-{
-       struct cfq_data *cfqd = cic->cfqq->cfqd;
-       request_queue_t *q = cfqd->queue;
-
-       WARN_ON(!irqs_disabled());
-
-       spin_lock(q->queue_lock);
-
-       if (unlikely(cic->cfqq == cfqd->active_queue)) {
-               __cfq_slice_expired(cfqd, cic->cfqq, 0);
-               cfq_schedule_dispatch(cfqd);
-       }
-
-       cfq_put_queue(cic->cfqq);
-       cic->cfqq = NULL;
-       spin_unlock(q->queue_lock);
-}
-
-/*
- * Another task may update the task cic list, if it is doing a queue lookup
- * on its behalf. cfq_cic_lock excludes such concurrent updates
- */
-static void cfq_exit_io_context(struct cfq_io_context *cic)
-{
-       struct cfq_io_context *__cic;
-       struct list_head *entry;
-       unsigned long flags;
-
-       local_irq_save(flags);
-
-       /*
-        * put the reference this task is holding to the various queues
-        */
-       list_for_each(entry, &cic->list) {
-               __cic = list_entry(entry, struct cfq_io_context, list);
-               cfq_exit_single_io_context(__cic);
-       }
-
-       cfq_exit_single_io_context(cic);
-       local_irq_restore(flags);
-}
-
-static struct cfq_io_context *
-cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
-{
-       struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
-
-       if (cic) {
-               INIT_LIST_HEAD(&cic->list);
-               cic->cfqq = NULL;
-               cic->key = NULL;
-               cic->last_end_request = jiffies;
-               cic->ttime_total = 0;
-               cic->ttime_samples = 0;
-               cic->ttime_mean = 0;
-               cic->dtor = cfq_free_io_context;
-               cic->exit = cfq_exit_io_context;
-       }
-
-       return cic;
-}
-
-static void cfq_init_prio_data(struct cfq_queue *cfqq)
-{
-       struct task_struct *tsk = current;
-       int ioprio_class;
-
-       if (!cfq_cfqq_prio_changed(cfqq))
-               return;
-
-       ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
-       switch (ioprio_class) {
-               default:
-                       printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
-               case IOPRIO_CLASS_NONE:
-                       /*
-                        * no prio set, place us in the middle of the BE classes
-                        */
-                       cfqq->ioprio = task_nice_ioprio(tsk);
-                       cfqq->ioprio_class = IOPRIO_CLASS_BE;
-                       break;
-               case IOPRIO_CLASS_RT:
-                       cfqq->ioprio = task_ioprio(tsk);
-                       cfqq->ioprio_class = IOPRIO_CLASS_RT;
-                       break;
-               case IOPRIO_CLASS_BE:
-                       cfqq->ioprio = task_ioprio(tsk);
-                       cfqq->ioprio_class = IOPRIO_CLASS_BE;
-                       break;
-               case IOPRIO_CLASS_IDLE:
-                       cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
-                       cfqq->ioprio = 7;
-                       cfq_clear_cfqq_idle_window(cfqq);
-                       break;
-       }
-
-       /*
-        * keep track of original prio settings in case we have to temporarily
-        * elevate the priority of this queue
-        */
-       cfqq->org_ioprio = cfqq->ioprio;
-       cfqq->org_ioprio_class = cfqq->ioprio_class;
-
-       if (cfq_cfqq_on_rr(cfqq))
-               cfq_resort_rr_list(cfqq, 0);
-
-       cfq_clear_cfqq_prio_changed(cfqq);
-}
-
-static inline void changed_ioprio(struct cfq_queue *cfqq)
-{
-       if (cfqq) {
-               struct cfq_data *cfqd = cfqq->cfqd;
-
-               spin_lock(cfqd->queue->queue_lock);
-               cfq_mark_cfqq_prio_changed(cfqq);
-               cfq_init_prio_data(cfqq);
-               spin_unlock(cfqd->queue->queue_lock);
-       }
-}
-
-/*
- * callback from sys_ioprio_set, irqs are disabled
- */
-static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
-{
-       struct cfq_io_context *cic = ioc->cic;
-
-       changed_ioprio(cic->cfqq);
-
-       list_for_each_entry(cic, &cic->list, list)
-               changed_ioprio(cic->cfqq);
-
-       return 0;
-}
-
-static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio,
-             gfp_t gfp_mask)
-{
-       const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
-       struct cfq_queue *cfqq, *new_cfqq = NULL;
-
-retry:
-       cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
-
-       if (!cfqq) {
-               if (new_cfqq) {
-                       cfqq = new_cfqq;
-                       new_cfqq = NULL;
-               } else if (gfp_mask & __GFP_WAIT) {
-                       spin_unlock_irq(cfqd->queue->queue_lock);
-                       new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
-                       spin_lock_irq(cfqd->queue->queue_lock);
-                       goto retry;
-               } else {
-                       cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
-                       if (!cfqq)
-                               goto out;
-               }
-
-               memset(cfqq, 0, sizeof(*cfqq));
-
-               INIT_HLIST_NODE(&cfqq->cfq_hash);
-               INIT_LIST_HEAD(&cfqq->cfq_list);
-               RB_CLEAR_ROOT(&cfqq->sort_list);
-               INIT_LIST_HEAD(&cfqq->fifo);
-
-               cfqq->key = key;
-               hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
-               atomic_set(&cfqq->ref, 0);
-               cfqq->cfqd = cfqd;
-               atomic_inc(&cfqd->ref);
-               cfqq->service_last = 0;
-               /*
-                * set ->slice_left to allow preemption for a new process
-                */
-               cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
-               cfq_mark_cfqq_idle_window(cfqq);
-               cfq_mark_cfqq_prio_changed(cfqq);
-               cfq_init_prio_data(cfqq);
-       }
-
-       if (new_cfqq)
-               kmem_cache_free(cfq_pool, new_cfqq);
-
-       atomic_inc(&cfqq->ref);
-out:
-       WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
-       return cfqq;
-}
-
-/*
- * Setup general io context and cfq io context. There can be several cfq
- * io contexts per general io context, if this process is doing io to more
- * than one device managed by cfq. Note that caller is holding a reference to
- * cfqq, so we don't need to worry about it disappearing
- */
-static struct cfq_io_context *
-cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
-{
-       struct io_context *ioc = NULL;
-       struct cfq_io_context *cic;
-
-       might_sleep_if(gfp_mask & __GFP_WAIT);
-
-       ioc = get_io_context(gfp_mask);
-       if (!ioc)
-               return NULL;
-
-       if ((cic = ioc->cic) == NULL) {
-               cic = cfq_alloc_io_context(cfqd, gfp_mask);
-
-               if (cic == NULL)
-                       goto err;
-
-               /*
-                * manually increment generic io_context usage count, it
-                * cannot go away since we are already holding one ref to it
-                */
-               ioc->cic = cic;
-               ioc->set_ioprio = cfq_ioc_set_ioprio;
-               cic->ioc = ioc;
-               cic->key = cfqd;
-               atomic_inc(&cfqd->ref);
-       } else {
-               struct cfq_io_context *__cic;
-
-               /*
-                * the first cic on the list is actually the head itself
-                */
-               if (cic->key == cfqd)
-                       goto out;
-
-               /*
-                * cic exists, check if we already are there. linear search
-                * should be ok here, the list will usually not be more than
-                * 1 or a few entries long
-                */
-               list_for_each_entry(__cic, &cic->list, list) {
-                       /*
-                        * this process is already holding a reference to
-                        * this queue, so no need to get one more
-                        */
-                       if (__cic->key == cfqd) {
-                               cic = __cic;
-                               goto out;
-                       }
-               }
-
-               /*
-                * nope, process doesn't have a cic associated with this
-                * cfqq yet. get a new one and add to list
-                */
-               __cic = cfq_alloc_io_context(cfqd, gfp_mask);
-               if (__cic == NULL)
-                       goto err;
-
-               __cic->ioc = ioc;
-               __cic->key = cfqd;
-               atomic_inc(&cfqd->ref);
-               list_add(&__cic->list, &cic->list);
-               cic = __cic;
-       }
-
-out:
-       return cic;
-err:
-       put_io_context(ioc);
-       return NULL;
-}
-
-static void
-cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
-{
-       unsigned long elapsed, ttime;
-
-       /*
-        * if this context already has stuff queued, thinktime is from
-        * last queue not last end
-        */
-#if 0
-       if (time_after(cic->last_end_request, cic->last_queue))
-               elapsed = jiffies - cic->last_end_request;
-       else
-               elapsed = jiffies - cic->last_queue;
-#else
-               elapsed = jiffies - cic->last_end_request;
-#endif
-
-       ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
-
-       cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
-       cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
-       cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
-}
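
The arithmetic in cfq_update_io_thinktime keeps a fixed-point exponentially weighted average: both the sample count and the total decay by 7/8 on each update, 256 acts as the fixed-point scale and 128 rounds the final division. The stand-alone run below feeds it a few invented think times so the decay is visible.

#include <stdio.h>

int main(void)
{
        unsigned long ttime_samples = 0, ttime_total = 0, ttime_mean = 0;
        unsigned long think[] = { 4, 4, 12, 2, 2, 2 };  /* invented, in jiffies */
        unsigned int i;

        for (i = 0; i < sizeof(think) / sizeof(think[0]); i++) {
                /* decay old history by 7/8, fold in the new sample (scale 256) */
                ttime_samples = (7 * ttime_samples + 256) / 8;
                ttime_total = (7 * ttime_total + 256 * think[i]) / 8;
                ttime_mean = (ttime_total + 128) / ttime_samples;

                printf("think %2lu -> mean %lu (samples %lu)\n",
                       think[i], ttime_mean, ttime_samples);
        }
        return 0;
}
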
-
-#define sample_valid(samples)  ((samples) > 80)
-
-/*
- * Disable idle window if the process thinks too long or seeks so much that
- * it doesn't matter
- */
-static void
-cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                      struct cfq_io_context *cic)
-{
-       int enable_idle = cfq_cfqq_idle_window(cfqq);
-
-       if (!cic->ioc->task || !cfqd->cfq_slice_idle)
-               enable_idle = 0;
-       else if (sample_valid(cic->ttime_samples)) {
-               if (cic->ttime_mean > cfqd->cfq_slice_idle)
-                       enable_idle = 0;
-               else
-                       enable_idle = 1;
-       }
-
-       if (enable_idle)
-               cfq_mark_cfqq_idle_window(cfqq);
-       else
-               cfq_clear_cfqq_idle_window(cfqq);
-}
-
-
-/*
- * Check if new_cfqq should preempt the currently active queue. Return 0 for
- * no, or if we aren't sure; a 1 will cause a preempt.
- */
-static int
-cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
-                  struct cfq_rq *crq)
-{
-       struct cfq_queue *cfqq = cfqd->active_queue;
-
-       if (cfq_class_idle(new_cfqq))
-               return 0;
-
-       if (!cfqq)
-               return 1;
-
-       if (cfq_class_idle(cfqq))
-               return 1;
-       if (!cfq_cfqq_wait_request(new_cfqq))
-               return 0;
-       /*
-        * if it doesn't have slice left, forget it
-        */
-       if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
-               return 0;
-       if (cfq_crq_is_sync(crq) && !cfq_cfqq_sync(cfqq))
-               return 1;
-
-       return 0;
-}
-
-/*
- * cfqq preempts the active queue. if we allowed preempt with no slice left,
- * let it have half of its nominal slice.
- */
-static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
-       struct cfq_queue *__cfqq, *next;
-
-       list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list)
-               cfq_resort_rr_list(__cfqq, 1);
-
-       if (!cfqq->slice_left)
-               cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
-
-       cfqq->slice_end = cfqq->slice_left + jiffies;
-       __cfq_slice_expired(cfqd, cfqq, 1);
-       __cfq_set_active_queue(cfqd, cfqq);
-}
-
-/*
- * should really be a ll_rw_blk.c helper
- */
-static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
-       request_queue_t *q = cfqd->queue;
-
-       if (!blk_queue_plugged(q))
-               q->request_fn(q);
-       else
-               __generic_unplug_device(q);
-}
-
-/*
- * Called when a new fs request (crq) is added (to cfqq). Check if there's
- * something we should do about it
- */
-static void
-cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                struct cfq_rq *crq)
-{
-       struct cfq_io_context *cic;
-
-       cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
-
-       /*
-        * we never wait for an async request and we don't allow preemption
-        * of an async request. so just return early
-        */
-       if (!cfq_crq_is_sync(crq))
-               return;
-
-       cic = crq->io_context;
-
-       cfq_update_io_thinktime(cfqd, cic);
-       cfq_update_idle_window(cfqd, cfqq, cic);
-
-       cic->last_queue = jiffies;
-
-       if (cfqq == cfqd->active_queue) {
-               /*
-                * if we are waiting for a request for this queue, let it rip
-                * immediately and flag that we must not expire this queue
-                * just now
-                */
-               if (cfq_cfqq_wait_request(cfqq)) {
-                       cfq_mark_cfqq_must_dispatch(cfqq);
-                       del_timer(&cfqd->idle_slice_timer);
-                       cfq_start_queueing(cfqd, cfqq);
-               }
-       } else if (cfq_should_preempt(cfqd, cfqq, crq)) {
-               /*
-                * not the active queue - expire current slice if it is
-                * idle and has expired its mean thinktime or this new queue
-                * has some old slice time left and is of higher priority
-                */
-               cfq_preempt_queue(cfqd, cfqq);
-               cfq_mark_cfqq_must_dispatch(cfqq);
-               cfq_start_queueing(cfqd, cfqq);
-       }
-}
-
-static void cfq_insert_request(request_queue_t *q, struct request *rq)
-{
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-       struct cfq_rq *crq = RQ_DATA(rq);
-       struct cfq_queue *cfqq = crq->cfq_queue;
-
-       cfq_init_prio_data(cfqq);
-
-       cfq_add_crq_rb(crq);
-
-       list_add_tail(&rq->queuelist, &cfqq->fifo);
-
-       if (rq_mergeable(rq))
-               cfq_add_crq_hash(cfqd, crq);
-
-       cfq_crq_enqueued(cfqd, cfqq, crq);
-}
-
-static void cfq_completed_request(request_queue_t *q, struct request *rq)
-{
-       struct cfq_rq *crq = RQ_DATA(rq);
-       struct cfq_queue *cfqq = crq->cfq_queue;
-       struct cfq_data *cfqd = cfqq->cfqd;
-       const int sync = cfq_crq_is_sync(crq);
-       unsigned long now;
-
-       now = jiffies;
-
-       WARN_ON(!cfqd->rq_in_driver);
-       WARN_ON(!cfqq->on_dispatch[sync]);
-       cfqd->rq_in_driver--;
-       cfqq->on_dispatch[sync]--;
-
-       if (!cfq_class_idle(cfqq))
-               cfqd->last_end_request = now;
-
-       if (!cfq_cfqq_dispatched(cfqq)) {
-               if (cfq_cfqq_on_rr(cfqq)) {
-                       cfqq->service_last = now;
-                       cfq_resort_rr_list(cfqq, 0);
-               }
-               if (cfq_cfqq_expired(cfqq)) {
-                       __cfq_slice_expired(cfqd, cfqq, 0);
-                       cfq_schedule_dispatch(cfqd);
-               }
-       }
-
-       if (cfq_crq_is_sync(crq))
-               crq->io_context->last_end_request = now;
-}
-
-static struct request *
-cfq_former_request(request_queue_t *q, struct request *rq)
-{
-       struct cfq_rq *crq = RQ_DATA(rq);
-       struct rb_node *rbprev = rb_prev(&crq->rb_node);
-
-       if (rbprev)
-               return rb_entry_crq(rbprev)->request;
-
-       return NULL;
-}
-
-static struct request *
-cfq_latter_request(request_queue_t *q, struct request *rq)
-{
-       struct cfq_rq *crq = RQ_DATA(rq);
-       struct rb_node *rbnext = rb_next(&crq->rb_node);
-
-       if (rbnext)
-               return rb_entry_crq(rbnext)->request;
-
-       return NULL;
-}
-
-/*
- * we temporarily boost lower priority queues if they are holding fs exclusive
- * resources. they are boosted to normal prio (CLASS_BE/4)
- */
-static void cfq_prio_boost(struct cfq_queue *cfqq)
-{
-       const int ioprio_class = cfqq->ioprio_class;
-       const int ioprio = cfqq->ioprio;
-
-       if (has_fs_excl()) {
-               /*
-                * boost idle prio on transactions that would lock out other
-                * users of the filesystem
-                */
-               if (cfq_class_idle(cfqq))
-                       cfqq->ioprio_class = IOPRIO_CLASS_BE;
-               if (cfqq->ioprio > IOPRIO_NORM)
-                       cfqq->ioprio = IOPRIO_NORM;
-       } else {
-               /*
-                * check if we need to unboost the queue
-                */
-               if (cfqq->ioprio_class != cfqq->org_ioprio_class)
-                       cfqq->ioprio_class = cfqq->org_ioprio_class;
-               if (cfqq->ioprio != cfqq->org_ioprio)
-                       cfqq->ioprio = cfqq->org_ioprio;
-       }
-
-       /*
-        * refile between round-robin lists if we moved the priority class
-        */
-       if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) &&
-           cfq_cfqq_on_rr(cfqq))
-               cfq_resort_rr_list(cfqq, 0);
-}
-
-static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
-{
-       if (rw == READ || process_sync(task))
-               return task->pid;
-
-       return CFQ_KEY_ASYNC;
-}
-
-static inline int
-__cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-               struct task_struct *task, int rw)
-{
-#if 1
-       if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
-           !cfq_cfqq_must_alloc_slice(cfqq)) {
-               cfq_mark_cfqq_must_alloc_slice(cfqq);
-               return ELV_MQUEUE_MUST;
-       }
-
-       return ELV_MQUEUE_MAY;
-#else
-       if (!cfqq || task->flags & PF_MEMALLOC)
-               return ELV_MQUEUE_MAY;
-       if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) {
-               if (cfq_cfqq_wait_request(cfqq))
-                       return ELV_MQUEUE_MUST;
-
-               /*
-                * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
-                * can quickly flood the queue with writes from a single task
-                */
-               if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) {
-                       cfq_mark_cfqq_must_alloc_slice(cfqq);
-                       return ELV_MQUEUE_MUST;
-               }
-
-               return ELV_MQUEUE_MAY;
-       }
-       if (cfq_class_idle(cfqq))
-               return ELV_MQUEUE_NO;
-       if (cfqq->allocated[rw] >= cfqd->max_queued) {
-               struct io_context *ioc = get_io_context(GFP_ATOMIC);
-               int ret = ELV_MQUEUE_NO;
-
-               if (ioc && ioc->nr_batch_requests)
-                       ret = ELV_MQUEUE_MAY;
-
-               put_io_context(ioc);
-               return ret;
-       }
-
-       return ELV_MQUEUE_MAY;
-#endif
-}
-
-static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
-{
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-       struct task_struct *tsk = current;
-       struct cfq_queue *cfqq;
-
-       /*
-        * don't force setup of a queue from here, as a call to may_queue
-        * does not necessarily imply that a request actually will be queued.
-        * so just lookup a possibly existing queue, or return 'may queue'
-        * if that fails
-        */
-       cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
-       if (cfqq) {
-               cfq_init_prio_data(cfqq);
-               cfq_prio_boost(cfqq);
-
-               return __cfq_may_queue(cfqd, cfqq, tsk, rw);
-       }
-
-       return ELV_MQUEUE_MAY;
-}
-
-static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
-{
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-       struct request_list *rl = &q->rq;
-
-       if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) {
-               smp_mb();
-               if (waitqueue_active(&rl->wait[READ]))
-                       wake_up(&rl->wait[READ]);
-       }
-
-       if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) {
-               smp_mb();
-               if (waitqueue_active(&rl->wait[WRITE]))
-                       wake_up(&rl->wait[WRITE]);
-       }
-}
-
-/*
- * queue lock held here
- */
-static void cfq_put_request(request_queue_t *q, struct request *rq)
-{
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-       struct cfq_rq *crq = RQ_DATA(rq);
-
-       if (crq) {
-               struct cfq_queue *cfqq = crq->cfq_queue;
-               const int rw = rq_data_dir(rq);
-
-               BUG_ON(!cfqq->allocated[rw]);
-               cfqq->allocated[rw]--;
-
-               put_io_context(crq->io_context->ioc);
-
-               mempool_free(crq, cfqd->crq_pool);
-               rq->elevator_private = NULL;
-
-               cfq_check_waiters(q, cfqq);
-               cfq_put_queue(cfqq);
-       }
-}
-
-/*
- * Allocate cfq data structures associated with this request.
- */
-static int
-cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
-               gfp_t gfp_mask)
-{
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-       struct task_struct *tsk = current;
-       struct cfq_io_context *cic;
-       const int rw = rq_data_dir(rq);
-       pid_t key = cfq_queue_pid(tsk, rw);
-       struct cfq_queue *cfqq;
-       struct cfq_rq *crq;
-       unsigned long flags;
-
-       might_sleep_if(gfp_mask & __GFP_WAIT);
-
-       cic = cfq_get_io_context(cfqd, key, gfp_mask);
-
-       spin_lock_irqsave(q->queue_lock, flags);
-
-       if (!cic)
-               goto queue_fail;
-
-       if (!cic->cfqq) {
-               cfqq = cfq_get_queue(cfqd, key, tsk->ioprio, gfp_mask);
-               if (!cfqq)
-                       goto queue_fail;
-
-               cic->cfqq = cfqq;
-       } else
-               cfqq = cic->cfqq;
-
-       cfqq->allocated[rw]++;
-       cfq_clear_cfqq_must_alloc(cfqq);
-       cfqd->rq_starved = 0;
-       atomic_inc(&cfqq->ref);
-       spin_unlock_irqrestore(q->queue_lock, flags);
-
-       crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
-       if (crq) {
-               RB_CLEAR(&crq->rb_node);
-               crq->rb_key = 0;
-               crq->request = rq;
-               INIT_HLIST_NODE(&crq->hash);
-               crq->cfq_queue = cfqq;
-               crq->io_context = cic;
-
-               if (rw == READ || process_sync(tsk))
-                       cfq_mark_crq_is_sync(crq);
-               else
-                       cfq_clear_crq_is_sync(crq);
-
-               rq->elevator_private = crq;
-               return 0;
-       }
-
-       spin_lock_irqsave(q->queue_lock, flags);
-       cfqq->allocated[rw]--;
-       if (!(cfqq->allocated[0] + cfqq->allocated[1]))
-               cfq_mark_cfqq_must_alloc(cfqq);
-       cfq_put_queue(cfqq);
-queue_fail:
-       if (cic)
-               put_io_context(cic->ioc);
-       /*
-        * mark us rq allocation starved. we need to kickstart the process
-        * ourselves if there are no pending requests that can do it for us.
-        * that would be an extremely rare OOM situation
-        */
-       cfqd->rq_starved = 1;
-       cfq_schedule_dispatch(cfqd);
-       spin_unlock_irqrestore(q->queue_lock, flags);
-       return 1;
-}
-
-static void cfq_kick_queue(void *data)
-{
-       request_queue_t *q = data;
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-       unsigned long flags;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-
-       if (cfqd->rq_starved) {
-               struct request_list *rl = &q->rq;
-
-               /*
-                * we aren't guaranteed to get a request after this, but we
-                * have to be opportunistic
-                */
-               smp_mb();
-               if (waitqueue_active(&rl->wait[READ]))
-                       wake_up(&rl->wait[READ]);
-               if (waitqueue_active(&rl->wait[WRITE]))
-                       wake_up(&rl->wait[WRITE]);
-       }
-
-       blk_remove_plug(q);
-       q->request_fn(q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-/*
- * Timer running if the active_queue is currently idling inside its time slice
- */
-static void cfq_idle_slice_timer(unsigned long data)
-{
-       struct cfq_data *cfqd = (struct cfq_data *) data;
-       struct cfq_queue *cfqq;
-       unsigned long flags;
-
-       spin_lock_irqsave(cfqd->queue->queue_lock, flags);
-
-       if ((cfqq = cfqd->active_queue) != NULL) {
-               unsigned long now = jiffies;
-
-               /*
-                * expired
-                */
-               if (time_after(now, cfqq->slice_end))
-                       goto expire;
-
-               /*
-                * only expire and reinvoke request handler, if there are
-                * other queues with pending requests
-                */
-               if (!cfqd->busy_queues) {
-                       cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
-                       add_timer(&cfqd->idle_slice_timer);
-                       goto out_cont;
-               }
-
-               /*
-                * not expired and it has a request pending, let it dispatch
-                */
-               if (!RB_EMPTY(&cfqq->sort_list)) {
-                       cfq_mark_cfqq_must_dispatch(cfqq);
-                       goto out_kick;
-               }
-       }
-expire:
-       cfq_slice_expired(cfqd, 0);
-out_kick:
-       cfq_schedule_dispatch(cfqd);
-out_cont:
-       spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
-}
-
-/*
- * Timer running if an idle class queue is waiting for service
- */
-static void cfq_idle_class_timer(unsigned long data)
-{
-       struct cfq_data *cfqd = (struct cfq_data *) data;
-       unsigned long flags, end;
-
-       spin_lock_irqsave(cfqd->queue->queue_lock, flags);
-
-       /*
-        * race with a non-idle queue, reset timer
-        */
-       end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-       if (!time_after_eq(jiffies, end)) {
-               cfqd->idle_class_timer.expires = end;
-               add_timer(&cfqd->idle_class_timer);
-       } else
-               cfq_schedule_dispatch(cfqd);
-
-       spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
-}
-
-static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
-{
-       del_timer_sync(&cfqd->idle_slice_timer);
-       del_timer_sync(&cfqd->idle_class_timer);
-       blk_sync_queue(cfqd->queue);
-}
-
-static void cfq_put_cfqd(struct cfq_data *cfqd)
-{
-       request_queue_t *q = cfqd->queue;
-
-       if (!atomic_dec_and_test(&cfqd->ref))
-               return;
-
-       cfq_shutdown_timer_wq(cfqd);
-       blk_put_queue(q);
-
-       mempool_destroy(cfqd->crq_pool);
-       kfree(cfqd->crq_hash);
-       kfree(cfqd->cfq_hash);
-       kfree(cfqd);
-}
-
-static void cfq_exit_queue(elevator_t *e)
-{
-       struct cfq_data *cfqd = e->elevator_data;
-
-       cfq_shutdown_timer_wq(cfqd);
-       cfq_put_cfqd(cfqd);
-}
-
-static int cfq_init_queue(request_queue_t *q, elevator_t *e)
-{
-       struct cfq_data *cfqd;
-       int i;
-
-       cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
-       if (!cfqd)
-               return -ENOMEM;
-
-       memset(cfqd, 0, sizeof(*cfqd));
-
-       for (i = 0; i < CFQ_PRIO_LISTS; i++)
-               INIT_LIST_HEAD(&cfqd->rr_list[i]);
-
-       INIT_LIST_HEAD(&cfqd->busy_rr);
-       INIT_LIST_HEAD(&cfqd->cur_rr);
-       INIT_LIST_HEAD(&cfqd->idle_rr);
-       INIT_LIST_HEAD(&cfqd->empty_list);
-
-       cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
-       if (!cfqd->crq_hash)
-               goto out_crqhash;
-
-       cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
-       if (!cfqd->cfq_hash)
-               goto out_cfqhash;
-
-       cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool);
-       if (!cfqd->crq_pool)
-               goto out_crqpool;
-
-       for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
-               INIT_HLIST_HEAD(&cfqd->crq_hash[i]);
-       for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
-               INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
-
-       e->elevator_data = cfqd;
-
-       cfqd->queue = q;
-       atomic_inc(&q->refcnt);
-
-       cfqd->max_queued = q->nr_requests / 4;
-       q->nr_batching = cfq_queued;
-
-       init_timer(&cfqd->idle_slice_timer);
-       cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
-       cfqd->idle_slice_timer.data = (unsigned long) cfqd;
-
-       init_timer(&cfqd->idle_class_timer);
-       cfqd->idle_class_timer.function = cfq_idle_class_timer;
-       cfqd->idle_class_timer.data = (unsigned long) cfqd;
-
-       INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
-
-       atomic_set(&cfqd->ref, 1);
-
-       cfqd->cfq_queued = cfq_queued;
-       cfqd->cfq_quantum = cfq_quantum;
-       cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
-       cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
-       cfqd->cfq_back_max = cfq_back_max;
-       cfqd->cfq_back_penalty = cfq_back_penalty;
-       cfqd->cfq_slice[0] = cfq_slice_async;
-       cfqd->cfq_slice[1] = cfq_slice_sync;
-       cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
-       cfqd->cfq_slice_idle = cfq_slice_idle;
-       cfqd->cfq_max_depth = cfq_max_depth;
-
-       return 0;
-out_crqpool:
-       kfree(cfqd->cfq_hash);
-out_cfqhash:
-       kfree(cfqd->crq_hash);
-out_crqhash:
-       kfree(cfqd);
-       return -ENOMEM;
-}
-
-static void cfq_slab_kill(void)
-{
-       if (crq_pool)
-               kmem_cache_destroy(crq_pool);
-       if (cfq_pool)
-               kmem_cache_destroy(cfq_pool);
-       if (cfq_ioc_pool)
-               kmem_cache_destroy(cfq_ioc_pool);
-}
-
-static int __init cfq_slab_setup(void)
-{
-       crq_pool = kmem_cache_create("crq_pool", sizeof(struct cfq_rq), 0, 0,
-                                       NULL, NULL);
-       if (!crq_pool)
-               goto fail;
-
-       cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
-                                       NULL, NULL);
-       if (!cfq_pool)
-               goto fail;
-
-       cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
-                       sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
-       if (!cfq_ioc_pool)
-               goto fail;
-
-       return 0;
-fail:
-       cfq_slab_kill();
-       return -ENOMEM;
-}
-
-/*
- * sysfs parts below -->
- */
-struct cfq_fs_entry {
-       struct attribute attr;
-       ssize_t (*show)(struct cfq_data *, char *);
-       ssize_t (*store)(struct cfq_data *, const char *, size_t);
-};
-
-static ssize_t
-cfq_var_show(unsigned int var, char *page)
-{
-       return sprintf(page, "%d\n", var);
-}
-
-static ssize_t
-cfq_var_store(unsigned int *var, const char *page, size_t count)
-{
-       char *p = (char *) page;
-
-       *var = simple_strtoul(p, &p, 10);
-       return count;
-}
-
-#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                           \
-static ssize_t __FUNC(struct cfq_data *cfqd, char *page)               \
-{                                                                      \
-       unsigned int __data = __VAR;                                    \
-       if (__CONV)                                                     \
-               __data = jiffies_to_msecs(__data);                      \
-       return cfq_var_show(__data, (page));                            \
-}
-SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
-SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
-SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
-SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
-SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0);
-SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0);
-SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
-SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
-SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
-SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
-SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0);
-#undef SHOW_FUNCTION
-
-#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                        \
-static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count)   \
-{                                                                      \
-       unsigned int __data;                                            \
-       int ret = cfq_var_store(&__data, (page), count);                \
-       if (__data < (MIN))                                             \
-               __data = (MIN);                                         \
-       else if (__data > (MAX))                                        \
-               __data = (MAX);                                         \
-       if (__CONV)                                                     \
-               *(__PTR) = msecs_to_jiffies(__data);                    \
-       else                                                            \
-               *(__PTR) = __data;                                      \
-       return ret;                                                     \
-}
-STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
-STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
-STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
-STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
-STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
-STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
-STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0);
-#undef STORE_FUNCTION
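
The SHOW_FUNCTION/STORE_FUNCTION macros above stamp out one sysfs accessor per tunable, with the trailing __CONV argument deciding whether the stored jiffies value is converted to and from milliseconds. The demo below applies the same stamping technique to an invented struct tunables in plain user-space C, purely to show the pattern; none of these names exist in the scheduler.

#include <stdio.h>

struct tunables {
        unsigned int quantum;
        unsigned int queued;
};

#define SHOW_FUNCTION(__FUNC, __FIELD)                                  \
static int __FUNC(struct tunables *t, char *page, int len)             \
{                                                                       \
        return snprintf(page, len, "%u\n", t->__FIELD);                 \
}
SHOW_FUNCTION(quantum_show, quantum)
SHOW_FUNCTION(queued_show, queued)
#undef SHOW_FUNCTION

int main(void)
{
        struct tunables t = { .quantum = 4, .queued = 8 };
        char page[32];

        quantum_show(&t, page, sizeof(page));
        printf("quantum: %s", page);
        queued_show(&t, page, sizeof(page));
        printf("queued: %s", page);
        return 0;
}
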
-
-static struct cfq_fs_entry cfq_quantum_entry = {
-       .attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
-       .show = cfq_quantum_show,
-       .store = cfq_quantum_store,
-};
-static struct cfq_fs_entry cfq_queued_entry = {
-       .attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
-       .show = cfq_queued_show,
-       .store = cfq_queued_store,
-};
-static struct cfq_fs_entry cfq_fifo_expire_sync_entry = {
-       .attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR },
-       .show = cfq_fifo_expire_sync_show,
-       .store = cfq_fifo_expire_sync_store,
-};
-static struct cfq_fs_entry cfq_fifo_expire_async_entry = {
-       .attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR },
-       .show = cfq_fifo_expire_async_show,
-       .store = cfq_fifo_expire_async_store,
-};
-static struct cfq_fs_entry cfq_back_max_entry = {
-       .attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR },
-       .show = cfq_back_max_show,
-       .store = cfq_back_max_store,
-};
-static struct cfq_fs_entry cfq_back_penalty_entry = {
-       .attr = {.name = "back_seek_penalty", .mode = S_IRUGO | S_IWUSR },
-       .show = cfq_back_penalty_show,
-       .store = cfq_back_penalty_store,
-};
-static struct cfq_fs_entry cfq_slice_sync_entry = {
-       .attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR },
-       .show = cfq_slice_sync_show,
-       .store = cfq_slice_sync_store,
-};
-static struct cfq_fs_entry cfq_slice_async_entry = {
-       .attr = {.name = "slice_async", .mode = S_IRUGO | S_IWUSR },
-       .show = cfq_slice_async_show,
-       .store = cfq_slice_async_store,
-};
-static struct cfq_fs_entry cfq_slice_async_rq_entry = {
-       .attr = {.name = "slice_async_rq", .mode = S_IRUGO | S_IWUSR },
-       .show = cfq_slice_async_rq_show,
-       .store = cfq_slice_async_rq_store,
-};
-static struct cfq_fs_entry cfq_slice_idle_entry = {
-       .attr = {.name = "slice_idle", .mode = S_IRUGO | S_IWUSR },
-       .show = cfq_slice_idle_show,
-       .store = cfq_slice_idle_store,
-};
-static struct cfq_fs_entry cfq_max_depth_entry = {
-       .attr = {.name = "max_depth", .mode = S_IRUGO | S_IWUSR },
-       .show = cfq_max_depth_show,
-       .store = cfq_max_depth_store,
-};
-
-static struct attribute *default_attrs[] = {
-       &cfq_quantum_entry.attr,
-       &cfq_queued_entry.attr,
-       &cfq_fifo_expire_sync_entry.attr,
-       &cfq_fifo_expire_async_entry.attr,
-       &cfq_back_max_entry.attr,
-       &cfq_back_penalty_entry.attr,
-       &cfq_slice_sync_entry.attr,
-       &cfq_slice_async_entry.attr,
-       &cfq_slice_async_rq_entry.attr,
-       &cfq_slice_idle_entry.attr,
-       &cfq_max_depth_entry.attr,
-       NULL,
-};
-
-#define to_cfq(atr) container_of((atr), struct cfq_fs_entry, attr)
-
-static ssize_t
-cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
-       elevator_t *e = container_of(kobj, elevator_t, kobj);
-       struct cfq_fs_entry *entry = to_cfq(attr);
-
-       if (!entry->show)
-               return -EIO;
-
-       return entry->show(e->elevator_data, page);
-}
-
-static ssize_t
-cfq_attr_store(struct kobject *kobj, struct attribute *attr,
-              const char *page, size_t length)
-{
-       elevator_t *e = container_of(kobj, elevator_t, kobj);
-       struct cfq_fs_entry *entry = to_cfq(attr);
-
-       if (!entry->store)
-               return -EIO;
-
-       return entry->store(e->elevator_data, page, length);
-}
-
-static struct sysfs_ops cfq_sysfs_ops = {
-       .show   = cfq_attr_show,
-       .store  = cfq_attr_store,
-};
-
-static struct kobj_type cfq_ktype = {
-       .sysfs_ops      = &cfq_sysfs_ops,
-       .default_attrs  = default_attrs,
-};
-
-static struct elevator_type iosched_cfq = {
-       .ops = {
-               .elevator_merge_fn =            cfq_merge,
-               .elevator_merged_fn =           cfq_merged_request,
-               .elevator_merge_req_fn =        cfq_merged_requests,
-               .elevator_dispatch_fn =         cfq_dispatch_requests,
-               .elevator_add_req_fn =          cfq_insert_request,
-               .elevator_activate_req_fn =     cfq_activate_request,
-               .elevator_deactivate_req_fn =   cfq_deactivate_request,
-               .elevator_queue_empty_fn =      cfq_queue_empty,
-               .elevator_completed_req_fn =    cfq_completed_request,
-               .elevator_former_req_fn =       cfq_former_request,
-               .elevator_latter_req_fn =       cfq_latter_request,
-               .elevator_set_req_fn =          cfq_set_request,
-               .elevator_put_req_fn =          cfq_put_request,
-               .elevator_may_queue_fn =        cfq_may_queue,
-               .elevator_init_fn =             cfq_init_queue,
-               .elevator_exit_fn =             cfq_exit_queue,
-       },
-       .elevator_ktype =       &cfq_ktype,
-       .elevator_name =        "cfq",
-       .elevator_owner =       THIS_MODULE,
-};
-
-static int __init cfq_init(void)
-{
-       int ret;
-
-       /*
-        * could be 0 on HZ < 1000 setups
-        */
-       if (!cfq_slice_async)
-               cfq_slice_async = 1;
-       if (!cfq_slice_idle)
-               cfq_slice_idle = 1;
-
-       if (cfq_slab_setup())
-               return -ENOMEM;
-
-       ret = elv_register(&iosched_cfq);
-       if (ret)
-               cfq_slab_kill();
-
-       return ret;
-}
-
-static void __exit cfq_exit(void)
-{
-       elv_unregister(&iosched_cfq);
-       cfq_slab_kill();
-}
-
-module_init(cfq_init);
-module_exit(cfq_exit);
-
-MODULE_AUTHOR("Jens Axboe");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
deleted file mode 100644 (file)
index 7929471..0000000
+++ /dev/null
@@ -1,878 +0,0 @@
-/*
- *  linux/drivers/block/deadline-iosched.c
- *
- *  Deadline i/o scheduler.
- *
- *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
- */
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/blkdev.h>
-#include <linux/elevator.h>
-#include <linux/bio.h>
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/compiler.h>
-#include <linux/hash.h>
-#include <linux/rbtree.h>
-
-/*
- * See Documentation/block/deadline-iosched.txt
- */
-static int read_expire = HZ / 2;  /* max time before a read is submitted. */
-static int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
-static int writes_starved = 2;    /* max times reads can starve a write */
-static int fifo_batch = 16;       /* # of sequential requests treated as one
-                                    by the above parameters. For throughput. */
-
-static const int deadline_hash_shift = 5;
-#define DL_HASH_BLOCK(sec)     ((sec) >> 3)
-#define DL_HASH_FN(sec)                (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
-#define DL_HASH_ENTRIES                (1 << deadline_hash_shift)
-#define rq_hash_key(rq)                ((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr)   list_entry((ptr), struct deadline_rq, hash)
-#define ON_HASH(drq)           (drq)->on_hash
-
-struct deadline_data {
-       /*
-        * run time data
-        */
-
-       /*
-        * requests (deadline_rqs) are present on both sort_list and fifo_list
-        */
-       struct rb_root sort_list[2];    
-       struct list_head fifo_list[2];
-       
-       /*
-        * next request in sort order, per direction; either or both may be NULL
-        */
-       struct deadline_rq *next_drq[2];
-       struct list_head *hash;         /* request hash */
-       unsigned int batching;          /* number of sequential requests made */
-       sector_t last_sector;           /* head position */
-       unsigned int starved;           /* times reads have starved writes */
-
-       /*
-        * settings that change how the i/o scheduler behaves
-        */
-       int fifo_expire[2];
-       int fifo_batch;
-       int writes_starved;
-       int front_merges;
-
-       mempool_t *drq_pool;
-};
-
-/*
- * pre-request data.
- */
-struct deadline_rq {
-       /*
-        * rbtree index, key is the starting offset
-        */
-       struct rb_node rb_node;
-       sector_t rb_key;
-
-       struct request *request;
-
-       /*
-        * request hash, key is the ending offset (for back merge lookup)
-        */
-       struct list_head hash;
-       char on_hash;
-
-       /*
-        * expire fifo
-        */
-       struct list_head fifo;
-       unsigned long expires;
-};
-
-static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);
-
-static kmem_cache_t *drq_pool;
-
-#define RQ_DATA(rq)    ((struct deadline_rq *) (rq)->elevator_private)
-
-/*
- * the back merge hash support functions
- */
-static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
-{
-       drq->on_hash = 0;
-       list_del_init(&drq->hash);
-}
-
-static inline void deadline_del_drq_hash(struct deadline_rq *drq)
-{
-       if (ON_HASH(drq))
-               __deadline_del_drq_hash(drq);
-}
-
-static inline void
-deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
-{
-       struct request *rq = drq->request;
-
-       BUG_ON(ON_HASH(drq));
-
-       drq->on_hash = 1;
-       list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
-}
-
-/*
- * move hot entry to front of chain
- */
-static inline void
-deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
-{
-       struct request *rq = drq->request;
-       struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
-
-       if (ON_HASH(drq) && drq->hash.prev != head) {
-               list_del(&drq->hash);
-               list_add(&drq->hash, head);
-       }
-}
-
-static struct request *
-deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
-{
-       struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
-       struct list_head *entry, *next = hash_list->next;
-
-       while ((entry = next) != hash_list) {
-               struct deadline_rq *drq = list_entry_hash(entry);
-               struct request *__rq = drq->request;
-
-               next = entry->next;
-               
-               BUG_ON(!ON_HASH(drq));
-
-               if (!rq_mergeable(__rq)) {
-                       __deadline_del_drq_hash(drq);
-                       continue;
-               }
-
-               if (rq_hash_key(__rq) == offset)
-                       return __rq;
-       }
-
-       return NULL;
-}
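
/*
 * Illustration added for this writeup, not part of the original file:
 * the back-merge hash above is keyed on a request's END sector
 * (rq_hash_key() = sector + nr_sectors), coarsened into 8-sector blocks
 * and spread over 1 << 5 = 32 buckets, so a bio that begins exactly where
 * a queued request ends is found with a single bucket walk.  hash_standin()
 * below only mimics the shape of the kernel's 32-bit hash_long(); the
 * multiplier is quoted from memory and is not load-bearing for the example.
 */
#include <stdio.h>
#include <stdint.h>

#define DEADLINE_HASH_SHIFT 5
#define HASH_BLOCK(sec)     ((sec) >> 3)

static uint32_t hash_standin(uint32_t val, unsigned int bits)
{
        return (uint32_t)(val * 0x9e370001u) >> (32 - bits);
}

int main(void)
{
        uint32_t sector = 1000, nr_sectors = 8;
        uint32_t key = sector + nr_sectors;       /* end of the request */

        printf("request [%u, %u) -> bucket %u of %u\n",
               sector, sector + nr_sectors,
               hash_standin(HASH_BLOCK(key), DEADLINE_HASH_SHIFT),
               1u << DEADLINE_HASH_SHIFT);

        /* a bio with bi_sector == 1008 is looked up with the same key,
         * which is how deadline_find_drq_hash() spots back-merge candidates */
        return 0;
}
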
-
-/*
- * rb tree support functions
- */
-#define RB_NONE                (2)
-#define RB_EMPTY(root) ((root)->rb_node == NULL)
-#define ON_RB(node)    ((node)->rb_color != RB_NONE)
-#define RB_CLEAR(node) ((node)->rb_color = RB_NONE)
-#define rb_entry_drq(node)     rb_entry((node), struct deadline_rq, rb_node)
-#define DRQ_RB_ROOT(dd, drq)   (&(dd)->sort_list[rq_data_dir((drq)->request)])
-#define rq_rb_key(rq)          (rq)->sector
-
-static struct deadline_rq *
-__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
-{
-       struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
-       struct rb_node *parent = NULL;
-       struct deadline_rq *__drq;
-
-       while (*p) {
-               parent = *p;
-               __drq = rb_entry_drq(parent);
-
-               if (drq->rb_key < __drq->rb_key)
-                       p = &(*p)->rb_left;
-               else if (drq->rb_key > __drq->rb_key)
-                       p = &(*p)->rb_right;
-               else
-                       return __drq;
-       }
-
-       rb_link_node(&drq->rb_node, parent, p);
-       return NULL;
-}
-
-static void
-deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
-{
-       struct deadline_rq *__alias;
-
-       drq->rb_key = rq_rb_key(drq->request);
-
-retry:
-       __alias = __deadline_add_drq_rb(dd, drq);
-       if (!__alias) {
-               rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
-               return;
-       }
-
-       deadline_move_request(dd, __alias);
-       goto retry;
-}
-
-static inline void
-deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
-{
-       const int data_dir = rq_data_dir(drq->request);
-
-       if (dd->next_drq[data_dir] == drq) {
-               struct rb_node *rbnext = rb_next(&drq->rb_node);
-
-               dd->next_drq[data_dir] = NULL;
-               if (rbnext)
-                       dd->next_drq[data_dir] = rb_entry_drq(rbnext);
-       }
-
-       BUG_ON(!ON_RB(&drq->rb_node));
-       rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
-       RB_CLEAR(&drq->rb_node);
-}
-
-static struct request *
-deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
-{
-       struct rb_node *n = dd->sort_list[data_dir].rb_node;
-       struct deadline_rq *drq;
-
-       while (n) {
-               drq = rb_entry_drq(n);
-
-               if (sector < drq->rb_key)
-                       n = n->rb_left;
-               else if (sector > drq->rb_key)
-                       n = n->rb_right;
-               else
-                       return drq->request;
-       }
-
-       return NULL;
-}
-
-/*
- * deadline_find_first_drq finds the first (lowest sector numbered) request
- * for the specified data_dir. Used to sweep back to the start of the disk
- * (1-way elevator) after we process the last (highest sector) request.
- */
-static struct deadline_rq *
-deadline_find_first_drq(struct deadline_data *dd, int data_dir)
-{
-       struct rb_node *n = dd->sort_list[data_dir].rb_node;
-
-       for (;;) {
-               if (n->rb_left == NULL)
-                       return rb_entry_drq(n);
-               
-               n = n->rb_left;
-       }
-}
-
-/*
- * add drq to rbtree and fifo
- */
-static void
-deadline_add_request(struct request_queue *q, struct request *rq)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-       struct deadline_rq *drq = RQ_DATA(rq);
-
-       const int data_dir = rq_data_dir(drq->request);
-
-       deadline_add_drq_rb(dd, drq);
-       /*
-        * set expire time (only used for reads) and add to fifo list
-        */
-       drq->expires = jiffies + dd->fifo_expire[data_dir];
-       list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);
-
-       if (rq_mergeable(rq))
-               deadline_add_drq_hash(dd, drq);
-}
-
-/*
- * remove rq from rbtree, fifo, and hash
- */
-static void deadline_remove_request(request_queue_t *q, struct request *rq)
-{
-       struct deadline_rq *drq = RQ_DATA(rq);
-       struct deadline_data *dd = q->elevator->elevator_data;
-
-       list_del_init(&drq->fifo);
-       deadline_del_drq_rb(dd, drq);
-       deadline_del_drq_hash(drq);
-}
-
-static int
-deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-       struct request *__rq;
-       int ret;
-
-       /*
-        * see if the merge hash can satisfy a back merge
-        */
-       __rq = deadline_find_drq_hash(dd, bio->bi_sector);
-       if (__rq) {
-               BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
-
-               if (elv_rq_merge_ok(__rq, bio)) {
-                       ret = ELEVATOR_BACK_MERGE;
-                       goto out;
-               }
-       }
-
-       /*
-        * check for front merge
-        */
-       if (dd->front_merges) {
-               sector_t rb_key = bio->bi_sector + bio_sectors(bio);
-
-               __rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
-               if (__rq) {
-                       BUG_ON(rb_key != rq_rb_key(__rq));
-
-                       if (elv_rq_merge_ok(__rq, bio)) {
-                               ret = ELEVATOR_FRONT_MERGE;
-                               goto out;
-                       }
-               }
-       }
-
-       return ELEVATOR_NO_MERGE;
-out:
-       if (ret)
-               deadline_hot_drq_hash(dd, RQ_DATA(__rq));
-       *req = __rq;
-       return ret;
-}
-
-static void deadline_merged_request(request_queue_t *q, struct request *req)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-       struct deadline_rq *drq = RQ_DATA(req);
-
-       /*
-        * hash always needs to be repositioned, key is end sector
-        */
-       deadline_del_drq_hash(drq);
-       deadline_add_drq_hash(dd, drq);
-
-       /*
-        * if the merge was a front merge, we need to reposition request
-        */
-       if (rq_rb_key(req) != drq->rb_key) {
-               deadline_del_drq_rb(dd, drq);
-               deadline_add_drq_rb(dd, drq);
-       }
-}
-
-static void
-deadline_merged_requests(request_queue_t *q, struct request *req,
-                        struct request *next)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-       struct deadline_rq *drq = RQ_DATA(req);
-       struct deadline_rq *dnext = RQ_DATA(next);
-
-       BUG_ON(!drq);
-       BUG_ON(!dnext);
-
-       /*
-        * reposition drq (this is the merged request) in hash, and in rbtree
-        * in case of a front merge
-        */
-       deadline_del_drq_hash(drq);
-       deadline_add_drq_hash(dd, drq);
-
-       if (rq_rb_key(req) != drq->rb_key) {
-               deadline_del_drq_rb(dd, drq);
-               deadline_add_drq_rb(dd, drq);
-       }
-
-       /*
-        * if dnext expires before drq, assign its expire time to drq
-        * and move into dnext position (dnext will be deleted) in fifo
-        */
-       if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
-               if (time_before(dnext->expires, drq->expires)) {
-                       list_move(&drq->fifo, &dnext->fifo);
-                       drq->expires = dnext->expires;
-               }
-       }
-
-       /*
-        * kill knowledge of next, this one is a goner
-        */
-       deadline_remove_request(q, next);
-}
-
-/*
- * move request from sort list to dispatch queue.
- */
-static inline void
-deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
-{
-       request_queue_t *q = drq->request->q;
-
-       deadline_remove_request(q, drq->request);
-       elv_dispatch_add_tail(q, drq->request);
-}
-
-/*
- * move an entry to dispatch queue
- */
-static void
-deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
-{
-       const int data_dir = rq_data_dir(drq->request);
-       struct rb_node *rbnext = rb_next(&drq->rb_node);
-
-       dd->next_drq[READ] = NULL;
-       dd->next_drq[WRITE] = NULL;
-
-       if (rbnext)
-               dd->next_drq[data_dir] = rb_entry_drq(rbnext);
-       
-       dd->last_sector = drq->request->sector + drq->request->nr_sectors;
-
-       /*
-        * take it off the sort and fifo list, move
-        * to dispatch queue
-        */
-       deadline_move_to_dispatch(dd, drq);
-}
-
-#define list_entry_fifo(ptr)   list_entry((ptr), struct deadline_rq, fifo)
-
-/*
- * deadline_check_fifo returns 0 if there are no expired requests on the fifo
- * for the given data direction, 1 otherwise.
- * Requires !list_empty(&dd->fifo_list[data_dir])
- */
-static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
-{
-       struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);
-
-       /*
-        * drq is expired!
-        */
-       if (time_after(jiffies, drq->expires))
-               return 1;
-
-       return 0;
-}
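
/*
 * Aside added for this writeup, not part of the original file:
 * deadline_check_fifo() leans on time_after(jiffies, drq->expires), which
 * stays correct across a jiffies wrap because it compares a signed
 * difference instead of the raw counters.  A self-contained check of the
 * idiom (same comparison as include/linux/jiffies.h):
 */
#include <stdio.h>

#define time_after(a, b)        ((long)(b) - (long)(a) < 0)

int main(void)
{
        unsigned long expires = (unsigned long)-10;   /* deadline set just before the wrap */
        unsigned long now = 5;                        /* counter has wrapped past zero */

        printf("naive now > expires:      %d\n", (int)(now > expires));          /* 0 - wrong */
        printf("time_after(now, expires): %d\n", (int)time_after(now, expires)); /* 1 - right */
        return 0;
}
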
-
-/*
- * deadline_dispatch_requests selects the best request according to
- * read/write expire, fifo_batch, etc
- */
-static int deadline_dispatch_requests(request_queue_t *q, int force)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-       const int reads = !list_empty(&dd->fifo_list[READ]);
-       const int writes = !list_empty(&dd->fifo_list[WRITE]);
-       struct deadline_rq *drq;
-       int data_dir;
-
-       /*
-        * batches are currently reads XOR writes
-        */
-       if (dd->next_drq[WRITE])
-               drq = dd->next_drq[WRITE];
-       else
-               drq = dd->next_drq[READ];
-
-       if (drq) {
-               /* we have a "next request" */
-               
-               if (dd->last_sector != drq->request->sector)
-                       /* end the batch on a non-sequential request */
-                       dd->batching += dd->fifo_batch;
-               
-               if (dd->batching < dd->fifo_batch)
-                       /* we are still entitled to batch */
-                       goto dispatch_request;
-       }
-
-       /*
-        * at this point we are not running a batch. select the appropriate
-        * data direction (read / write)
-        */
-
-       if (reads) {
-               BUG_ON(RB_EMPTY(&dd->sort_list[READ]));
-
-               if (writes && (dd->starved++ >= dd->writes_starved))
-                       goto dispatch_writes;
-
-               data_dir = READ;
-
-               goto dispatch_find_request;
-       }
-
-       /*
-        * either there are no reads, or writes have been starved long enough
-        */
-
-       if (writes) {
-dispatch_writes:
-               BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));
-
-               dd->starved = 0;
-
-               data_dir = WRITE;
-
-               goto dispatch_find_request;
-       }
-
-       return 0;
-
-dispatch_find_request:
-       /*
-        * we are not running a batch, find best request for selected data_dir
-        */
-       if (deadline_check_fifo(dd, data_dir)) {
-               /* An expired request exists - satisfy it */
-               dd->batching = 0;
-               drq = list_entry_fifo(dd->fifo_list[data_dir].next);
-               
-       } else if (dd->next_drq[data_dir]) {
-               /*
-                * The last req was the same dir and we have a next request in
-                * sort order. No expired requests so continue on from here.
-                */
-               drq = dd->next_drq[data_dir];
-       } else {
-               /*
-                * The last req was the other direction or we have run out of
-                * higher-sectored requests. Go back to the lowest sectored
-                * request (1 way elevator) and start a new batch.
-                */
-               dd->batching = 0;
-               drq = deadline_find_first_drq(dd, data_dir);
-       }
-
-dispatch_request:
-       /*
-        * drq is the selected appropriate request.
-        */
-       dd->batching++;
-       deadline_move_request(dd, drq);
-
-       return 1;
-}
-
-static int deadline_queue_empty(request_queue_t *q)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-
-       return list_empty(&dd->fifo_list[WRITE])
-               && list_empty(&dd->fifo_list[READ]);
-}
-
-static struct request *
-deadline_former_request(request_queue_t *q, struct request *rq)
-{
-       struct deadline_rq *drq = RQ_DATA(rq);
-       struct rb_node *rbprev = rb_prev(&drq->rb_node);
-
-       if (rbprev)
-               return rb_entry_drq(rbprev)->request;
-
-       return NULL;
-}
-
-static struct request *
-deadline_latter_request(request_queue_t *q, struct request *rq)
-{
-       struct deadline_rq *drq = RQ_DATA(rq);
-       struct rb_node *rbnext = rb_next(&drq->rb_node);
-
-       if (rbnext)
-               return rb_entry_drq(rbnext)->request;
-
-       return NULL;
-}
-
-static void deadline_exit_queue(elevator_t *e)
-{
-       struct deadline_data *dd = e->elevator_data;
-
-       BUG_ON(!list_empty(&dd->fifo_list[READ]));
-       BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
-
-       mempool_destroy(dd->drq_pool);
-       kfree(dd->hash);
-       kfree(dd);
-}
-
-/*
- * initialize elevator private data (deadline_data), and alloc a drq for
- * each request on the free lists
- */
-static int deadline_init_queue(request_queue_t *q, elevator_t *e)
-{
-       struct deadline_data *dd;
-       int i;
-
-       if (!drq_pool)
-               return -ENOMEM;
-
-       dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
-       if (!dd)
-               return -ENOMEM;
-       memset(dd, 0, sizeof(*dd));
-
-       dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
-                               GFP_KERNEL, q->node);
-       if (!dd->hash) {
-               kfree(dd);
-               return -ENOMEM;
-       }
-
-       dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
-                                       mempool_free_slab, drq_pool, q->node);
-       if (!dd->drq_pool) {
-               kfree(dd->hash);
-               kfree(dd);
-               return -ENOMEM;
-       }
-
-       for (i = 0; i < DL_HASH_ENTRIES; i++)
-               INIT_LIST_HEAD(&dd->hash[i]);
-
-       INIT_LIST_HEAD(&dd->fifo_list[READ]);
-       INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
-       dd->sort_list[READ] = RB_ROOT;
-       dd->sort_list[WRITE] = RB_ROOT;
-       dd->fifo_expire[READ] = read_expire;
-       dd->fifo_expire[WRITE] = write_expire;
-       dd->writes_starved = writes_starved;
-       dd->front_merges = 1;
-       dd->fifo_batch = fifo_batch;
-       e->elevator_data = dd;
-       return 0;
-}
-
-static void deadline_put_request(request_queue_t *q, struct request *rq)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-       struct deadline_rq *drq = RQ_DATA(rq);
-
-       mempool_free(drq, dd->drq_pool);
-       rq->elevator_private = NULL;
-}
-
-static int
-deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
-                    gfp_t gfp_mask)
-{
-       struct deadline_data *dd = q->elevator->elevator_data;
-       struct deadline_rq *drq;
-
-       drq = mempool_alloc(dd->drq_pool, gfp_mask);
-       if (drq) {
-               memset(drq, 0, sizeof(*drq));
-               RB_CLEAR(&drq->rb_node);
-               drq->request = rq;
-
-               INIT_LIST_HEAD(&drq->hash);
-               drq->on_hash = 0;
-
-               INIT_LIST_HEAD(&drq->fifo);
-
-               rq->elevator_private = drq;
-               return 0;
-       }
-
-       return 1;
-}
-
-/*
- * sysfs parts below
- */
-struct deadline_fs_entry {
-       struct attribute attr;
-       ssize_t (*show)(struct deadline_data *, char *);
-       ssize_t (*store)(struct deadline_data *, const char *, size_t);
-};
-
-static ssize_t
-deadline_var_show(int var, char *page)
-{
-       return sprintf(page, "%d\n", var);
-}
-
-static ssize_t
-deadline_var_store(int *var, const char *page, size_t count)
-{
-       char *p = (char *) page;
-
-       *var = simple_strtol(p, &p, 10);
-       return count;
-}
-
-#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                           \
-static ssize_t __FUNC(struct deadline_data *dd, char *page)            \
-{                                                                      \
-       int __data = __VAR;                                     \
-       if (__CONV)                                                     \
-               __data = jiffies_to_msecs(__data);                      \
-       return deadline_var_show(__data, (page));                       \
-}
-SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ], 1);
-SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE], 1);
-SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved, 0);
-SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges, 0);
-SHOW_FUNCTION(deadline_fifobatch_show, dd->fifo_batch, 0);
-#undef SHOW_FUNCTION
-
-#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                        \
-static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count)        \
-{                                                                      \
-       int __data;                                                     \
-       int ret = deadline_var_store(&__data, (page), count);           \
-       if (__data < (MIN))                                             \
-               __data = (MIN);                                         \
-       else if (__data > (MAX))                                        \
-               __data = (MAX);                                         \
-       if (__CONV)                                                     \
-               *(__PTR) = msecs_to_jiffies(__data);                    \
-       else                                                            \
-               *(__PTR) = __data;                                      \
-       return ret;                                                     \
-}
-STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
-STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
-STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
-STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1, 0);
-STORE_FUNCTION(deadline_fifobatch_store, &dd->fifo_batch, 0, INT_MAX, 0);
-#undef STORE_FUNCTION
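
/*
 * Expansion shown for readability, not part of the original file: with the
 * macros above, SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ], 1)
 * and STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1)
 * generate, apart from whitespace and the constant-folded __CONV branches, roughly:
 *
 *      static ssize_t deadline_readexpire_show(struct deadline_data *dd, char *page)
 *      {
 *              int __data = jiffies_to_msecs(dd->fifo_expire[READ]);
 *              return deadline_var_show(__data, page);
 *      }
 *
 *      static ssize_t deadline_readexpire_store(struct deadline_data *dd, const char *page, size_t count)
 *      {
 *              int __data;
 *              int ret = deadline_var_store(&__data, page, count);
 *
 *              if (__data < 0)
 *                      __data = 0;
 *              else if (__data > INT_MAX)
 *                      __data = INT_MAX;
 *              dd->fifo_expire[READ] = msecs_to_jiffies(__data);
 *              return ret;
 *      }
 *
 * i.e. read_expire lives in jiffies internally but is shown and set in
 * milliseconds through sysfs.
 */
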
-
-static struct deadline_fs_entry deadline_readexpire_entry = {
-       .attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
-       .show = deadline_readexpire_show,
-       .store = deadline_readexpire_store,
-};
-static struct deadline_fs_entry deadline_writeexpire_entry = {
-       .attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
-       .show = deadline_writeexpire_show,
-       .store = deadline_writeexpire_store,
-};
-static struct deadline_fs_entry deadline_writesstarved_entry = {
-       .attr = {.name = "writes_starved", .mode = S_IRUGO | S_IWUSR },
-       .show = deadline_writesstarved_show,
-       .store = deadline_writesstarved_store,
-};
-static struct deadline_fs_entry deadline_frontmerges_entry = {
-       .attr = {.name = "front_merges", .mode = S_IRUGO | S_IWUSR },
-       .show = deadline_frontmerges_show,
-       .store = deadline_frontmerges_store,
-};
-static struct deadline_fs_entry deadline_fifobatch_entry = {
-       .attr = {.name = "fifo_batch", .mode = S_IRUGO | S_IWUSR },
-       .show = deadline_fifobatch_show,
-       .store = deadline_fifobatch_store,
-};
-
-static struct attribute *default_attrs[] = {
-       &deadline_readexpire_entry.attr,
-       &deadline_writeexpire_entry.attr,
-       &deadline_writesstarved_entry.attr,
-       &deadline_frontmerges_entry.attr,
-       &deadline_fifobatch_entry.attr,
-       NULL,
-};
-
-#define to_deadline(atr) container_of((atr), struct deadline_fs_entry, attr)
-
-static ssize_t
-deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
-       elevator_t *e = container_of(kobj, elevator_t, kobj);
-       struct deadline_fs_entry *entry = to_deadline(attr);
-
-       if (!entry->show)
-               return -EIO;
-
-       return entry->show(e->elevator_data, page);
-}
-
-static ssize_t
-deadline_attr_store(struct kobject *kobj, struct attribute *attr,
-                   const char *page, size_t length)
-{
-       elevator_t *e = container_of(kobj, elevator_t, kobj);
-       struct deadline_fs_entry *entry = to_deadline(attr);
-
-       if (!entry->store)
-               return -EIO;
-
-       return entry->store(e->elevator_data, page, length);
-}
-
-static struct sysfs_ops deadline_sysfs_ops = {
-       .show   = deadline_attr_show,
-       .store  = deadline_attr_store,
-};
-
-static struct kobj_type deadline_ktype = {
-       .sysfs_ops      = &deadline_sysfs_ops,
-       .default_attrs  = default_attrs,
-};
-
-static struct elevator_type iosched_deadline = {
-       .ops = {
-               .elevator_merge_fn =            deadline_merge,
-               .elevator_merged_fn =           deadline_merged_request,
-               .elevator_merge_req_fn =        deadline_merged_requests,
-               .elevator_dispatch_fn =         deadline_dispatch_requests,
-               .elevator_add_req_fn =          deadline_add_request,
-               .elevator_queue_empty_fn =      deadline_queue_empty,
-               .elevator_former_req_fn =       deadline_former_request,
-               .elevator_latter_req_fn =       deadline_latter_request,
-               .elevator_set_req_fn =          deadline_set_request,
-               .elevator_put_req_fn =          deadline_put_request,
-               .elevator_init_fn =             deadline_init_queue,
-               .elevator_exit_fn =             deadline_exit_queue,
-       },
-
-       .elevator_ktype = &deadline_ktype,
-       .elevator_name = "deadline",
-       .elevator_owner = THIS_MODULE,
-};
-
-static int __init deadline_init(void)
-{
-       int ret;
-
-       drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
-                                    0, 0, NULL, NULL);
-
-       if (!drq_pool)
-               return -ENOMEM;
-
-       ret = elv_register(&iosched_deadline);
-       if (ret)
-               kmem_cache_destroy(drq_pool);
-
-       return ret;
-}
-
-static void __exit deadline_exit(void)
-{
-       kmem_cache_destroy(drq_pool);
-       elv_unregister(&iosched_deadline);
-}
-
-module_init(deadline_init);
-module_exit(deadline_exit);
-
-MODULE_AUTHOR("Jens Axboe");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("deadline IO scheduler");
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
deleted file mode 100644 (file)
index d4a49a3..0000000
+++ /dev/null
@@ -1,802 +0,0 @@
-/*
- *  linux/drivers/block/elevator.c
- *
- *  Block device elevator/IO-scheduler.
- *
- *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
- *
- * 30042000 Jens Axboe <axboe@suse.de> :
- *
- * Split the elevator a bit so that it is possible to choose a different
- * one or even write a new "plug in". There are three pieces:
- * - elevator_fn, inserts a new request in the queue list
- * - elevator_merge_fn, decides whether a new buffer can be merged with
- *   an existing request
- * - elevator_dequeue_fn, called when a request is taken off the active list
- *
- * 20082000 Dave Jones <davej@suse.de> :
- * Removed tests for max-bomb-segments, which was breaking elvtune
- *  when run without -bN
- *
- * Jens:
- * - Rework again to work with bio instead of buffer_heads
- * - lose the bi_dev comparisons; partition handling is correct now
- * - completely modularize elevator setup and teardown
- *
- */
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/blkdev.h>
-#include <linux/elevator.h>
-#include <linux/bio.h>
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/compiler.h>
-#include <linux/delay.h>
-
-#include <asm/uaccess.h>
-
-static DEFINE_SPINLOCK(elv_list_lock);
-static LIST_HEAD(elv_list);
-
-/*
- * can we safely merge with this request?
- */
-inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
-{
-       if (!rq_mergeable(rq))
-               return 0;
-
-       /*
-        * different data direction or already started, don't merge
-        */
-       if (bio_data_dir(bio) != rq_data_dir(rq))
-               return 0;
-
-       /*
-        * same device and no special stuff set, merge is ok
-        */
-       if (rq->rq_disk == bio->bi_bdev->bd_disk &&
-           !rq->waiting && !rq->special)
-               return 1;
-
-       return 0;
-}
-EXPORT_SYMBOL(elv_rq_merge_ok);
-
-inline int elv_try_merge(struct request *__rq, struct bio *bio)
-{
-       int ret = ELEVATOR_NO_MERGE;
-
-       /*
-        * we can merge and sequence is ok, check if it's possible
-        */
-       if (elv_rq_merge_ok(__rq, bio)) {
-               if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
-                       ret = ELEVATOR_BACK_MERGE;
-               else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
-                       ret = ELEVATOR_FRONT_MERGE;
-       }
-
-       return ret;
-}
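
/*
 * Illustration added for this writeup, not part of the original file:
 * the geometry elv_try_merge() tests.  A bio can be glued to the BACK of a
 * request when the request ends exactly where the bio starts, and to the
 * FRONT when the bio ends exactly where the request starts.  Sector
 * numbers are made up for the example.
 */
#include <stdio.h>

enum { NO_MERGE, BACK_MERGE, FRONT_MERGE };

static int classify(unsigned long rq_sector, unsigned long rq_nr_sectors,
                    unsigned long bio_sector, unsigned long bio_sectors)
{
        if (rq_sector + rq_nr_sectors == bio_sector)
                return BACK_MERGE;              /* request [100,108) + bio at 108 */
        if (rq_sector - bio_sectors == bio_sector)
                return FRONT_MERGE;             /* bio [92,100) + request at 100 */
        return NO_MERGE;
}

int main(void)
{
        printf("%d\n", classify(100, 8, 108, 8));   /* 1: back merge  */
        printf("%d\n", classify(100, 8,  92, 8));   /* 2: front merge */
        printf("%d\n", classify(100, 8, 200, 8));   /* 0: no merge    */
        return 0;
}
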
-EXPORT_SYMBOL(elv_try_merge);
-
-static struct elevator_type *elevator_find(const char *name)
-{
-       struct elevator_type *e = NULL;
-       struct list_head *entry;
-
-       list_for_each(entry, &elv_list) {
-               struct elevator_type *__e;
-
-               __e = list_entry(entry, struct elevator_type, list);
-
-               if (!strcmp(__e->elevator_name, name)) {
-                       e = __e;
-                       break;
-               }
-       }
-
-       return e;
-}
-
-static void elevator_put(struct elevator_type *e)
-{
-       module_put(e->elevator_owner);
-}
-
-static struct elevator_type *elevator_get(const char *name)
-{
-       struct elevator_type *e;
-
-       spin_lock_irq(&elv_list_lock);
-
-       e = elevator_find(name);
-       if (e && !try_module_get(e->elevator_owner))
-               e = NULL;
-
-       spin_unlock_irq(&elv_list_lock);
-
-       return e;
-}
-
-static int elevator_attach(request_queue_t *q, struct elevator_type *e,
-                          struct elevator_queue *eq)
-{
-       int ret = 0;
-
-       memset(eq, 0, sizeof(*eq));
-       eq->ops = &e->ops;
-       eq->elevator_type = e;
-
-       q->elevator = eq;
-
-       if (eq->ops->elevator_init_fn)
-               ret = eq->ops->elevator_init_fn(q, eq);
-
-       return ret;
-}
-
-static char chosen_elevator[16];
-
-static void elevator_setup_default(void)
-{
-       struct elevator_type *e;
-
-       /*
-        * If default has not been set, use the compiled-in selection.
-        */
-       if (!chosen_elevator[0])
-               strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);
-
-       /*
-        * If the given scheduler is not available, fall back to no-op.
-        */
-       if ((e = elevator_find(chosen_elevator)))
-               elevator_put(e);
-       else
-               strcpy(chosen_elevator, "noop");
-}
-
-static int __init elevator_setup(char *str)
-{
-       strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
-       return 0;
-}
-
-__setup("elevator=", elevator_setup);
-
-int elevator_init(request_queue_t *q, char *name)
-{
-       struct elevator_type *e = NULL;
-       struct elevator_queue *eq;
-       int ret = 0;
-
-       INIT_LIST_HEAD(&q->queue_head);
-       q->last_merge = NULL;
-       q->end_sector = 0;
-       q->boundary_rq = NULL;
-
-       elevator_setup_default();
-
-       if (!name)
-               name = chosen_elevator;
-
-       e = elevator_get(name);
-       if (!e)
-               return -EINVAL;
-
-       eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
-       if (!eq) {
-               elevator_put(e->elevator_type);
-               return -ENOMEM;
-       }
-
-       ret = elevator_attach(q, e, eq);
-       if (ret) {
-               kfree(eq);
-               elevator_put(e->elevator_type);
-       }
-
-       return ret;
-}
-
-void elevator_exit(elevator_t *e)
-{
-       if (e->ops->elevator_exit_fn)
-               e->ops->elevator_exit_fn(e);
-
-       elevator_put(e->elevator_type);
-       e->elevator_type = NULL;
-       kfree(e);
-}
-
-/*
- * Insert rq into the dispatch queue of q, sort-inserted by sector while
- * honoring the current scheduling boundary.  Queue lock must be held on
- * entry.  To be used by specific elevators.
- */
-void elv_dispatch_sort(request_queue_t *q, struct request *rq)
-{
-       sector_t boundary;
-       struct list_head *entry;
-
-       if (q->last_merge == rq)
-               q->last_merge = NULL;
-
-       boundary = q->end_sector;
-
-       list_for_each_prev(entry, &q->queue_head) {
-               struct request *pos = list_entry_rq(entry);
-
-               if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
-                       break;
-               if (rq->sector >= boundary) {
-                       if (pos->sector < boundary)
-                               continue;
-               } else {
-                       if (pos->sector >= boundary)
-                               break;
-               }
-               if (rq->sector >= pos->sector)
-                       break;
-       }
-
-       list_add(&rq->queuelist, entry);
-}
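
/*
 * Illustration added for this writeup, not part of the original file:
 * the ordering elv_dispatch_sort() maintains.  The dispatch list stays
 * sorted by sector but is "wrapped" at the current scheduling boundary
 * (q->end_sector, roughly the head position): entries at or past the
 * boundary come first in ascending order, then the low-sector entries
 * follow, also ascending.  Barrier/started requests, which the real loop
 * refuses to pass, are ignored here.  The same ordering for a plain array:
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned long boundary = 500;    /* pretend head position */

static int cmp(const void *a, const void *b)
{
        unsigned long x = *(const unsigned long *)a;
        unsigned long y = *(const unsigned long *)b;
        int wx = x < boundary;          /* 1 when the entry has wrapped */
        int wy = y < boundary;

        if (wx != wy)
                return wx - wy;         /* unwrapped (>= boundary) group first */
        return (x > y) - (x < y);       /* then ascending by sector */
}

int main(void)
{
        unsigned long sectors[] = { 100, 900, 520, 40, 700 };
        int i, n = sizeof(sectors) / sizeof(sectors[0]);

        qsort(sectors, n, sizeof(sectors[0]), cmp);
        for (i = 0; i < n; i++)
                printf("%lu ", sectors[i]);     /* 520 700 900 40 100 */
        printf("\n");
        return 0;
}
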
-
-int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
-{
-       elevator_t *e = q->elevator;
-       int ret;
-
-       if (q->last_merge) {
-               ret = elv_try_merge(q->last_merge, bio);
-               if (ret != ELEVATOR_NO_MERGE) {
-                       *req = q->last_merge;
-                       return ret;
-               }
-       }
-
-       if (e->ops->elevator_merge_fn)
-               return e->ops->elevator_merge_fn(q, req, bio);
-
-       return ELEVATOR_NO_MERGE;
-}
-
-void elv_merged_request(request_queue_t *q, struct request *rq)
-{
-       elevator_t *e = q->elevator;
-
-       if (e->ops->elevator_merged_fn)
-               e->ops->elevator_merged_fn(q, rq);
-
-       q->last_merge = rq;
-}
-
-void elv_merge_requests(request_queue_t *q, struct request *rq,
-                            struct request *next)
-{
-       elevator_t *e = q->elevator;
-
-       if (e->ops->elevator_merge_req_fn)
-               e->ops->elevator_merge_req_fn(q, rq, next);
-
-       q->last_merge = rq;
-}
-
-void elv_requeue_request(request_queue_t *q, struct request *rq)
-{
-       elevator_t *e = q->elevator;
-
-       /*
-        * it already went through dequeue, we need to decrement the
-        * in_flight count again
-        */
-       if (blk_account_rq(rq)) {
-               q->in_flight--;
-               if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
-                       e->ops->elevator_deactivate_req_fn(q, rq);
-       }
-
-       rq->flags &= ~REQ_STARTED;
-
-       /*
-        * if this is the flush, requeue the original instead and drop the flush
-        */
-       if (rq->flags & REQ_BAR_FLUSH) {
-               clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
-               rq = rq->end_io_data;
-       }
-
-       __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
-}
-
-void __elv_add_request(request_queue_t *q, struct request *rq, int where,
-                      int plug)
-{
-       if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
-               /*
-                * barriers implicitly indicate back insertion
-                */
-               if (where == ELEVATOR_INSERT_SORT)
-                       where = ELEVATOR_INSERT_BACK;
-
-               /*
-                * this request is scheduling boundary, update end_sector
-                */
-               if (blk_fs_request(rq)) {
-                       q->end_sector = rq_end_sector(rq);
-                       q->boundary_rq = rq;
-               }
-       } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
-               where = ELEVATOR_INSERT_BACK;
-
-       if (plug)
-               blk_plug_device(q);
-
-       rq->q = q;
-
-       switch (where) {
-       case ELEVATOR_INSERT_FRONT:
-               rq->flags |= REQ_SOFTBARRIER;
-
-               list_add(&rq->queuelist, &q->queue_head);
-               break;
-
-       case ELEVATOR_INSERT_BACK:
-               rq->flags |= REQ_SOFTBARRIER;
-
-               while (q->elevator->ops->elevator_dispatch_fn(q, 1))
-                       ;
-               list_add_tail(&rq->queuelist, &q->queue_head);
-               /*
-                * We kick the queue here for the following reasons.
-                * - The elevator might have returned NULL previously
-                *   to delay requests and returned them now.  As the
-                *   queue wasn't empty before this request, ll_rw_blk
-                *   won't run the queue on return, resulting in hang.
-                * - Usually, back inserted requests won't be merged
-                *   with anything.  There's no point in delaying queue
-                *   processing.
-                */
-               blk_remove_plug(q);
-               q->request_fn(q);
-               break;
-
-       case ELEVATOR_INSERT_SORT:
-               BUG_ON(!blk_fs_request(rq));
-               rq->flags |= REQ_SORTED;
-               if (q->last_merge == NULL && rq_mergeable(rq))
-                       q->last_merge = rq;
-               /*
-                * Some ioscheds (cfq) run q->request_fn directly, so
-                * rq cannot be accessed after calling
-                * elevator_add_req_fn.
-                */
-               q->elevator->ops->elevator_add_req_fn(q, rq);
-               break;
-
-       default:
-               printk(KERN_ERR "%s: bad insertion point %d\n",
-                      __FUNCTION__, where);
-               BUG();
-       }
-
-       if (blk_queue_plugged(q)) {
-               int nrq = q->rq.count[READ] + q->rq.count[WRITE]
-                       - q->in_flight;
-
-               if (nrq >= q->unplug_thresh)
-                       __generic_unplug_device(q);
-       }
-}
-
-void elv_add_request(request_queue_t *q, struct request *rq, int where,
-                    int plug)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-       __elv_add_request(q, rq, where, plug);
-       spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-static inline struct request *__elv_next_request(request_queue_t *q)
-{
-       struct request *rq;
-
-       if (unlikely(list_empty(&q->queue_head) &&
-                    !q->elevator->ops->elevator_dispatch_fn(q, 0)))
-               return NULL;
-
-       rq = list_entry_rq(q->queue_head.next);
-
-       /*
-        * if this is a barrier write and the device has to issue a
-        * flush sequence to support it, check how far we are
-        */
-       if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
-               BUG_ON(q->ordered == QUEUE_ORDERED_NONE);
-
-               if (q->ordered == QUEUE_ORDERED_FLUSH &&
-                   !blk_barrier_preflush(rq))
-                       rq = blk_start_pre_flush(q, rq);
-       }
-
-       return rq;
-}
-
-struct request *elv_next_request(request_queue_t *q)
-{
-       struct request *rq;
-       int ret;
-
-       while ((rq = __elv_next_request(q)) != NULL) {
-               if (!(rq->flags & REQ_STARTED)) {
-                       elevator_t *e = q->elevator;
-
-                       /*
-                        * This is the first time the device driver
-                        * sees this request (possibly after
-                        * requeueing).  Notify IO scheduler.
-                        */
-                       if (blk_sorted_rq(rq) &&
-                           e->ops->elevator_activate_req_fn)
-                               e->ops->elevator_activate_req_fn(q, rq);
-
-                       /*
-                        * just mark as started even if we don't start
-                        * it, a request that has been delayed should
-                        * not be passed by new incoming requests
-                        */
-                       rq->flags |= REQ_STARTED;
-               }
-
-               if (!q->boundary_rq || q->boundary_rq == rq) {
-                       q->end_sector = rq_end_sector(rq);
-                       q->boundary_rq = NULL;
-               }
-
-               if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
-                       break;
-
-               ret = q->prep_rq_fn(q, rq);
-               if (ret == BLKPREP_OK) {
-                       break;
-               } else if (ret == BLKPREP_DEFER) {
-                       /*
-                        * the request may have been (partially) prepped.
-                        * we need to keep this request in the front to
-                        * avoid resource deadlock.  REQ_STARTED will
-                        * prevent other fs requests from passing this one.
-                        */
-                       rq = NULL;
-                       break;
-               } else if (ret == BLKPREP_KILL) {
-                       int nr_bytes = rq->hard_nr_sectors << 9;
-
-                       if (!nr_bytes)
-                               nr_bytes = rq->data_len;
-
-                       blkdev_dequeue_request(rq);
-                       rq->flags |= REQ_QUIET;
-                       end_that_request_chunk(rq, 0, nr_bytes);
-                       end_that_request_last(rq);
-               } else {
-                       printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
-                                                               ret);
-                       break;
-               }
-       }
-
-       return rq;
-}
-
-void elv_dequeue_request(request_queue_t *q, struct request *rq)
-{
-       BUG_ON(list_empty(&rq->queuelist));
-
-       list_del_init(&rq->queuelist);
-
-       /*
-        * the time frame between a request being removed from the lists
-        * and when it is freed is accounted as io that is in progress on
-        * the driver side.
-        */
-       if (blk_account_rq(rq))
-               q->in_flight++;
-}
-
-int elv_queue_empty(request_queue_t *q)
-{
-       elevator_t *e = q->elevator;
-
-       if (!list_empty(&q->queue_head))
-               return 0;
-
-       if (e->ops->elevator_queue_empty_fn)
-               return e->ops->elevator_queue_empty_fn(q);
-
-       return 1;
-}
-
-struct request *elv_latter_request(request_queue_t *q, struct request *rq)
-{
-       struct list_head *next;
-
-       elevator_t *e = q->elevator;
-
-       if (e->ops->elevator_latter_req_fn)
-               return e->ops->elevator_latter_req_fn(q, rq);
-
-       next = rq->queuelist.next;
-       if (next != &q->queue_head && next != &rq->queuelist)
-               return list_entry_rq(next);
-
-       return NULL;
-}
-
-struct request *elv_former_request(request_queue_t *q, struct request *rq)
-{
-       struct list_head *prev;
-
-       elevator_t *e = q->elevator;
-
-       if (e->ops->elevator_former_req_fn)
-               return e->ops->elevator_former_req_fn(q, rq);
-
-       prev = rq->queuelist.prev;
-       if (prev != &q->queue_head && prev != &rq->queuelist)
-               return list_entry_rq(prev);
-
-       return NULL;
-}
-
-int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
-                   gfp_t gfp_mask)
-{
-       elevator_t *e = q->elevator;
-
-       if (e->ops->elevator_set_req_fn)
-               return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);
-
-       rq->elevator_private = NULL;
-       return 0;
-}
-
-void elv_put_request(request_queue_t *q, struct request *rq)
-{
-       elevator_t *e = q->elevator;
-
-       if (e->ops->elevator_put_req_fn)
-               e->ops->elevator_put_req_fn(q, rq);
-}
-
-int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
-{
-       elevator_t *e = q->elevator;
-
-       if (e->ops->elevator_may_queue_fn)
-               return e->ops->elevator_may_queue_fn(q, rw, bio);
-
-       return ELV_MQUEUE_MAY;
-}
-
-void elv_completed_request(request_queue_t *q, struct request *rq)
-{
-       elevator_t *e = q->elevator;
-
-       /*
-        * request is released from the driver, io must be done
-        */
-       if (blk_account_rq(rq)) {
-               q->in_flight--;
-               if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
-                       e->ops->elevator_completed_req_fn(q, rq);
-       }
-}
-
-int elv_register_queue(struct request_queue *q)
-{
-       elevator_t *e = q->elevator;
-
-       e->kobj.parent = kobject_get(&q->kobj);
-       if (!e->kobj.parent)
-               return -EBUSY;
-
-       snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
-       e->kobj.ktype = e->elevator_type->elevator_ktype;
-
-       return kobject_register(&e->kobj);
-}
-
-void elv_unregister_queue(struct request_queue *q)
-{
-       if (q) {
-               elevator_t *e = q->elevator;
-               kobject_unregister(&e->kobj);
-               kobject_put(&q->kobj);
-       }
-}
-
-int elv_register(struct elevator_type *e)
-{
-       spin_lock_irq(&elv_list_lock);
-       if (elevator_find(e->elevator_name))
-               BUG();
-       list_add_tail(&e->list, &elv_list);
-       spin_unlock_irq(&elv_list_lock);
-
-       printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
-       if (!strcmp(e->elevator_name, chosen_elevator))
-               printk(" (default)");
-       printk("\n");
-       return 0;
-}
-EXPORT_SYMBOL_GPL(elv_register);
-
-void elv_unregister(struct elevator_type *e)
-{
-       struct task_struct *g, *p;
-
-       /*
-        * Iterate every thread in the process to remove the io contexts.
-        */
-       read_lock(&tasklist_lock);
-       do_each_thread(g, p) {
-               struct io_context *ioc = p->io_context;
-               if (ioc && ioc->cic) {
-                       ioc->cic->exit(ioc->cic);
-                       ioc->cic->dtor(ioc->cic);
-                       ioc->cic = NULL;
-               }
-               if (ioc && ioc->aic) {
-                       ioc->aic->exit(ioc->aic);
-                       ioc->aic->dtor(ioc->aic);
-                       ioc->aic = NULL;
-               }
-       } while_each_thread(g, p);
-       read_unlock(&tasklist_lock);
-
-       spin_lock_irq(&elv_list_lock);
-       list_del_init(&e->list);
-       spin_unlock_irq(&elv_list_lock);
-}
-EXPORT_SYMBOL_GPL(elv_unregister);
-
-/*
- * switch to new_e io scheduler. be careful not to introduce deadlocks -
- * we don't free the old io scheduler, before we have allocated what we
- * need for the new one. this way we have a chance of going back to the old
- * one, if the new one fails init for some reason.
- */
-static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
-{
-       elevator_t *old_elevator, *e;
-
-       /*
-        * Allocate new elevator
-        */
-       e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
-       if (!e)
-               goto error;
-
-       /*
-        * Turn on BYPASS and drain all requests w/ elevator private data
-        */
-       spin_lock_irq(q->queue_lock);
-
-       set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-
-       while (q->elevator->ops->elevator_dispatch_fn(q, 1))
-               ;
-
-       while (q->rq.elvpriv) {
-               spin_unlock_irq(q->queue_lock);
-               msleep(10);
-               spin_lock_irq(q->queue_lock);
-       }
-
-       spin_unlock_irq(q->queue_lock);
-
-       /*
-        * unregister old elevator data
-        */
-       elv_unregister_queue(q);
-       old_elevator = q->elevator;
-
-       /*
-        * attach and start new elevator
-        */
-       if (elevator_attach(q, new_e, e))
-               goto fail;
-
-       if (elv_register_queue(q))
-               goto fail_register;
-
-       /*
-        * finally exit old elevator and turn off BYPASS.
-        */
-       elevator_exit(old_elevator);
-       clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-       return;
-
-fail_register:
-       /*
-        * switch failed, exit the new io scheduler and reattach the old
-        * one again (along with re-adding the sysfs dir)
-        */
-       elevator_exit(e);
-       e = NULL;
-fail:
-       q->elevator = old_elevator;
-       elv_register_queue(q);
-       clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-       kfree(e);
-error:
-       elevator_put(new_e);
-       printk(KERN_ERR "elevator: switch to %s failed\n",new_e->elevator_name);
-}
-
-ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
-{
-       char elevator_name[ELV_NAME_MAX];
-       struct elevator_type *e;
-
-       memset(elevator_name, 0, sizeof(elevator_name));
-       strncpy(elevator_name, name, sizeof(elevator_name));
-
-       if (elevator_name[strlen(elevator_name) - 1] == '\n')
-               elevator_name[strlen(elevator_name) - 1] = '\0';
-
-       e = elevator_get(elevator_name);
-       if (!e) {
-               printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
-               return -EINVAL;
-       }
-
-       if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
-               elevator_put(e);
-               return count;
-       }
-
-       elevator_switch(q, e);
-       return count;
-}
-
-ssize_t elv_iosched_show(request_queue_t *q, char *name)
-{
-       elevator_t *e = q->elevator;
-       struct elevator_type *elv = e->elevator_type;
-       struct list_head *entry;
-       int len = 0;
-
-       spin_lock_irq(q->queue_lock);
-       list_for_each(entry, &elv_list) {
-               struct elevator_type *__e;
-
-               __e = list_entry(entry, struct elevator_type, list);
-               if (!strcmp(elv->elevator_name, __e->elevator_name))
-                       len += sprintf(name+len, "[%s] ", elv->elevator_name);
-               else
-                       len += sprintf(name+len, "%s ", __e->elevator_name);
-       }
-       spin_unlock_irq(q->queue_lock);
-
-       len += sprintf(len+name, "\n");
-       return len;
-}
-
-EXPORT_SYMBOL(elv_dispatch_sort);
-EXPORT_SYMBOL(elv_add_request);
-EXPORT_SYMBOL(__elv_add_request);
-EXPORT_SYMBOL(elv_requeue_request);
-EXPORT_SYMBOL(elv_next_request);
-EXPORT_SYMBOL(elv_dequeue_request);
-EXPORT_SYMBOL(elv_queue_empty);
-EXPORT_SYMBOL(elv_completed_request);
-EXPORT_SYMBOL(elevator_exit);
-EXPORT_SYMBOL(elevator_init);
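
The elv_iosched_show/store pair above is what backs the per-queue scheduler attribute in sysfs, listing the registered elevators with the active one in brackets and accepting a new name to trigger elevator_switch(). A small userspace sketch of driving it; the attribute path and the device name are assumptions about the queue sysfs layout rather than something spelled out in this file.

#include <stdio.h>

int main(void)
{
        const char *path = "/sys/block/sda/queue/scheduler";    /* device name illustrative */
        char line[128];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }
        if (fgets(line, sizeof(line), f))
                printf("schedulers: %s", line);  /* e.g. "noop [as] deadline cfq" */
        fclose(f);

        f = fopen(path, "w");                    /* needs root */
        if (!f) {
                perror(path);
                return 1;
        }
        fprintf(f, "deadline\n");                /* trailing newline is stripped by the store path */
        fclose(f);
        return 0;
}
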
diff --git a/drivers/block/genhd.c b/drivers/block/genhd.c
deleted file mode 100644 (file)
index 54aec4a..0000000
+++ /dev/null
@@ -1,726 +0,0 @@
-/*
- *  gendisk handling
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/genhd.h>
-#include <linux/kernel.h>
-#include <linux/blkdev.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/kmod.h>
-#include <linux/kobj_map.h>
-#include <linux/buffer_head.h>
-
-#define MAX_PROBE_HASH 255     /* random */
-
-static struct subsystem block_subsys;
-
-static DECLARE_MUTEX(block_subsys_sem);
-
-/*
- * Can be deleted altogether. Later.
- *
- */
-static struct blk_major_name {
-       struct blk_major_name *next;
-       int major;
-       char name[16];
-} *major_names[MAX_PROBE_HASH];
-
-/* index in the above - for now: assume no multimajor ranges */
-static inline int major_to_index(int major)
-{
-       return major % MAX_PROBE_HASH;
-}
-
-#ifdef CONFIG_PROC_FS
-/* get block device names in somewhat random order */
-int get_blkdev_list(char *p, int used)
-{
-       struct blk_major_name *n;
-       int i, len;
-
-       len = snprintf(p, (PAGE_SIZE-used), "\nBlock devices:\n");
-
-       down(&block_subsys_sem);
-       for (i = 0; i < ARRAY_SIZE(major_names); i++) {
-               for (n = major_names[i]; n; n = n->next) {
-                       /*
-                        * If the current string plus the 5 extra characters
-                        * in the line would run us off the page, then we're done
-                        */
-                       if ((len + used + strlen(n->name) + 5) >= PAGE_SIZE)
-                               goto page_full;
-                       len += sprintf(p+len, "%3d %s\n",
-                                      n->major, n->name);
-               }
-       }
-page_full:
-       up(&block_subsys_sem);
-
-       return len;
-}
-#endif
-
-int register_blkdev(unsigned int major, const char *name)
-{
-       struct blk_major_name **n, *p;
-       int index, ret = 0;
-
-       down(&block_subsys_sem);
-
-       /* temporary */
-       if (major == 0) {
-               for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
-                       if (major_names[index] == NULL)
-                               break;
-               }
-
-               if (index == 0) {
-                       printk("register_blkdev: failed to get major for %s\n",
-                              name);
-                       ret = -EBUSY;
-                       goto out;
-               }
-               major = index;
-               ret = major;
-       }
-
-       p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
-       if (p == NULL) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       p->major = major;
-       strlcpy(p->name, name, sizeof(p->name));
-       p->next = NULL;
-       index = major_to_index(major);
-
-       for (n = &major_names[index]; *n; n = &(*n)->next) {
-               if ((*n)->major == major)
-                       break;
-       }
-       if (!*n)
-               *n = p;
-       else
-               ret = -EBUSY;
-
-       if (ret < 0) {
-               printk("register_blkdev: cannot get major %d for %s\n",
-                      major, name);
-               kfree(p);
-       }
-out:
-       up(&block_subsys_sem);
-       return ret;
-}
-
-EXPORT_SYMBOL(register_blkdev);
-
-/* todo: make void - error printk here */
-int unregister_blkdev(unsigned int major, const char *name)
-{
-       struct blk_major_name **n;
-       struct blk_major_name *p = NULL;
-       int index = major_to_index(major);
-       int ret = 0;
-
-       down(&block_subsys_sem);
-       for (n = &major_names[index]; *n; n = &(*n)->next)
-               if ((*n)->major == major)
-                       break;
-       if (!*n || strcmp((*n)->name, name))
-               ret = -EINVAL;
-       else {
-               p = *n;
-               *n = p->next;
-       }
-       up(&block_subsys_sem);
-       kfree(p);
-
-       return ret;
-}
-
-EXPORT_SYMBOL(unregister_blkdev);
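
For context, a minimal hypothetical driver module (name "mydrv", not part of this commit) would pair the two calls above roughly as below; passing 0 asks register_blkdev() to pick a free dynamic major, which it returns on success.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>

static int mydrv_major;                         /* hypothetical driver state */

static int __init mydrv_init(void)
{
        /* 0 == "allocate a dynamic major for me" */
        mydrv_major = register_blkdev(0, "mydrv");
        if (mydrv_major < 0)
                return mydrv_major;
        return 0;
}

static void __exit mydrv_exit(void)
{
        unregister_blkdev(mydrv_major, "mydrv");
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");
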
-
-static struct kobj_map *bdev_map;
-
-/*
- * Register device numbers dev..(dev+range-1)
- * range must be nonzero
- * The hash chain is sorted on range, so that subranges can override.
- */
-void blk_register_region(dev_t dev, unsigned long range, struct module *module,
-                        struct kobject *(*probe)(dev_t, int *, void *),
-                        int (*lock)(dev_t, void *), void *data)
-{
-       kobj_map(bdev_map, dev, range, module, probe, lock, data);
-}
-
-EXPORT_SYMBOL(blk_register_region);
-
-void blk_unregister_region(dev_t dev, unsigned long range)
-{
-       kobj_unmap(bdev_map, dev, range);
-}
-
-EXPORT_SYMBOL(blk_unregister_region);
-
-static struct kobject *exact_match(dev_t dev, int *part, void *data)
-{
-       struct gendisk *p = data;
-       return &p->kobj;
-}
-
-static int exact_lock(dev_t dev, void *data)
-{
-       struct gendisk *p = data;
-
-       if (!get_disk(p))
-               return -1;
-       return 0;
-}
-
-/**
- * add_disk - add partitioning information to kernel list
- * @disk: per-device partitioning information
- *
- * This function registers the partitioning information in @disk
- * with the kernel.
- */
-void add_disk(struct gendisk *disk)
-{
-       disk->flags |= GENHD_FL_UP;
-       blk_register_region(MKDEV(disk->major, disk->first_minor),
-                           disk->minors, NULL, exact_match, exact_lock, disk);
-       register_disk(disk);
-       blk_register_queue(disk);
-}
-
-EXPORT_SYMBOL(add_disk);
-EXPORT_SYMBOL(del_gendisk);    /* in partitions/check.c */
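
Continuing the hypothetical sketch above: before add_disk() a driver normally allocates the gendisk, fills in its identity and capacity, and attaches a request queue. The names mydrv_major, mydrv_fops, mydrv_queue and nr_sectors are assumptions for illustration only.

        struct gendisk *gd = alloc_disk(16);    /* whole disk + 15 partitions */
        if (!gd)
                return -ENOMEM;
        gd->major = mydrv_major;                /* from register_blkdev() */
        gd->first_minor = 0;
        gd->fops = &mydrv_fops;                 /* hypothetical block_device_operations */
        gd->queue = mydrv_queue;                /* hypothetical request queue */
        sprintf(gd->disk_name, "mydrva");
        set_capacity(gd, nr_sectors);           /* size in 512-byte sectors */
        add_disk(gd);
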
-
-void unlink_gendisk(struct gendisk *disk)
-{
-       blk_unregister_queue(disk);
-       blk_unregister_region(MKDEV(disk->major, disk->first_minor),
-                             disk->minors);
-}
-
-#define to_disk(obj) container_of(obj,struct gendisk,kobj)
-
-/**
- * get_gendisk - get partitioning information for a given device
- * @dev: device to get partitioning information for
- *
- * This function gets the structure containing partitioning
- * information for the given device @dev.
- */
-struct gendisk *get_gendisk(dev_t dev, int *part)
-{
-       struct kobject *kobj = kobj_lookup(bdev_map, dev, part);
-       return  kobj ? to_disk(kobj) : NULL;
-}
-
-#ifdef CONFIG_PROC_FS
-/* iterator */
-static void *part_start(struct seq_file *part, loff_t *pos)
-{
-       struct list_head *p;
-       loff_t l = *pos;
-
-       down(&block_subsys_sem);
-       list_for_each(p, &block_subsys.kset.list)
-               if (!l--)
-                       return list_entry(p, struct gendisk, kobj.entry);
-       return NULL;
-}
-
-static void *part_next(struct seq_file *part, void *v, loff_t *pos)
-{
-       struct list_head *p = ((struct gendisk *)v)->kobj.entry.next;
-       ++*pos;
-       return p==&block_subsys.kset.list ? NULL : 
-               list_entry(p, struct gendisk, kobj.entry);
-}
-
-static void part_stop(struct seq_file *part, void *v)
-{
-       up(&block_subsys_sem);
-}
-
-static int show_partition(struct seq_file *part, void *v)
-{
-       struct gendisk *sgp = v;
-       int n;
-       char buf[BDEVNAME_SIZE];
-
-       if (&sgp->kobj.entry == block_subsys.kset.list.next)
-               seq_puts(part, "major minor  #blocks  name\n\n");
-
-       /* Don't show non-partitionable removable devices or empty devices */
-       if (!get_capacity(sgp) ||
-                       (sgp->minors == 1 && (sgp->flags & GENHD_FL_REMOVABLE)))
-               return 0;
-       if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
-               return 0;
-
-       /* show the full disk and all non-0 size partitions of it */
-       seq_printf(part, "%4d  %4d %10llu %s\n",
-               sgp->major, sgp->first_minor,
-               (unsigned long long)get_capacity(sgp) >> 1,
-               disk_name(sgp, 0, buf));
-       for (n = 0; n < sgp->minors - 1; n++) {
-               if (!sgp->part[n])
-                       continue;
-               if (sgp->part[n]->nr_sects == 0)
-                       continue;
-               seq_printf(part, "%4d  %4d %10llu %s\n",
-                       sgp->major, n + 1 + sgp->first_minor,
-                       (unsigned long long)sgp->part[n]->nr_sects >> 1 ,
-                       disk_name(sgp, n + 1, buf));
-       }
-
-       return 0;
-}
-
-struct seq_operations partitions_op = {
-       .start =part_start,
-       .next = part_next,
-       .stop = part_stop,
-       .show = show_partition
-};
-#endif
-
-
-extern int blk_dev_init(void);
-
-static struct kobject *base_probe(dev_t dev, int *part, void *data)
-{
-       if (request_module("block-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
-               /* Make old-style 2.4 aliases work */
-               request_module("block-major-%d", MAJOR(dev));
-       return NULL;
-}
-
-static int __init genhd_device_init(void)
-{
-       bdev_map = kobj_map_init(base_probe, &block_subsys_sem);
-       blk_dev_init();
-       subsystem_register(&block_subsys);
-       return 0;
-}
-
-subsys_initcall(genhd_device_init);
-
-
-
-/*
- * kobject & sysfs bindings for block devices
- */
-static ssize_t disk_attr_show(struct kobject *kobj, struct attribute *attr,
-                             char *page)
-{
-       struct gendisk *disk = to_disk(kobj);
-       struct disk_attribute *disk_attr =
-               container_of(attr,struct disk_attribute,attr);
-       ssize_t ret = -EIO;
-
-       if (disk_attr->show)
-               ret = disk_attr->show(disk,page);
-       return ret;
-}
-
-static ssize_t disk_attr_store(struct kobject * kobj, struct attribute * attr,
-                              const char *page, size_t count)
-{
-       struct gendisk *disk = to_disk(kobj);
-       struct disk_attribute *disk_attr =
-               container_of(attr,struct disk_attribute,attr);
-       ssize_t ret = 0;
-
-       if (disk_attr->store)
-               ret = disk_attr->store(disk, page, count);
-       return ret;
-}
-
-static struct sysfs_ops disk_sysfs_ops = {
-       .show   = &disk_attr_show,
-       .store  = &disk_attr_store,
-};
-
-static ssize_t disk_uevent_store(struct gendisk * disk,
-                                const char *buf, size_t count)
-{
-       kobject_hotplug(&disk->kobj, KOBJ_ADD);
-       return count;
-}
-static ssize_t disk_dev_read(struct gendisk * disk, char *page)
-{
-       dev_t base = MKDEV(disk->major, disk->first_minor); 
-       return print_dev_t(page, base);
-}
-static ssize_t disk_range_read(struct gendisk * disk, char *page)
-{
-       return sprintf(page, "%d\n", disk->minors);
-}
-static ssize_t disk_removable_read(struct gendisk * disk, char *page)
-{
-       return sprintf(page, "%d\n",
-                      (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
-
-}
-static ssize_t disk_size_read(struct gendisk * disk, char *page)
-{
-       return sprintf(page, "%llu\n", (unsigned long long)get_capacity(disk));
-}
-
-static ssize_t disk_stats_read(struct gendisk * disk, char *page)
-{
-       preempt_disable();
-       disk_round_stats(disk);
-       preempt_enable();
-       return sprintf(page,
-               "%8u %8u %8llu %8u "
-               "%8u %8u %8llu %8u "
-               "%8u %8u %8u"
-               "\n",
-               disk_stat_read(disk, ios[0]), disk_stat_read(disk, merges[0]),
-               (unsigned long long)disk_stat_read(disk, sectors[0]),
-               jiffies_to_msecs(disk_stat_read(disk, ticks[0])),
-               disk_stat_read(disk, ios[1]), disk_stat_read(disk, merges[1]),
-               (unsigned long long)disk_stat_read(disk, sectors[1]),
-               jiffies_to_msecs(disk_stat_read(disk, ticks[1])),
-               disk->in_flight,
-               jiffies_to_msecs(disk_stat_read(disk, io_ticks)),
-               jiffies_to_msecs(disk_stat_read(disk, time_in_queue)));
-}
-static struct disk_attribute disk_attr_uevent = {
-       .attr = {.name = "uevent", .mode = S_IWUSR },
-       .store  = disk_uevent_store
-};
-static struct disk_attribute disk_attr_dev = {
-       .attr = {.name = "dev", .mode = S_IRUGO },
-       .show   = disk_dev_read
-};
-static struct disk_attribute disk_attr_range = {
-       .attr = {.name = "range", .mode = S_IRUGO },
-       .show   = disk_range_read
-};
-static struct disk_attribute disk_attr_removable = {
-       .attr = {.name = "removable", .mode = S_IRUGO },
-       .show   = disk_removable_read
-};
-static struct disk_attribute disk_attr_size = {
-       .attr = {.name = "size", .mode = S_IRUGO },
-       .show   = disk_size_read
-};
-static struct disk_attribute disk_attr_stat = {
-       .attr = {.name = "stat", .mode = S_IRUGO },
-       .show   = disk_stats_read
-};
-
-static struct attribute * default_attrs[] = {
-       &disk_attr_uevent.attr,
-       &disk_attr_dev.attr,
-       &disk_attr_range.attr,
-       &disk_attr_removable.attr,
-       &disk_attr_size.attr,
-       &disk_attr_stat.attr,
-       NULL,
-};
-
-static void disk_release(struct kobject * kobj)
-{
-       struct gendisk *disk = to_disk(kobj);
-       kfree(disk->random);
-       kfree(disk->part);
-       free_disk_stats(disk);
-       kfree(disk);
-}
-
-static struct kobj_type ktype_block = {
-       .release        = disk_release,
-       .sysfs_ops      = &disk_sysfs_ops,
-       .default_attrs  = default_attrs,
-};
-
-extern struct kobj_type ktype_part;
-
-static int block_hotplug_filter(struct kset *kset, struct kobject *kobj)
-{
-       struct kobj_type *ktype = get_ktype(kobj);
-
-       return ((ktype == &ktype_block) || (ktype == &ktype_part));
-}
-
-static int block_hotplug(struct kset *kset, struct kobject *kobj, char **envp,
-                        int num_envp, char *buffer, int buffer_size)
-{
-       struct kobj_type *ktype = get_ktype(kobj);
-       struct device *physdev;
-       struct gendisk *disk;
-       struct hd_struct *part;
-       int length = 0;
-       int i = 0;
-
-       if (ktype == &ktype_block) {
-               disk = container_of(kobj, struct gendisk, kobj);
-               add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size,
-                                   &length, "MINOR=%u", disk->first_minor);
-       } else if (ktype == &ktype_part) {
-               disk = container_of(kobj->parent, struct gendisk, kobj);
-               part = container_of(kobj, struct hd_struct, kobj);
-               add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size,
-                                   &length, "MINOR=%u",
-                                   disk->first_minor + part->partno);
-       } else
-               return 0;
-
-       add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size, &length,
-                           "MAJOR=%u", disk->major);
-
-       /* add physical device, backing this device  */
-       physdev = disk->driverfs_dev;
-       if (physdev) {
-               char *path = kobject_get_path(&physdev->kobj, GFP_KERNEL);
-
-               add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size,
-                                   &length, "PHYSDEVPATH=%s", path);
-               kfree(path);
-
-               if (physdev->bus)
-                       add_hotplug_env_var(envp, num_envp, &i,
-                                           buffer, buffer_size, &length,
-                                           "PHYSDEVBUS=%s",
-                                           physdev->bus->name);
-
-               if (physdev->driver)
-                       add_hotplug_env_var(envp, num_envp, &i,
-                                           buffer, buffer_size, &length,
-                                           "PHYSDEVDRIVER=%s",
-                                           physdev->driver->name);
-       }
-
-       /* terminate, set to next free slot, shrink available space */
-       envp[i] = NULL;
-       envp = &envp[i];
-       num_envp -= i;
-       buffer = &buffer[length];
-       buffer_size -= length;
-
-       return 0;
-}
-
-static struct kset_hotplug_ops block_hotplug_ops = {
-       .filter         = block_hotplug_filter,
-       .hotplug        = block_hotplug,
-};
-
-/* declare block_subsys. */
-static decl_subsys(block, &ktype_block, &block_hotplug_ops);
-
-
-/*
- * aggregate disk stat collector.  Uses the same stats that the sysfs
- * entries do, above, but makes them available through one seq_file.
- * Watching a few disks may be efficient through sysfs, but watching
- * all of them will be more efficient through this interface.
- *
- * The output looks suspiciously like /proc/partitions with a bunch of
- * extra fields.
- */
-
-/* iterator */
-static void *diskstats_start(struct seq_file *part, loff_t *pos)
-{
-       loff_t k = *pos;
-       struct list_head *p;
-
-       down(&block_subsys_sem);
-       list_for_each(p, &block_subsys.kset.list)
-               if (!k--)
-                       return list_entry(p, struct gendisk, kobj.entry);
-       return NULL;
-}
-
-static void *diskstats_next(struct seq_file *part, void *v, loff_t *pos)
-{
-       struct list_head *p = ((struct gendisk *)v)->kobj.entry.next;
-       ++*pos;
-       return p==&block_subsys.kset.list ? NULL :
-               list_entry(p, struct gendisk, kobj.entry);
-}
-
-static void diskstats_stop(struct seq_file *part, void *v)
-{
-       up(&block_subsys_sem);
-}
-
-static int diskstats_show(struct seq_file *s, void *v)
-{
-       struct gendisk *gp = v;
-       char buf[BDEVNAME_SIZE];
-       int n = 0;
-
-       /*
-       if (&sgp->kobj.entry == block_subsys.kset.list.next)
-               seq_puts(s,     "major minor name"
-                               "     rio rmerge rsect ruse wio wmerge "
-                               "wsect wuse running use aveq"
-                               "\n\n");
-       */
-       preempt_disable();
-       disk_round_stats(gp);
-       preempt_enable();
-       seq_printf(s, "%4d %4d %s %u %u %llu %u %u %u %llu %u %u %u %u\n",
-               gp->major, n + gp->first_minor, disk_name(gp, n, buf),
-               disk_stat_read(gp, ios[0]), disk_stat_read(gp, merges[0]),
-               (unsigned long long)disk_stat_read(gp, sectors[0]),
-               jiffies_to_msecs(disk_stat_read(gp, ticks[0])),
-               disk_stat_read(gp, ios[1]), disk_stat_read(gp, merges[1]),
-               (unsigned long long)disk_stat_read(gp, sectors[1]),
-               jiffies_to_msecs(disk_stat_read(gp, ticks[1])),
-               gp->in_flight,
-               jiffies_to_msecs(disk_stat_read(gp, io_ticks)),
-               jiffies_to_msecs(disk_stat_read(gp, time_in_queue)));
-
-       /* now show all non-0 size partitions of it */
-       for (n = 0; n < gp->minors - 1; n++) {
-               struct hd_struct *hd = gp->part[n];
-
-               if (hd && hd->nr_sects)
-                       seq_printf(s, "%4d %4d %s %u %u %u %u\n",
-                               gp->major, n + gp->first_minor + 1,
-                               disk_name(gp, n + 1, buf),
-                               hd->ios[0], hd->sectors[0],
-                               hd->ios[1], hd->sectors[1]);
-       }
-       return 0;
-}
-
-struct seq_operations diskstats_op = {
-       .start  = diskstats_start,
-       .next   = diskstats_next,
-       .stop   = diskstats_stop,
-       .show   = diskstats_show
-};
-
-struct gendisk *alloc_disk(int minors)
-{
-       return alloc_disk_node(minors, -1);
-}
-
-struct gendisk *alloc_disk_node(int minors, int node_id)
-{
-       struct gendisk *disk;
-
-       disk = kmalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
-       if (disk) {
-               memset(disk, 0, sizeof(struct gendisk));
-               if (!init_disk_stats(disk)) {
-                       kfree(disk);
-                       return NULL;
-               }
-               if (minors > 1) {
-                       int size = (minors - 1) * sizeof(struct hd_struct *);
-                       disk->part = kmalloc_node(size, GFP_KERNEL, node_id);
-                       if (!disk->part) {
-                               kfree(disk);
-                               return NULL;
-                       }
-                       memset(disk->part, 0, size);
-               }
-               disk->minors = minors;
-               kobj_set_kset_s(disk,block_subsys);
-               kobject_init(&disk->kobj);
-               rand_initialize_disk(disk);
-       }
-       return disk;
-}
-
-EXPORT_SYMBOL(alloc_disk);
-EXPORT_SYMBOL(alloc_disk_node);
-
-struct kobject *get_disk(struct gendisk *disk)
-{
-       struct module *owner;
-       struct kobject *kobj;
-
-       if (!disk->fops)
-               return NULL;
-       owner = disk->fops->owner;
-       if (owner && !try_module_get(owner))
-               return NULL;
-       kobj = kobject_get(&disk->kobj);
-       if (kobj == NULL) {
-               module_put(owner);
-               return NULL;
-       }
-       return kobj;
-
-}
-
-EXPORT_SYMBOL(get_disk);
-
-void put_disk(struct gendisk *disk)
-{
-       if (disk)
-               kobject_put(&disk->kobj);
-}
-
-EXPORT_SYMBOL(put_disk);
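
For symmetry with the allocation path, a hypothetical driver's teardown unwinds in reverse order; gd, mydrv_queue and mydrv_major are the illustrative names from the earlier sketches.

        del_gendisk(gd);                        /* unregister from sysfs, /proc, ... */
        put_disk(gd);                           /* drop the reference alloc_disk() gave us */
        blk_cleanup_queue(mydrv_queue);
        unregister_blkdev(mydrv_major, "mydrv");
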
-
-void set_device_ro(struct block_device *bdev, int flag)
-{
-       if (bdev->bd_contains != bdev)
-               bdev->bd_part->policy = flag;
-       else
-               bdev->bd_disk->policy = flag;
-}
-
-EXPORT_SYMBOL(set_device_ro);
-
-void set_disk_ro(struct gendisk *disk, int flag)
-{
-       int i;
-       disk->policy = flag;
-       for (i = 0; i < disk->minors - 1; i++)
-               if (disk->part[i]) disk->part[i]->policy = flag;
-}
-
-EXPORT_SYMBOL(set_disk_ro);
-
-int bdev_read_only(struct block_device *bdev)
-{
-       if (!bdev)
-               return 0;
-       else if (bdev->bd_contains != bdev)
-               return bdev->bd_part->policy;
-       else
-               return bdev->bd_disk->policy;
-}
-
-EXPORT_SYMBOL(bdev_read_only);
-
-int invalidate_partition(struct gendisk *disk, int index)
-{
-       int res = 0;
-       struct block_device *bdev = bdget_disk(disk, index);
-       if (bdev) {
-               fsync_bdev(bdev);
-               res = __invalidate_device(bdev);
-               bdput(bdev);
-       }
-       return res;
-}
-
-EXPORT_SYMBOL(invalidate_partition);
diff --git a/drivers/block/ioctl.c b/drivers/block/ioctl.c
deleted file mode 100644 (file)
index 6e27847..0000000
+++ /dev/null
@@ -1,275 +0,0 @@
-#include <linux/sched.h>               /* for capable() */
-#include <linux/blkdev.h>
-#include <linux/blkpg.h>
-#include <linux/backing-dev.h>
-#include <linux/buffer_head.h>
-#include <linux/smp_lock.h>
-#include <asm/uaccess.h>
-
-static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
-{
-       struct block_device *bdevp;
-       struct gendisk *disk;
-       struct blkpg_ioctl_arg a;
-       struct blkpg_partition p;
-       long long start, length;
-       int part;
-       int i;
-
-       if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
-       if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
-               return -EFAULT;
-       if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
-               return -EFAULT;
-       disk = bdev->bd_disk;
-       if (bdev != bdev->bd_contains)
-               return -EINVAL;
-       part = p.pno;
-       if (part <= 0 || part >= disk->minors)
-               return -EINVAL;
-       switch (a.op) {
-               case BLKPG_ADD_PARTITION:
-                       start = p.start >> 9;
-                       length = p.length >> 9;
-                       /* check for fit in a hd_struct */ 
-                       if (sizeof(sector_t) == sizeof(long) && 
-                           sizeof(long long) > sizeof(long)) {
-                               long pstart = start, plength = length;
-                               if (pstart != start || plength != length
-                                   || pstart < 0 || plength < 0)
-                                       return -EINVAL;
-                       }
-                       /* partition number in use? */
-                       down(&bdev->bd_sem);
-                       if (disk->part[part - 1]) {
-                               up(&bdev->bd_sem);
-                               return -EBUSY;
-                       }
-                       /* overlap? */
-                       for (i = 0; i < disk->minors - 1; i++) {
-                               struct hd_struct *s = disk->part[i];
-
-                               if (!s)
-                                       continue;
-                               if (!(start+length <= s->start_sect ||
-                                     start >= s->start_sect + s->nr_sects)) {
-                                       up(&bdev->bd_sem);
-                                       return -EBUSY;
-                               }
-                       }
-                       /* all seems OK */
-                       add_partition(disk, part, start, length);
-                       up(&bdev->bd_sem);
-                       return 0;
-               case BLKPG_DEL_PARTITION:
-                       if (!disk->part[part-1])
-                               return -ENXIO;
-                       if (disk->part[part - 1]->nr_sects == 0)
-                               return -ENXIO;
-                       bdevp = bdget_disk(disk, part);
-                       if (!bdevp)
-                               return -ENOMEM;
-                       down(&bdevp->bd_sem);
-                       if (bdevp->bd_openers) {
-                               up(&bdevp->bd_sem);
-                               bdput(bdevp);
-                               return -EBUSY;
-                       }
-                       /* all seems OK */
-                       fsync_bdev(bdevp);
-                       invalidate_bdev(bdevp, 0);
-
-                       down(&bdev->bd_sem);
-                       delete_partition(disk, part);
-                       up(&bdev->bd_sem);
-                       up(&bdevp->bd_sem);
-                       bdput(bdevp);
-
-                       return 0;
-               default:
-                       return -EINVAL;
-       }
-}
-
-static int blkdev_reread_part(struct block_device *bdev)
-{
-       struct gendisk *disk = bdev->bd_disk;
-       int res;
-
-       if (disk->minors == 1 || bdev != bdev->bd_contains)
-               return -EINVAL;
-       if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
-       if (down_trylock(&bdev->bd_sem))
-               return -EBUSY;
-       res = rescan_partitions(disk, bdev);
-       up(&bdev->bd_sem);
-       return res;
-}
-
-static int put_ushort(unsigned long arg, unsigned short val)
-{
-       return put_user(val, (unsigned short __user *)arg);
-}
-
-static int put_int(unsigned long arg, int val)
-{
-       return put_user(val, (int __user *)arg);
-}
-
-static int put_long(unsigned long arg, long val)
-{
-       return put_user(val, (long __user *)arg);
-}
-
-static int put_ulong(unsigned long arg, unsigned long val)
-{
-       return put_user(val, (unsigned long __user *)arg);
-}
-
-static int put_u64(unsigned long arg, u64 val)
-{
-       return put_user(val, (u64 __user *)arg);
-}
-
-static int blkdev_locked_ioctl(struct file *file, struct block_device *bdev,
-                               unsigned cmd, unsigned long arg)
-{
-       struct backing_dev_info *bdi;
-       int ret, n;
-
-       switch (cmd) {
-       case BLKRAGET:
-       case BLKFRAGET:
-               if (!arg)
-                       return -EINVAL;
-               bdi = blk_get_backing_dev_info(bdev);
-               if (bdi == NULL)
-                       return -ENOTTY;
-               return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
-       case BLKROGET:
-               return put_int(arg, bdev_read_only(bdev) != 0);
-       case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */
-               return put_int(arg, block_size(bdev));
-       case BLKSSZGET: /* get block device hardware sector size */
-               return put_int(arg, bdev_hardsect_size(bdev));
-       case BLKSECTGET:
-               return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
-       case BLKRASET:
-       case BLKFRASET:
-               if(!capable(CAP_SYS_ADMIN))
-                       return -EACCES;
-               bdi = blk_get_backing_dev_info(bdev);
-               if (bdi == NULL)
-                       return -ENOTTY;
-               bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
-               return 0;
-       case BLKBSZSET:
-               /* set the logical block size */
-               if (!capable(CAP_SYS_ADMIN))
-                       return -EACCES;
-               if (!arg)
-                       return -EINVAL;
-               if (get_user(n, (int __user *) arg))
-                       return -EFAULT;
-               if (bd_claim(bdev, file) < 0)
-                       return -EBUSY;
-               ret = set_blocksize(bdev, n);
-               bd_release(bdev);
-               return ret;
-       case BLKPG:
-               return blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);
-       case BLKRRPART:
-               return blkdev_reread_part(bdev);
-       case BLKGETSIZE:
-               if ((bdev->bd_inode->i_size >> 9) > ~0UL)
-                       return -EFBIG;
-               return put_ulong(arg, bdev->bd_inode->i_size >> 9);
-       case BLKGETSIZE64:
-               return put_u64(arg, bdev->bd_inode->i_size);
-       }
-       return -ENOIOCTLCMD;
-}
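
From user space these commands arrive through the ordinary ioctl(2) path. A small self-contained sketch (assuming /dev/sda exists and is readable) that queries two of the size ioctls handled above:

/* user-space sketch, not kernel code */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>           /* BLKGETSIZE64, BLKSSZGET */

int main(void)
{
        unsigned long long bytes;
        int ssz, fd = open("/dev/sda", O_RDONLY);

        if (fd < 0)
                return 1;
        if (ioctl(fd, BLKGETSIZE64, &bytes) == 0 &&
            ioctl(fd, BLKSSZGET, &ssz) == 0)
                printf("%llu bytes, %d-byte sectors\n", bytes, ssz);
        close(fd);
        return 0;
}
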
-
-static int blkdev_driver_ioctl(struct inode *inode, struct file *file,
-               struct gendisk *disk, unsigned cmd, unsigned long arg)
-{
-       int ret;
-       if (disk->fops->unlocked_ioctl)
-               return disk->fops->unlocked_ioctl(file, cmd, arg);
-
-       if (disk->fops->ioctl) {
-               lock_kernel();
-               ret = disk->fops->ioctl(inode, file, cmd, arg);
-               unlock_kernel();
-               return ret;
-       }
-
-       return -ENOTTY;
-}
-
-int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
-                       unsigned long arg)
-{
-       struct block_device *bdev = inode->i_bdev;
-       struct gendisk *disk = bdev->bd_disk;
-       int ret, n;
-
-       switch(cmd) {
-       case BLKFLSBUF:
-               if (!capable(CAP_SYS_ADMIN))
-                       return -EACCES;
-
-               ret = blkdev_driver_ioctl(inode, file, disk, cmd, arg);
-               /* -EINVAL to handle old uncorrected drivers */
-               if (ret != -EINVAL && ret != -ENOTTY)
-                       return ret;
-
-               lock_kernel();
-               fsync_bdev(bdev);
-               invalidate_bdev(bdev, 0);
-               unlock_kernel();
-               return 0;
-
-       case BLKROSET:
-               ret = blkdev_driver_ioctl(inode, file, disk, cmd, arg);
-               /* -EINVAL to handle old uncorrected drivers */
-               if (ret != -EINVAL && ret != -ENOTTY)
-                       return ret;
-               if (!capable(CAP_SYS_ADMIN))
-                       return -EACCES;
-               if (get_user(n, (int __user *)(arg)))
-                       return -EFAULT;
-               lock_kernel();
-               set_device_ro(bdev, n);
-               unlock_kernel();
-               return 0;
-       }
-
-       lock_kernel();
-       ret = blkdev_locked_ioctl(file, bdev, cmd, arg);
-       unlock_kernel();
-       if (ret != -ENOIOCTLCMD)
-               return ret;
-
-       return blkdev_driver_ioctl(inode, file, disk, cmd, arg);
-}
-
-/* Most of the generic ioctls are handled in the normal fallback path.
-   This assumes the blkdev's low level compat_ioctl always returns
-   ENOIOCTLCMD for unknown ioctls. */
-long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
-{
-       struct block_device *bdev = file->f_dentry->d_inode->i_bdev;
-       struct gendisk *disk = bdev->bd_disk;
-       int ret = -ENOIOCTLCMD;
-       if (disk->fops->compat_ioctl) {
-               lock_kernel();
-               ret = disk->fops->compat_ioctl(file, cmd, arg);
-               unlock_kernel();
-       }
-       return ret;
-}
-
-EXPORT_SYMBOL_GPL(blkdev_ioctl);
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
deleted file mode 100644 (file)
index 2747741..0000000
+++ /dev/null
@@ -1,3613 +0,0 @@
-/*
- *  linux/drivers/block/ll_rw_blk.c
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
- * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
- * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
- * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> -  July2000
- * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
- */
-
-/*
- * This handles all read/write requests to block devices
- */
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/backing-dev.h>
-#include <linux/bio.h>
-#include <linux/blkdev.h>
-#include <linux/highmem.h>
-#include <linux/mm.h>
-#include <linux/kernel_stat.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>     /* for max_pfn/max_low_pfn */
-#include <linux/completion.h>
-#include <linux/slab.h>
-#include <linux/swap.h>
-#include <linux/writeback.h>
-#include <linux/blkdev.h>
-
-/*
- * for max sense size
- */
-#include <scsi/scsi_cmnd.h>
-
-static void blk_unplug_work(void *data);
-static void blk_unplug_timeout(unsigned long data);
-static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
-
-/*
- * For the allocated request tables
- */
-static kmem_cache_t *request_cachep;
-
-/*
- * For queue allocation
- */
-static kmem_cache_t *requestq_cachep;
-
-/*
- * For io context allocations
- */
-static kmem_cache_t *iocontext_cachep;
-
-static wait_queue_head_t congestion_wqh[2] = {
-               __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
-               __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
-       };
-
-/*
- * Controlling structure to kblockd
- */
-static struct workqueue_struct *kblockd_workqueue; 
-
-unsigned long blk_max_low_pfn, blk_max_pfn;
-
-EXPORT_SYMBOL(blk_max_low_pfn);
-EXPORT_SYMBOL(blk_max_pfn);
-
-/* Amount of time in which a process may batch requests */
-#define BLK_BATCH_TIME (HZ/50UL)
-
-/* Number of requests a "batching" process may submit */
-#define BLK_BATCH_REQ  32
-
-/*
- * Return the threshold (number of used requests) at which the queue is
- * considered to be congested.  It includes a little hysteresis to keep the
- * context switch rate down.
- */
-static inline int queue_congestion_on_threshold(struct request_queue *q)
-{
-       return q->nr_congestion_on;
-}
-
-/*
- * The threshold at which a queue is considered to be uncongested
- */
-static inline int queue_congestion_off_threshold(struct request_queue *q)
-{
-       return q->nr_congestion_off;
-}
-
-static void blk_queue_congestion_threshold(struct request_queue *q)
-{
-       int nr;
-
-       nr = q->nr_requests - (q->nr_requests / 8) + 1;
-       if (nr > q->nr_requests)
-               nr = q->nr_requests;
-       q->nr_congestion_on = nr;
-
-       nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
-       if (nr < 1)
-               nr = 1;
-       q->nr_congestion_off = nr;
-}
-
-/*
- * A queue has just exited congestion.  Note this in the global counter of
- * congested queues, and wake up anyone who was waiting for requests to be
- * put back.
- */
-static void clear_queue_congested(request_queue_t *q, int rw)
-{
-       enum bdi_state bit;
-       wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-       bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
-       clear_bit(bit, &q->backing_dev_info.state);
-       smp_mb__after_clear_bit();
-       if (waitqueue_active(wqh))
-               wake_up(wqh);
-}
-
-/*
- * A queue has just entered congestion.  Flag that in the queue's VM-visible
- * state flags and increment the global counter of congested queues.
- */
-static void set_queue_congested(request_queue_t *q, int rw)
-{
-       enum bdi_state bit;
-
-       bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
-       set_bit(bit, &q->backing_dev_info.state);
-}
-
-/**
- * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
- * @bdev:      device
- *
- * Locates the passed device's request queue and returns the address of its
- * backing_dev_info
- *
- * Will return NULL if the request queue cannot be located.
- */
-struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
-{
-       struct backing_dev_info *ret = NULL;
-       request_queue_t *q = bdev_get_queue(bdev);
-
-       if (q)
-               ret = &q->backing_dev_info;
-       return ret;
-}
-
-EXPORT_SYMBOL(blk_get_backing_dev_info);
-
-void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
-{
-       q->activity_fn = fn;
-       q->activity_data = data;
-}
-
-EXPORT_SYMBOL(blk_queue_activity_fn);
-
-/**
- * blk_queue_prep_rq - set a prepare_request function for queue
- * @q:         queue
- * @pfn:       prepare_request function
- *
- * It's possible for a queue to register a prepare_request callback which
- * is invoked before the request is handed to the request_fn. The goal of
- * the function is to prepare a request for I/O, it can be used to build a
- * cdb from the request data for instance.
- *
- */
-void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn)
-{
-       q->prep_rq_fn = pfn;
-}
-
-EXPORT_SYMBOL(blk_queue_prep_rq);
-
-/**
- * blk_queue_merge_bvec - set a merge_bvec function for queue
- * @q:         queue
- * @mbfn:      merge_bvec_fn
- *
- * Usually queues have static limitations on the max sectors or segments that
- * we can put in a request. Stacking drivers may have some settings that
- * are dynamic, and thus we have to query the queue whether it is ok to
- * add a new bio_vec to a bio at a given offset or not. If the block device
- * has such limitations, it needs to register a merge_bvec_fn to control
- * the size of bio's sent to it. Note that a block device *must* allow a
- * single page to be added to an empty bio. The block device driver may want
- * to use the bio_split() function to deal with these bio's. By default
- * no merge_bvec_fn is defined for a queue, and only the fixed limits are
- * honored.
- */
-void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
-{
-       q->merge_bvec_fn = mbfn;
-}
-
-EXPORT_SYMBOL(blk_queue_merge_bvec);
-
-/**
- * blk_queue_make_request - define an alternate make_request function for a device
- * @q:  the request queue for the device to be affected
- * @mfn: the alternate make_request function
- *
- * Description:
- *    The normal way for &struct bios to be passed to a device
- *    driver is for them to be collected into requests on a request
- *    queue, and then to allow the device driver to select requests
- *    off that queue when it is ready.  This works well for many block
- *    devices. However some block devices (typically virtual devices
- *    such as md or lvm) do not benefit from the processing on the
- *    request queue, and are served best by having the requests passed
- *    directly to them.  This can be achieved by providing a function
- *    to blk_queue_make_request().
- *
- * Caveat:
- *    The driver that does this *must* be able to deal appropriately
- *    with buffers in "highmemory". This can be accomplished by either calling
- *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
- *    blk_queue_bounce() to create a buffer in normal memory.
- **/
-void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
-{
-       /*
-        * set defaults
-        */
-       q->nr_requests = BLKDEV_MAX_RQ;
-       blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
-       blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
-       q->make_request_fn = mfn;
-       q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
-       q->backing_dev_info.state = 0;
-       q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
-       blk_queue_max_sectors(q, MAX_SECTORS);
-       blk_queue_hardsect_size(q, 512);
-       blk_queue_dma_alignment(q, 511);
-       blk_queue_congestion_threshold(q);
-       q->nr_batching = BLK_BATCH_REQ;
-
-       q->unplug_thresh = 4;           /* hmm */
-       q->unplug_delay = (3 * HZ) / 1000;      /* 3 milliseconds */
-       if (q->unplug_delay == 0)
-               q->unplug_delay = 1;
-
-       INIT_WORK(&q->unplug_work, blk_unplug_work, q);
-
-       q->unplug_timer.function = blk_unplug_timeout;
-       q->unplug_timer.data = (unsigned long)q;
-
-       /*
-        * by default assume old behaviour and bounce for any highmem page
-        */
-       blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-
-       blk_queue_activity_fn(q, NULL, NULL);
-}
-
-EXPORT_SYMBOL(blk_queue_make_request);
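
As a sketch of the bio-based path described in the comment above, a hypothetical virtual device would allocate a bare queue and install its own make_request function; mydrv_make_request and mydrv_queue are assumed names, and the bio_endio() call reflects the three-argument form of this era.

static int mydrv_make_request(request_queue_t *q, struct bio *bio)
{
        /* ...move the data described by bio to/from the backing store... */
        bio_endio(bio, bio->bi_size, 0);        /* complete the whole bio, no error */
        return 0;
}

        /* at setup time: */
        mydrv_queue = blk_alloc_queue(GFP_KERNEL);
        if (!mydrv_queue)
                return -ENOMEM;
        blk_queue_make_request(mydrv_queue, mydrv_make_request);
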
-
-static inline void rq_init(request_queue_t *q, struct request *rq)
-{
-       INIT_LIST_HEAD(&rq->queuelist);
-
-       rq->errors = 0;
-       rq->rq_status = RQ_ACTIVE;
-       rq->bio = rq->biotail = NULL;
-       rq->ioprio = 0;
-       rq->buffer = NULL;
-       rq->ref_count = 1;
-       rq->q = q;
-       rq->waiting = NULL;
-       rq->special = NULL;
-       rq->data_len = 0;
-       rq->data = NULL;
-       rq->nr_phys_segments = 0;
-       rq->sense = NULL;
-       rq->end_io = NULL;
-       rq->end_io_data = NULL;
-}
-
-/**
- * blk_queue_ordered - does this queue support ordered writes
- * @q:     the request queue
- * @flag:  see below
- *
- * Description:
- *   For journalled file systems, doing ordered writes on a commit
- *   block instead of explicitly doing wait_on_buffer (which is bad
- *   for performance) can be a big win. Block drivers supporting this
- *   feature should call this function and indicate so.
- *
- **/
-void blk_queue_ordered(request_queue_t *q, int flag)
-{
-       switch (flag) {
-               case QUEUE_ORDERED_NONE:
-                       if (q->flush_rq)
-                               kmem_cache_free(request_cachep, q->flush_rq);
-                       q->flush_rq = NULL;
-                       q->ordered = flag;
-                       break;
-               case QUEUE_ORDERED_TAG:
-                       q->ordered = flag;
-                       break;
-               case QUEUE_ORDERED_FLUSH:
-                       q->ordered = flag;
-                       if (!q->flush_rq)
-                               q->flush_rq = kmem_cache_alloc(request_cachep,
-                                                               GFP_KERNEL);
-                       break;
-               default:
-                       printk("blk_queue_ordered: bad value %d\n", flag);
-                       break;
-       }
-}
-
-EXPORT_SYMBOL(blk_queue_ordered);
-
-/**
- * blk_queue_issue_flush_fn - set function for issuing a flush
- * @q:     the request queue
- * @iff:   the function to be called issuing the flush
- *
- * Description:
- *   If a driver supports issuing a flush command, the support is notified
- *   to the block layer by defining it through this call.
- *
- **/
-void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff)
-{
-       q->issue_flush_fn = iff;
-}
-
-EXPORT_SYMBOL(blk_queue_issue_flush_fn);
-
-/*
- * Cache flushing for ordered writes handling
- */
-static void blk_pre_flush_end_io(struct request *flush_rq)
-{
-       struct request *rq = flush_rq->end_io_data;
-       request_queue_t *q = rq->q;
-
-       elv_completed_request(q, flush_rq);
-
-       rq->flags |= REQ_BAR_PREFLUSH;
-
-       if (!flush_rq->errors)
-               elv_requeue_request(q, rq);
-       else {
-               q->end_flush_fn(q, flush_rq);
-               clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
-               q->request_fn(q);
-       }
-}
-
-static void blk_post_flush_end_io(struct request *flush_rq)
-{
-       struct request *rq = flush_rq->end_io_data;
-       request_queue_t *q = rq->q;
-
-       elv_completed_request(q, flush_rq);
-
-       rq->flags |= REQ_BAR_POSTFLUSH;
-
-       q->end_flush_fn(q, flush_rq);
-       clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
-       q->request_fn(q);
-}
-
-struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq)
-{
-       struct request *flush_rq = q->flush_rq;
-
-       BUG_ON(!blk_barrier_rq(rq));
-
-       if (test_and_set_bit(QUEUE_FLAG_FLUSH, &q->queue_flags))
-               return NULL;
-
-       rq_init(q, flush_rq);
-       flush_rq->elevator_private = NULL;
-       flush_rq->flags = REQ_BAR_FLUSH;
-       flush_rq->rq_disk = rq->rq_disk;
-       flush_rq->rl = NULL;
-
-       /*
-        * prepare_flush returns 0 if no flush is needed; just mark both
-        * pre and post flush as done in that case
-        */
-       if (!q->prepare_flush_fn(q, flush_rq)) {
-               rq->flags |= REQ_BAR_PREFLUSH | REQ_BAR_POSTFLUSH;
-               clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
-               return rq;
-       }
-
-       /*
-        * some drivers dequeue requests right away, some only after io
-        * completion. make sure the request is dequeued.
-        */
-       if (!list_empty(&rq->queuelist))
-               blkdev_dequeue_request(rq);
-
-       flush_rq->end_io_data = rq;
-       flush_rq->end_io = blk_pre_flush_end_io;
-
-       __elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0);
-       return flush_rq;
-}
-
-static void blk_start_post_flush(request_queue_t *q, struct request *rq)
-{
-       struct request *flush_rq = q->flush_rq;
-
-       BUG_ON(!blk_barrier_rq(rq));
-
-       rq_init(q, flush_rq);
-       flush_rq->elevator_private = NULL;
-       flush_rq->flags = REQ_BAR_FLUSH;
-       flush_rq->rq_disk = rq->rq_disk;
-       flush_rq->rl = NULL;
-
-       if (q->prepare_flush_fn(q, flush_rq)) {
-               flush_rq->end_io_data = rq;
-               flush_rq->end_io = blk_post_flush_end_io;
-
-               __elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0);
-               q->request_fn(q);
-       }
-}
-
-static inline int blk_check_end_barrier(request_queue_t *q, struct request *rq,
-                                       int sectors)
-{
-       if (sectors > rq->nr_sectors)
-               sectors = rq->nr_sectors;
-
-       rq->nr_sectors -= sectors;
-       return rq->nr_sectors;
-}
-
-static int __blk_complete_barrier_rq(request_queue_t *q, struct request *rq,
-                                    int sectors, int queue_locked)
-{
-       if (q->ordered != QUEUE_ORDERED_FLUSH)
-               return 0;
-       if (!blk_fs_request(rq) || !blk_barrier_rq(rq))
-               return 0;
-       if (blk_barrier_postflush(rq))
-               return 0;
-
-       if (!blk_check_end_barrier(q, rq, sectors)) {
-               unsigned long flags = 0;
-
-               if (!queue_locked)
-                       spin_lock_irqsave(q->queue_lock, flags);
-
-               blk_start_post_flush(q, rq);
-
-               if (!queue_locked)
-                       spin_unlock_irqrestore(q->queue_lock, flags);
-       }
-
-       return 1;
-}
-
-/**
- * blk_complete_barrier_rq - complete possible barrier request
- * @q:  the request queue for the device
- * @rq:  the request
- * @sectors:  number of sectors to complete
- *
- * Description:
- *   Used in driver end_io handling to determine whether to postpone
- *   completion of a barrier request until a post flush has been done. This
- *   is the unlocked variant, used if the caller doesn't already hold the
- *   queue lock.
- **/
-int blk_complete_barrier_rq(request_queue_t *q, struct request *rq, int sectors)
-{
-       return __blk_complete_barrier_rq(q, rq, sectors, 0);
-}
-EXPORT_SYMBOL(blk_complete_barrier_rq);
-
-/**
- * blk_complete_barrier_rq_locked - complete possible barrier request
- * @q:  the request queue for the device
- * @rq:  the request
- * @sectors:  number of sectors to complete
- *
- * Description:
- *   See blk_complete_barrier_rq(). This variant must be used if the caller
- *   holds the queue lock.
- **/
-int blk_complete_barrier_rq_locked(request_queue_t *q, struct request *rq,
-                                  int sectors)
-{
-       return __blk_complete_barrier_rq(q, rq, sectors, 1);
-}
-EXPORT_SYMBOL(blk_complete_barrier_rq_locked);
-
-/**
- * blk_queue_bounce_limit - set bounce buffer limit for queue
- * @q:  the request queue for the device
- * @dma_addr:   bus address limit
- *
- * Description:
- *    Different hardware can have different requirements as to what pages
- *    it can do I/O directly to. A low level driver can call
- *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
- *    buffers for doing I/O to pages residing above @dma_addr. By default
- *    the block layer sets this to the highest numbered "low" memory page.
- **/
-void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
-{
-       unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
-
-       /*
-        * set appropriate bounce gfp mask -- unfortunately we don't have a
-        * full 4GB zone, so we have to resort to low memory for any bounces.
-        * ISA has its own < 16MB zone.
-        */
-       if (bounce_pfn < blk_max_low_pfn) {
-               BUG_ON(dma_addr < BLK_BOUNCE_ISA);
-               init_emergency_isa_pool();
-               q->bounce_gfp = GFP_NOIO | GFP_DMA;
-       } else
-               q->bounce_gfp = GFP_NOIO;
-
-       q->bounce_pfn = bounce_pfn;
-}
-
-EXPORT_SYMBOL(blk_queue_bounce_limit);
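
To make the effect concrete: a hypothetical PCI driver whose hardware can only address the first 4GB would typically derive the bounce limit from the DMA mask it negotiated, falling back to bouncing all highmem pages otherwise (pdev and q are assumed names).

        if (pci_set_dma_mask(pdev, 0xffffffffULL) == 0)
                blk_queue_bounce_limit(q, 0xffffffffULL);       /* bounce pages above 4GB */
        else
                blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);     /* bounce any highmem page */
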
-
-/**
- * blk_queue_max_sectors - set max sectors for a request for this queue
- * @q:  the request queue for the device
- * @max_sectors:  max sectors in the usual 512b unit
- *
- * Description:
- *    Enables a low level driver to set an upper limit on the size of
- *    received requests.
- **/
-void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
-{
-       if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
-               max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-               printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
-       }
-
-       q->max_sectors = q->max_hw_sectors = max_sectors;
-}
-
-EXPORT_SYMBOL(blk_queue_max_sectors);
-
-/**
- * blk_queue_max_phys_segments - set max phys segments for a request for this queue
- * @q:  the request queue for the device
- * @max_segments:  max number of segments
- *
- * Description:
- *    Enables a low level driver to set an upper limit on the number of
- *    physical data segments in a request.  This would be the largest sized
- *    scatter list the driver could handle.
- **/
-void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments)
-{
-       if (!max_segments) {
-               max_segments = 1;
-               printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
-       }
-
-       q->max_phys_segments = max_segments;
-}
-
-EXPORT_SYMBOL(blk_queue_max_phys_segments);
-
-/**
- * blk_queue_max_hw_segments - set max hw segments for a request for this queue
- * @q:  the request queue for the device
- * @max_segments:  max number of segments
- *
- * Description:
- *    Enables a low level driver to set an upper limit on the number of
- *    hw data segments in a request.  This would be the largest number of
- *    address/length pairs the host adapter can actually give at once
- *    to the device.
- **/
-void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments)
-{
-       if (!max_segments) {
-               max_segments = 1;
-               printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
-       }
-
-       q->max_hw_segments = max_segments;
-}
-
-EXPORT_SYMBOL(blk_queue_max_hw_segments);
-
-/**
- * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
- * @q:  the request queue for the device
- * @max_size:  max size of segment in bytes
- *
- * Description:
- *    Enables a low level driver to set an upper limit on the size of a
- *    coalesced segment
- **/
-void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size)
-{
-       if (max_size < PAGE_CACHE_SIZE) {
-               max_size = PAGE_CACHE_SIZE;
-               printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
-       }
-
-       q->max_segment_size = max_size;
-}
-
-EXPORT_SYMBOL(blk_queue_max_segment_size);
-
-/**
- * blk_queue_hardsect_size - set hardware sector size for the queue
- * @q:  the request queue for the device
- * @size:  the hardware sector size, in bytes
- *
- * Description:
- *   This should typically be set to the lowest possible sector size
- *   that the hardware can operate on without resorting to internal
- *   read-modify-write operations. Usually the default
- *   of 512 covers most hardware.
- **/
-void blk_queue_hardsect_size(request_queue_t *q, unsigned short size)
-{
-       q->hardsect_size = size;
-}
-
-EXPORT_SYMBOL(blk_queue_hardsect_size);
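
The limit setters above are usually called as a group right after the queue is created, sized to what the hardware's scatter/gather engine can handle; the numbers below are purely illustrative.

        blk_queue_max_sectors(q, 256);          /* at most 128KB per request */
        blk_queue_max_phys_segments(q, 32);
        blk_queue_max_hw_segments(q, 32);
        blk_queue_max_segment_size(q, 65536);
        blk_queue_hardsect_size(q, 512);
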
-
-/*
- * Returns the minimum that is _not_ zero, unless both are zero.
- */
-#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
-
-/**
- * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
- * @t: the stacking driver (top)
- * @b:  the underlying device (bottom)
- **/
-void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
-{
-       /* zero is "infinity" */
-       t->max_sectors = t->max_hw_sectors =
-               min_not_zero(t->max_sectors,b->max_sectors);
-
-       t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
-       t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
-       t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
-       t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
-}
-
-EXPORT_SYMBOL(blk_queue_stack_limits);
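
A stacking driver (md/dm style) would fold each underlying device's limits into its own queue as members are added; a one-line sketch with assumed names stacked_q and member_bdev:

        blk_queue_stack_limits(stacked_q, bdev_get_queue(member_bdev));
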
-
-/**
- * blk_queue_segment_boundary - set boundary rules for segment merging
- * @q:  the request queue for the device
- * @mask:  the memory boundary mask
- **/
-void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
-{
-       if (mask < PAGE_CACHE_SIZE - 1) {
-               mask = PAGE_CACHE_SIZE - 1;
-               printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
-       }
-
-       q->seg_boundary_mask = mask;
-}
-
-EXPORT_SYMBOL(blk_queue_segment_boundary);
-
-/**
- * blk_queue_dma_alignment - set dma length and memory alignment
- * @q:     the request queue for the device
- * @mask:  alignment mask
- *
- * Description:
- *    Set required memory and length alignment for direct DMA transactions.
- *    This is used when building direct I/O requests for the queue.
- *
- **/
-void blk_queue_dma_alignment(request_queue_t *q, int mask)
-{
-       q->dma_alignment = mask;
-}
-
-EXPORT_SYMBOL(blk_queue_dma_alignment);
-
-/**
- * blk_queue_find_tag - find a request by its tag and queue
- *
- * @q:  The request queue for the device
- * @tag: The tag of the request
- *
- * Notes:
- *    Should be used when a device returns a tag and you want to match
- *    it with a request.
- *
- *    no locks need be held.
- **/
-struct request *blk_queue_find_tag(request_queue_t *q, int tag)
-{
-       struct blk_queue_tag *bqt = q->queue_tags;
-
-       if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
-               return NULL;
-
-       return bqt->tag_index[tag];
-}
-
-EXPORT_SYMBOL(blk_queue_find_tag);
-
-/**
- * __blk_queue_free_tags - release tag maintenance info
- * @q:  the request queue for the device
- *
- *  Notes:
- *    blk_cleanup_queue() will take care of calling this function, if tagging
- *    has been used. So there's no need to call this directly.
- **/
-static void __blk_queue_free_tags(request_queue_t *q)
-{
-       struct blk_queue_tag *bqt = q->queue_tags;
-
-       if (!bqt)
-               return;
-
-       if (atomic_dec_and_test(&bqt->refcnt)) {
-               BUG_ON(bqt->busy);
-               BUG_ON(!list_empty(&bqt->busy_list));
-
-               kfree(bqt->tag_index);
-               bqt->tag_index = NULL;
-
-               kfree(bqt->tag_map);
-               bqt->tag_map = NULL;
-
-               kfree(bqt);
-       }
-
-       q->queue_tags = NULL;
-       q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
-}
-
-/**
- * blk_queue_free_tags - release tag maintenance info
- * @q:  the request queue for the device
- *
- *  Notes:
- *     This is used to disable tagged queuing on a device, yet leave the
- *     queue functioning.
- **/
-void blk_queue_free_tags(request_queue_t *q)
-{
-       clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
-}
-
-EXPORT_SYMBOL(blk_queue_free_tags);
-
-static int
-init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
-{
-       struct request **tag_index;
-       unsigned long *tag_map;
-       int nr_ulongs;
-
-       if (depth > q->nr_requests * 2) {
-               depth = q->nr_requests * 2;
-               printk(KERN_ERR "%s: adjusted depth to %d\n",
-                               __FUNCTION__, depth);
-       }
-
-       tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);
-       if (!tag_index)
-               goto fail;
-
-       nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
-       tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
-       if (!tag_map)
-               goto fail;
-
-       memset(tag_index, 0, depth * sizeof(struct request *));
-       memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
-       tags->real_max_depth = depth;
-       tags->max_depth = depth;
-       tags->tag_index = tag_index;
-       tags->tag_map = tag_map;
-
-       return 0;
-fail:
-       kfree(tag_index);
-       return -ENOMEM;
-}
-
-/**
- * blk_queue_init_tags - initialize the queue tag info
- * @q:  the request queue for the device
- * @depth:  the maximum queue depth supported
- * @tags: the tag to use
- **/
-int blk_queue_init_tags(request_queue_t *q, int depth,
-                       struct blk_queue_tag *tags)
-{
-       int rc;
-
-       BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
-
-       if (!tags && !q->queue_tags) {
-               tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
-               if (!tags)
-                       goto fail;
-
-               if (init_tag_map(q, tags, depth))
-                       goto fail;
-
-               INIT_LIST_HEAD(&tags->busy_list);
-               tags->busy = 0;
-               atomic_set(&tags->refcnt, 1);
-       } else if (q->queue_tags) {
-               if ((rc = blk_queue_resize_tags(q, depth)))
-                       return rc;
-               set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
-               return 0;
-       } else
-               atomic_inc(&tags->refcnt);
-
-       /*
-        * assign it, all done
-        */
-       q->queue_tags = tags;
-       q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
-       return 0;
-fail:
-       kfree(tags);
-       return -ENOMEM;
-}
-
-EXPORT_SYMBOL(blk_queue_init_tags);
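
A hypothetical driver with a hardware queue depth of 64 would turn on tagged queuing like this; passing NULL as the tag pointer lets the block layer allocate a private tag map.

        if (blk_queue_init_tags(q, 64, NULL))
                printk(KERN_WARNING "mydrv: out of memory, tagged queuing disabled\n");
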
-
-/**
- * blk_queue_resize_tags - change the queueing depth
- * @q:  the request queue for the device
- * @new_depth: the new max command queueing depth
- *
- *  Notes:
- *    Must be called with the queue lock held.
- **/
-int blk_queue_resize_tags(request_queue_t *q, int new_depth)
-{
-       struct blk_queue_tag *bqt = q->queue_tags;
-       struct request **tag_index;
-       unsigned long *tag_map;
-       int max_depth, nr_ulongs;
-
-       if (!bqt)
-               return -ENXIO;
-
-       /*
-        * if real_max_depth is already large enough, just adjust
-        * max_depth.  *NOTE* requests with tag values between new_depth
-        * and real_max_depth can be in flight, so the tag map cannot be
-        * shrunk blindly here.
-        */
-       if (new_depth <= bqt->real_max_depth) {
-               bqt->max_depth = new_depth;
-               return 0;
-       }
-
-       /*
-        * save the old state info, so we can copy it back
-        */
-       tag_index = bqt->tag_index;
-       tag_map = bqt->tag_map;
-       max_depth = bqt->real_max_depth;
-
-       if (init_tag_map(q, bqt, new_depth))
-               return -ENOMEM;
-
-       memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
-       nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
-       memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
-
-       kfree(tag_index);
-       kfree(tag_map);
-       return 0;
-}
-
-EXPORT_SYMBOL(blk_queue_resize_tags);
-
-/**
- * blk_queue_end_tag - end tag operations for a request
- * @q:  the request queue for the device
- * @rq: the request that has completed
- *
- *  Description:
- *    Typically called when end_that_request_first() returns 0, meaning
- *    all transfers have been done for a request. It's important to call
- *    this function before end_that_request_last(), as that will put the
- *    request back on the free list thus corrupting the internal tag list.
- *
- *  Notes:
- *   queue lock must be held.
- **/
-void blk_queue_end_tag(request_queue_t *q, struct request *rq)
-{
-       struct blk_queue_tag *bqt = q->queue_tags;
-       int tag = rq->tag;
-
-       BUG_ON(tag == -1);
-
-       if (unlikely(tag >= bqt->real_max_depth))
-               /*
-                * This can happen after tag depth has been reduced.
-                * FIXME: how about a warning or info message here?
-                */
-               return;
-
-       if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
-               printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
-                      __FUNCTION__, tag);
-               return;
-       }
-
-       list_del_init(&rq->queuelist);
-       rq->flags &= ~REQ_QUEUED;
-       rq->tag = -1;
-
-       if (unlikely(bqt->tag_index[tag] == NULL))
-               printk(KERN_ERR "%s: tag %d is missing\n",
-                      __FUNCTION__, tag);
-
-       bqt->tag_index[tag] = NULL;
-       bqt->busy--;
-}
-
-EXPORT_SYMBOL(blk_queue_end_tag);
-
-/**
- * blk_queue_start_tag - find a free tag and assign it
- * @q:  the request queue for the device
- * @rq:  the block request that needs tagging
- *
- *  Description:
- *    This can either be used as a stand-alone helper, or possibly be
- *    assigned as the queue &prep_rq_fn (in which case &struct request
- *    automagically gets a tag assigned). Note that this function
- *    assumes that any type of request can be queued! If this is not
- *    true for your device, you must check the request type before
- *    calling this function.  The request will also be removed from
- *    the request queue, so it is the driver's responsibility to re-add
- *    it if it needs to be restarted for some reason.
- *
- *  Notes:
- *   queue lock must be held.
- **/
-int blk_queue_start_tag(request_queue_t *q, struct request *rq)
-{
-       struct blk_queue_tag *bqt = q->queue_tags;
-       int tag;
-
-       if (unlikely((rq->flags & REQ_QUEUED))) {
-               printk(KERN_ERR 
-                      "%s: request %p for device [%s] already tagged %d",
-                      __FUNCTION__, rq,
-                      rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
-               BUG();
-       }
-
-       tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
-       if (tag >= bqt->max_depth)
-               return 1;
-
-       __set_bit(tag, bqt->tag_map);
-
-       rq->flags |= REQ_QUEUED;
-       rq->tag = tag;
-       bqt->tag_index[tag] = rq;
-       blkdev_dequeue_request(rq);
-       list_add(&rq->queuelist, &bqt->busy_list);
-       bqt->busy++;
-       return 0;
-}
-
-EXPORT_SYMBOL(blk_queue_start_tag);
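A hedged sketch of how a driver might pair blk_queue_start_tag() with blk_queue_end_tag(); example_request_fn and example_complete are hypothetical names, and elv_next_request() is assumed from the elevator code moved in this same commit.

static void example_request_fn(request_queue_t *q)
{
	struct request *rq;

	/* the request_fn is entered with the queue lock held */
	while ((rq = elv_next_request(q)) != NULL) {
		if (blk_queue_start_tag(q, rq)) {
			/* out of tags: stop the queue until a completion frees one */
			blk_stop_queue(q);
			break;
		}
		/* rq->tag now identifies the command; hand it to the hardware */
	}
}

/* completion path, also with the queue lock held */
static void example_complete(request_queue_t *q, struct request *rq)
{
	blk_queue_end_tag(q, rq);
	blk_start_queue(q);
}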
-
-/**
- * blk_queue_invalidate_tags - invalidate all pending tags
- * @q:  the request queue for the device
- *
- *  Description:
- *   Hardware conditions may dictate a need to stop all pending requests.
- *   In this case, we will safely clear the block side of the tag queue and
- *   re-add all requests to the request queue in the right order.
- *
- *  Notes:
- *   queue lock must be held.
- **/
-void blk_queue_invalidate_tags(request_queue_t *q)
-{
-       struct blk_queue_tag *bqt = q->queue_tags;
-       struct list_head *tmp, *n;
-       struct request *rq;
-
-       list_for_each_safe(tmp, n, &bqt->busy_list) {
-               rq = list_entry_rq(tmp);
-
-               if (rq->tag == -1) {
-                       printk(KERN_ERR
-                              "%s: bad tag found on list\n", __FUNCTION__);
-                       list_del_init(&rq->queuelist);
-                       rq->flags &= ~REQ_QUEUED;
-               } else
-                       blk_queue_end_tag(q, rq);
-
-               rq->flags &= ~REQ_STARTED;
-               __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
-       }
-}
-
-EXPORT_SYMBOL(blk_queue_invalidate_tags);
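For instance (sketch only, inside a hypothetical reset handler), a driver recovering from a controller reset could requeue everything that was in flight:

	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_queue_invalidate_tags(q);	/* clears the tags and re-adds the requests */
	spin_unlock_irqrestore(q->queue_lock, flags);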
-
-static char *rq_flags[] = {
-       "REQ_RW",
-       "REQ_FAILFAST",
-       "REQ_SORTED",
-       "REQ_SOFTBARRIER",
-       "REQ_HARDBARRIER",
-       "REQ_CMD",
-       "REQ_NOMERGE",
-       "REQ_STARTED",
-       "REQ_DONTPREP",
-       "REQ_QUEUED",
-       "REQ_ELVPRIV",
-       "REQ_PC",
-       "REQ_BLOCK_PC",
-       "REQ_SENSE",
-       "REQ_FAILED",
-       "REQ_QUIET",
-       "REQ_SPECIAL",
-       "REQ_DRIVE_CMD",
-       "REQ_DRIVE_TASK",
-       "REQ_DRIVE_TASKFILE",
-       "REQ_PREEMPT",
-       "REQ_PM_SUSPEND",
-       "REQ_PM_RESUME",
-       "REQ_PM_SHUTDOWN",
-};
-
-void blk_dump_rq_flags(struct request *rq, char *msg)
-{
-       int bit;
-
-       printk("%s: dev %s: flags = ", msg,
-               rq->rq_disk ? rq->rq_disk->disk_name : "?");
-       bit = 0;
-       do {
-               if (rq->flags & (1 << bit))
-                       printk("%s ", rq_flags[bit]);
-               bit++;
-       } while (bit < __REQ_NR_BITS);
-
-       printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
-                                                      rq->nr_sectors,
-                                                      rq->current_nr_sectors);
-       printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
-
-       if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
-               printk("cdb: ");
-               for (bit = 0; bit < sizeof(rq->cmd); bit++)
-                       printk("%02x ", rq->cmd[bit]);
-               printk("\n");
-       }
-}
-
-EXPORT_SYMBOL(blk_dump_rq_flags);
-
-void blk_recount_segments(request_queue_t *q, struct bio *bio)
-{
-       struct bio_vec *bv, *bvprv = NULL;
-       int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
-       int high, highprv = 1;
-
-       if (unlikely(!bio->bi_io_vec))
-               return;
-
-       cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
-       hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0;
-       bio_for_each_segment(bv, bio, i) {
-               /*
-                * the trick here is making sure that a high page is never
-                * considered part of another segment, since that might
-                * change with the bounce page.
-                */
-               high = page_to_pfn(bv->bv_page) >= q->bounce_pfn;
-               if (high || highprv)
-                       goto new_hw_segment;
-               if (cluster) {
-                       if (seg_size + bv->bv_len > q->max_segment_size)
-                               goto new_segment;
-                       if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
-                               goto new_segment;
-                       if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
-                               goto new_segment;
-                       if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
-                               goto new_hw_segment;
-
-                       seg_size += bv->bv_len;
-                       hw_seg_size += bv->bv_len;
-                       bvprv = bv;
-                       continue;
-               }
-new_segment:
-               if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
-                   !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) {
-                       hw_seg_size += bv->bv_len;
-               } else {
-new_hw_segment:
-                       if (hw_seg_size > bio->bi_hw_front_size)
-                               bio->bi_hw_front_size = hw_seg_size;
-                       hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
-                       nr_hw_segs++;
-               }
-
-               nr_phys_segs++;
-               bvprv = bv;
-               seg_size = bv->bv_len;
-               highprv = high;
-       }
-       if (hw_seg_size > bio->bi_hw_back_size)
-               bio->bi_hw_back_size = hw_seg_size;
-       if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size)
-               bio->bi_hw_front_size = hw_seg_size;
-       bio->bi_phys_segments = nr_phys_segs;
-       bio->bi_hw_segments = nr_hw_segs;
-       bio->bi_flags |= (1 << BIO_SEG_VALID);
-}
-
-
-static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
-                                  struct bio *nxt)
-{
-       if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
-               return 0;
-
-       if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
-               return 0;
-       if (bio->bi_size + nxt->bi_size > q->max_segment_size)
-               return 0;
-
-       /*
-        * bio and nxt are contiguous in memory, check if the queue allows
-        * these two to be merged into one
-        */
-       if (BIO_SEG_BOUNDARY(q, bio, nxt))
-               return 1;
-
-       return 0;
-}
-
-static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
-                                struct bio *nxt)
-{
-       if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
-               blk_recount_segments(q, bio);
-       if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
-               blk_recount_segments(q, nxt);
-       if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
-           BIOVEC_VIRT_OVERSIZE(bio->bi_hw_front_size + bio->bi_hw_back_size))
-               return 0;
-       if (bio->bi_size + nxt->bi_size > q->max_segment_size)
-               return 0;
-
-       return 1;
-}
-
-/*
- * map a request to scatterlist, return number of sg entries set up. Caller
- * must make sure sg can hold rq->nr_phys_segments entries
- */
-int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
-{
-       struct bio_vec *bvec, *bvprv;
-       struct bio *bio;
-       int nsegs, i, cluster;
-
-       nsegs = 0;
-       cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
-
-       /*
-        * for each bio in rq
-        */
-       bvprv = NULL;
-       rq_for_each_bio(bio, rq) {
-               /*
-                * for each segment in bio
-                */
-               bio_for_each_segment(bvec, bio, i) {
-                       int nbytes = bvec->bv_len;
-
-                       if (bvprv && cluster) {
-                               if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
-                                       goto new_segment;
-
-                               if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
-                                       goto new_segment;
-                               if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
-                                       goto new_segment;
-
-                               sg[nsegs - 1].length += nbytes;
-                       } else {
-new_segment:
-                               memset(&sg[nsegs],0,sizeof(struct scatterlist));
-                               sg[nsegs].page = bvec->bv_page;
-                               sg[nsegs].length = nbytes;
-                               sg[nsegs].offset = bvec->bv_offset;
-
-                               nsegs++;
-                       }
-                       bvprv = bvec;
-               } /* segments in bio */
-       } /* bios in rq */
-
-       return nsegs;
-}
-
-EXPORT_SYMBOL(blk_rq_map_sg);
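The usual caller side, sketched under the assumption that sg is a driver-owned scatterlist table large enough for rq->nr_phys_segments entries (bounded by the max_phys_segments limit set on the queue):

	int nseg = blk_rq_map_sg(q, rq, sg);

	/*
	 * sg[0..nseg-1] now describe the request's data; a real driver
	 * would hand them to its DMA engine, e.g. via dma_map_sg().
	 */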
-
-/*
- * the standard queue merge functions, can be overridden with device
- * specific ones if so desired
- */
-
-static inline int ll_new_mergeable(request_queue_t *q,
-                                  struct request *req,
-                                  struct bio *bio)
-{
-       int nr_phys_segs = bio_phys_segments(q, bio);
-
-       if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-               req->flags |= REQ_NOMERGE;
-               if (req == q->last_merge)
-                       q->last_merge = NULL;
-               return 0;
-       }
-
-       /*
-        * A hw segment is just getting larger, bump just the phys
-        * counter.
-        */
-       req->nr_phys_segments += nr_phys_segs;
-       return 1;
-}
-
-static inline int ll_new_hw_segment(request_queue_t *q,
-                                   struct request *req,
-                                   struct bio *bio)
-{
-       int nr_hw_segs = bio_hw_segments(q, bio);
-       int nr_phys_segs = bio_phys_segments(q, bio);
-
-       if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
-           || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-               req->flags |= REQ_NOMERGE;
-               if (req == q->last_merge)
-                       q->last_merge = NULL;
-               return 0;
-       }
-
-       /*
-        * This will form the start of a new hw segment.  Bump both
-        * counters.
-        */
-       req->nr_hw_segments += nr_hw_segs;
-       req->nr_phys_segments += nr_phys_segs;
-       return 1;
-}
-
-static int ll_back_merge_fn(request_queue_t *q, struct request *req, 
-                           struct bio *bio)
-{
-       int len;
-
-       if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
-               req->flags |= REQ_NOMERGE;
-               if (req == q->last_merge)
-                       q->last_merge = NULL;
-               return 0;
-       }
-       if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
-               blk_recount_segments(q, req->biotail);
-       if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
-               blk_recount_segments(q, bio);
-       len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-       if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
-           !BIOVEC_VIRT_OVERSIZE(len)) {
-               int mergeable =  ll_new_mergeable(q, req, bio);
-
-               if (mergeable) {
-                       if (req->nr_hw_segments == 1)
-                               req->bio->bi_hw_front_size = len;
-                       if (bio->bi_hw_segments == 1)
-                               bio->bi_hw_back_size = len;
-               }
-               return mergeable;
-       }
-
-       return ll_new_hw_segment(q, req, bio);
-}
-
-static int ll_front_merge_fn(request_queue_t *q, struct request *req, 
-                            struct bio *bio)
-{
-       int len;
-
-       if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
-               req->flags |= REQ_NOMERGE;
-               if (req == q->last_merge)
-                       q->last_merge = NULL;
-               return 0;
-       }
-       len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
-       if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
-               blk_recount_segments(q, bio);
-       if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
-               blk_recount_segments(q, req->bio);
-       if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
-           !BIOVEC_VIRT_OVERSIZE(len)) {
-               int mergeable =  ll_new_mergeable(q, req, bio);
-
-               if (mergeable) {
-                       if (bio->bi_hw_segments == 1)
-                               bio->bi_hw_front_size = len;
-                       if (req->nr_hw_segments == 1)
-                               req->biotail->bi_hw_back_size = len;
-               }
-               return mergeable;
-       }
-
-       return ll_new_hw_segment(q, req, bio);
-}
-
-static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
-                               struct request *next)
-{
-       int total_phys_segments;
-       int total_hw_segments;
-
-       /*
-        * First check if either of the requests is a re-queued
-        * request.  Can't merge them if they are.
-        */
-       if (req->special || next->special)
-               return 0;
-
-       /*
-        * Will it become too large?
-        */
-       if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
-               return 0;
-
-       total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
-       if (blk_phys_contig_segment(q, req->biotail, next->bio))
-               total_phys_segments--;
-
-       if (total_phys_segments > q->max_phys_segments)
-               return 0;
-
-       total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
-       if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
-               int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
-               /*
-                * propagate the combined length to the end of the requests
-                */
-               if (req->nr_hw_segments == 1)
-                       req->bio->bi_hw_front_size = len;
-               if (next->nr_hw_segments == 1)
-                       next->biotail->bi_hw_back_size = len;
-               total_hw_segments--;
-       }
-
-       if (total_hw_segments > q->max_hw_segments)
-               return 0;
-
-       /* Merge is OK... */
-       req->nr_phys_segments = total_phys_segments;
-       req->nr_hw_segments = total_hw_segments;
-       return 1;
-}
-
-/*
- * "plug" the device if there are no outstanding requests: this will
- * force the transfer to start only after we have put all the requests
- * on the list.
- *
- * This is called with interrupts off and no requests on the queue and
- * with the queue lock held.
- */
-void blk_plug_device(request_queue_t *q)
-{
-       WARN_ON(!irqs_disabled());
-
-       /*
-        * don't plug a stopped queue, it must be paired with blk_start_queue()
-        * which will restart the queueing
-        */
-       if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
-               return;
-
-       if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
-               mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
-}
-
-EXPORT_SYMBOL(blk_plug_device);
-
-/*
- * remove the queue from the plugged list, if present. called with
- * queue lock held and interrupts disabled.
- */
-int blk_remove_plug(request_queue_t *q)
-{
-       WARN_ON(!irqs_disabled());
-
-       if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
-               return 0;
-
-       del_timer(&q->unplug_timer);
-       return 1;
-}
-
-EXPORT_SYMBOL(blk_remove_plug);
-
-/*
- * remove the plug and let it rip..
- */
-void __generic_unplug_device(request_queue_t *q)
-{
-       if (unlikely(test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)))
-               return;
-
-       if (!blk_remove_plug(q))
-               return;
-
-       q->request_fn(q);
-}
-EXPORT_SYMBOL(__generic_unplug_device);
-
-/**
- * generic_unplug_device - fire a request queue
- * @q:    The &request_queue_t in question
- *
- * Description:
- *   Linux uses plugging to build bigger requests queues before letting
- *   the device have at them. If a queue is plugged, the I/O scheduler
- *   is still adding and merging requests on the queue. Once the queue
- *   gets unplugged, the request_fn defined for the queue is invoked and
- *   transfers started.
- **/
-void generic_unplug_device(request_queue_t *q)
-{
-       spin_lock_irq(q->queue_lock);
-       __generic_unplug_device(q);
-       spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL(generic_unplug_device);
-
-static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
-                                  struct page *page)
-{
-       request_queue_t *q = bdi->unplug_io_data;
-
-       /*
-        * devices don't necessarily have an ->unplug_fn defined
-        */
-       if (q->unplug_fn)
-               q->unplug_fn(q);
-}
-
-static void blk_unplug_work(void *data)
-{
-       request_queue_t *q = data;
-
-       q->unplug_fn(q);
-}
-
-static void blk_unplug_timeout(unsigned long data)
-{
-       request_queue_t *q = (request_queue_t *)data;
-
-       kblockd_schedule_work(&q->unplug_work);
-}
-
-/**
- * blk_start_queue - restart a previously stopped queue
- * @q:    The &request_queue_t in question
- *
- * Description:
- *   blk_start_queue() will clear the stop flag on the queue, and call
- *   the request_fn for the queue if it was in a stopped state when
- *   entered. Also see blk_stop_queue(). Queue lock must be held.
- **/
-void blk_start_queue(request_queue_t *q)
-{
-       clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
-
-       /*
-        * one level of recursion is ok and is much faster than kicking
-        * the unplug handling
-        */
-       if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
-               q->request_fn(q);
-               clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
-       } else {
-               blk_plug_device(q);
-               kblockd_schedule_work(&q->unplug_work);
-       }
-}
-
-EXPORT_SYMBOL(blk_start_queue);
-
-/**
- * blk_stop_queue - stop a queue
- * @q:    The &request_queue_t in question
- *
- * Description:
- *   The Linux block layer assumes that a block driver will consume all
- *   entries on the request queue when the request_fn strategy is called.
- *   Often this will not happen, because of hardware limitations (queue
- *   depth settings). If a device driver gets a 'queue full' response,
- *   or if it simply chooses not to queue more I/O at one point, it can
- *   call this function to prevent the request_fn from being called until
- *   the driver has signalled it's ready to go again. This happens by calling
- *   blk_start_queue() to restart queue operations. Queue lock must be held.
- **/
-void blk_stop_queue(request_queue_t *q)
-{
-       blk_remove_plug(q);
-       set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
-}
-EXPORT_SYMBOL(blk_stop_queue);
-
-/**
- * blk_sync_queue - cancel any pending callbacks on a queue
- * @q: the queue
- *
- * Description:
- *     The block layer may perform asynchronous callback activity
- *     on a queue, such as calling the unplug function after a timeout.
- *     A block device may call blk_sync_queue to ensure that any
- *     such activity is cancelled, thus allowing it to release resources
- *     the callbacks might use. The caller must already have made sure
- *     that its ->make_request_fn will not re-add plugging prior to calling
- *     this function.
- *
- */
-void blk_sync_queue(struct request_queue *q)
-{
-       del_timer_sync(&q->unplug_timer);
-       kblockd_flush();
-}
-EXPORT_SYMBOL(blk_sync_queue);
-
-/**
- * blk_run_queue - run a single device queue
- * @q: The queue to run
- */
-void blk_run_queue(struct request_queue *q)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-       blk_remove_plug(q);
-       if (!elv_queue_empty(q))
-               q->request_fn(q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(blk_run_queue);
-
-/**
- * blk_cleanup_queue - release a &request_queue_t when it is no longer needed
- * @q:    the request queue to be released
- *
- * Description:
- *     blk_cleanup_queue is the pair to blk_init_queue() or
- *     blk_queue_make_request().  It should be called when a request queue is
- *     being released; typically when a block device is being de-registered.
- *     Currently, its primary task is to free all the &struct request
- *     structures that were allocated to the queue and the queue itself.
- *
- * Caveat:
- *     Hopefully the low level driver will have finished any
- *     outstanding requests first...
- **/
-void blk_cleanup_queue(request_queue_t * q)
-{
-       struct request_list *rl = &q->rq;
-
-       if (!atomic_dec_and_test(&q->refcnt))
-               return;
-
-       if (q->elevator)
-               elevator_exit(q->elevator);
-
-       blk_sync_queue(q);
-
-       if (rl->rq_pool)
-               mempool_destroy(rl->rq_pool);
-
-       if (q->queue_tags)
-               __blk_queue_free_tags(q);
-
-       blk_queue_ordered(q, QUEUE_ORDERED_NONE);
-
-       kmem_cache_free(requestq_cachep, q);
-}
-
-EXPORT_SYMBOL(blk_cleanup_queue);
-
-static int blk_init_free_list(request_queue_t *q)
-{
-       struct request_list *rl = &q->rq;
-
-       rl->count[READ] = rl->count[WRITE] = 0;
-       rl->starved[READ] = rl->starved[WRITE] = 0;
-       rl->elvpriv = 0;
-       init_waitqueue_head(&rl->wait[READ]);
-       init_waitqueue_head(&rl->wait[WRITE]);
-
-       rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
-                               mempool_free_slab, request_cachep, q->node);
-
-       if (!rl->rq_pool)
-               return -ENOMEM;
-
-       return 0;
-}
-
-static int __make_request(request_queue_t *, struct bio *);
-
-request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
-{
-       return blk_alloc_queue_node(gfp_mask, -1);
-}
-EXPORT_SYMBOL(blk_alloc_queue);
-
-request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
-{
-       request_queue_t *q;
-
-       q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
-       if (!q)
-               return NULL;
-
-       memset(q, 0, sizeof(*q));
-       init_timer(&q->unplug_timer);
-       atomic_set(&q->refcnt, 1);
-
-       q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
-       q->backing_dev_info.unplug_io_data = q;
-
-       return q;
-}
-EXPORT_SYMBOL(blk_alloc_queue_node);
-
-/**
- * blk_init_queue  - prepare a request queue for use with a block device
- * @rfn:  The function to be called to process requests that have been
- *        placed on the queue.
- * @lock: Request queue spin lock
- *
- * Description:
- *    If a block device wishes to use the standard request handling procedures,
- *    which sorts requests and coalesces adjacent requests, then it must
- *    call blk_init_queue().  The function @rfn will be called when there
- *    are requests on the queue that need to be processed.  If the device
- *    supports plugging, then @rfn may not be called immediately when requests
- *    are available on the queue, but may be called at some time later instead.
- *    Plugged queues are generally unplugged when a buffer belonging to one
- *    of the requests on the queue is needed, or due to memory pressure.
- *
- *    @rfn is not required, or even expected, to remove all requests off the
- *    queue, but only as many as it can handle at a time.  If it does leave
- *    requests on the queue, it is responsible for arranging that the requests
- *    get dealt with eventually.
- *
- *    The queue spin lock must be held while manipulating the requests on the
- *    request queue.
- *
- *    Function returns a pointer to the initialized request queue, or NULL if
- *    it didn't succeed.
- *
- * Note:
- *    blk_init_queue() must be paired with a blk_cleanup_queue() call
- *    when the block device is deactivated (such as at module unload).
- **/
-
-request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
-{
-       return blk_init_queue_node(rfn, lock, -1);
-}
-EXPORT_SYMBOL(blk_init_queue);
-
-request_queue_t *
-blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
-{
-       request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
-
-       if (!q)
-               return NULL;
-
-       q->node = node_id;
-       if (blk_init_free_list(q))
-               goto out_init;
-
-       /*
-        * if caller didn't supply a lock, they get per-queue locking with
-        * our embedded lock
-        */
-       if (!lock) {
-               spin_lock_init(&q->__queue_lock);
-               lock = &q->__queue_lock;
-       }
-
-       q->request_fn           = rfn;
-       q->back_merge_fn        = ll_back_merge_fn;
-       q->front_merge_fn       = ll_front_merge_fn;
-       q->merge_requests_fn    = ll_merge_requests_fn;
-       q->prep_rq_fn           = NULL;
-       q->unplug_fn            = generic_unplug_device;
-       q->queue_flags          = (1 << QUEUE_FLAG_CLUSTER);
-       q->queue_lock           = lock;
-
-       blk_queue_segment_boundary(q, 0xffffffff);
-
-       blk_queue_make_request(q, __make_request);
-       blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
-
-       blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
-       blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
-
-       /*
-        * all done
-        */
-       if (!elevator_init(q, NULL)) {
-               blk_queue_congestion_threshold(q);
-               return q;
-       }
-
-       blk_cleanup_queue(q);
-out_init:
-       kmem_cache_free(requestq_cachep, q);
-       return NULL;
-}
-EXPORT_SYMBOL(blk_init_queue_node);
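A hedged init/teardown sketch pairing blk_init_queue() with blk_cleanup_queue(); example_lock, example_q and example_request_fn are hypothetical driver symbols, and blk_queue_max_sectors() (one of the settings helpers earlier in this file) is shown only as an example of adjusting limits afterwards.

static spinlock_t example_lock;
static request_queue_t *example_q;

static int __init example_init(void)
{
	spin_lock_init(&example_lock);
	example_q = blk_init_queue(example_request_fn, &example_lock);
	if (!example_q)
		return -ENOMEM;
	blk_queue_max_sectors(example_q, 128);	/* optional per-device limit */
	return 0;
}

static void __exit example_exit(void)
{
	blk_cleanup_queue(example_q);		/* drops the queue reference */
}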
-
-int blk_get_queue(request_queue_t *q)
-{
-       if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
-               atomic_inc(&q->refcnt);
-               return 0;
-       }
-
-       return 1;
-}
-
-EXPORT_SYMBOL(blk_get_queue);
-
-static inline void blk_free_request(request_queue_t *q, struct request *rq)
-{
-       if (rq->flags & REQ_ELVPRIV)
-               elv_put_request(q, rq);
-       mempool_free(rq, q->rq.rq_pool);
-}
-
-static inline struct request *
-blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
-                 int priv, gfp_t gfp_mask)
-{
-       struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
-
-       if (!rq)
-               return NULL;
-
-       /*
-        * first three bits are identical in rq->flags and bio->bi_rw,
-        * see bio.h and blkdev.h
-        */
-       rq->flags = rw;
-
-       if (priv) {
-               if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
-                       mempool_free(rq, q->rq.rq_pool);
-                       return NULL;
-               }
-               rq->flags |= REQ_ELVPRIV;
-       }
-
-       return rq;
-}
-
-/*
- * ioc_batching returns true if the ioc is a valid batching context and
- * should be given priority access to a request.
- */
-static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
-{
-       if (!ioc)
-               return 0;
-
-       /*
-        * Make sure the process is able to allocate at least 1 request
-        * even if the batch times out, otherwise we could theoretically
-        * lose wakeups.
-        */
-       return ioc->nr_batch_requests == q->nr_batching ||
-               (ioc->nr_batch_requests > 0
-               && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
-}
-
-/*
- * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
- * will cause the process to be a "batcher" on all queues in the system. This
- * is the behaviour we want though - once it gets a wakeup it should be given
- * a nice run.
- */
-static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
-{
-       if (!ioc || ioc_batching(q, ioc))
-               return;
-
-       ioc->nr_batch_requests = q->nr_batching;
-       ioc->last_waited = jiffies;
-}
-
-static void __freed_request(request_queue_t *q, int rw)
-{
-       struct request_list *rl = &q->rq;
-
-       if (rl->count[rw] < queue_congestion_off_threshold(q))
-               clear_queue_congested(q, rw);
-
-       if (rl->count[rw] + 1 <= q->nr_requests) {
-               if (waitqueue_active(&rl->wait[rw]))
-                       wake_up(&rl->wait[rw]);
-
-               blk_clear_queue_full(q, rw);
-       }
-}
-
-/*
- * A request has just been released.  Account for it, update the full and
- * congestion status, wake up any waiters.   Called under q->queue_lock.
- */
-static void freed_request(request_queue_t *q, int rw, int priv)
-{
-       struct request_list *rl = &q->rq;
-
-       rl->count[rw]--;
-       if (priv)
-               rl->elvpriv--;
-
-       __freed_request(q, rw);
-
-       if (unlikely(rl->starved[rw ^ 1]))
-               __freed_request(q, rw ^ 1);
-}
-
-#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
-/*
- * Get a free request, queue_lock must be held.
- * Returns NULL on failure, with queue_lock held.
- * Returns !NULL on success, with queue_lock *not held*.
- */
-static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
-                                  gfp_t gfp_mask)
-{
-       struct request *rq = NULL;
-       struct request_list *rl = &q->rq;
-       struct io_context *ioc = current_io_context(GFP_ATOMIC);
-       int priv;
-
-       if (rl->count[rw]+1 >= q->nr_requests) {
-               /*
-                * The queue will fill after this allocation, so set it as
-                * full, and mark this process as "batching". This process
-                * will be allowed to complete a batch of requests, others
-                * will be blocked.
-                */
-               if (!blk_queue_full(q, rw)) {
-                       ioc_set_batching(q, ioc);
-                       blk_set_queue_full(q, rw);
-               }
-       }
-
-       switch (elv_may_queue(q, rw, bio)) {
-               case ELV_MQUEUE_NO:
-                       goto rq_starved;
-               case ELV_MQUEUE_MAY:
-                       break;
-               case ELV_MQUEUE_MUST:
-                       goto get_rq;
-       }
-
-       if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) {
-               /*
-                * The queue is full and the allocating process is not a
-                * "batcher", and not exempted by the IO scheduler
-                */
-               goto out;
-       }
-
-get_rq:
-       /*
-        * Only allow batching queuers to allocate up to 50% over the defined
-        * limit of requests, otherwise we could have thousands of requests
-        * allocated with any setting of ->nr_requests
-        */
-       if (rl->count[rw] >= (3 * q->nr_requests / 2))
-               goto out;
-
-       rl->count[rw]++;
-       rl->starved[rw] = 0;
-       if (rl->count[rw] >= queue_congestion_on_threshold(q))
-               set_queue_congested(q, rw);
-
-       priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-       if (priv)
-               rl->elvpriv++;
-
-       spin_unlock_irq(q->queue_lock);
-
-       rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
-       if (!rq) {
-               /*
-                * Allocation failed presumably due to memory. Undo anything
-                * we might have messed up.
-                *
-                * Allocating task should really be put onto the front of the
-                * wait queue, but this is pretty rare.
-                */
-               spin_lock_irq(q->queue_lock);
-               freed_request(q, rw, priv);
-
-               /*
-                * in the very unlikely event that allocation failed and no
-                * requests for this direction were pending, mark us starved
-                * so that freeing of a request in the other direction will
-                * notice us. Another possible fix would be to split the
-                * rq mempool into READ and WRITE
-                */
-rq_starved:
-               if (unlikely(rl->count[rw] == 0))
-                       rl->starved[rw] = 1;
-
-               goto out;
-       }
-
-       if (ioc_batching(q, ioc))
-               ioc->nr_batch_requests--;
-       
-       rq_init(q, rq);
-       rq->rl = rl;
-out:
-       return rq;
-}
-
-/*
- * No available requests for this queue, unplug the device and wait for some
- * requests to become available.
- *
- * Called with q->queue_lock held, and returns with it unlocked.
- */
-static struct request *get_request_wait(request_queue_t *q, int rw,
-                                       struct bio *bio)
-{
-       struct request *rq;
-
-       rq = get_request(q, rw, bio, GFP_NOIO);
-       while (!rq) {
-               DEFINE_WAIT(wait);
-               struct request_list *rl = &q->rq;
-
-               prepare_to_wait_exclusive(&rl->wait[rw], &wait,
-                               TASK_UNINTERRUPTIBLE);
-
-               rq = get_request(q, rw, bio, GFP_NOIO);
-
-               if (!rq) {
-                       struct io_context *ioc;
-
-                       __generic_unplug_device(q);
-                       spin_unlock_irq(q->queue_lock);
-                       io_schedule();
-
-                       /*
-                        * After sleeping, we become a "batching" process and
-                        * will be able to allocate at least one request, and
-                        * up to a big batch of them for a small period of time.
-                        * See ioc_batching, ioc_set_batching
-                        */
-                       ioc = current_io_context(GFP_NOIO);
-                       ioc_set_batching(q, ioc);
-
-                       spin_lock_irq(q->queue_lock);
-               }
-               finish_wait(&rl->wait[rw], &wait);
-       }
-
-       return rq;
-}
-
-struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
-{
-       struct request *rq;
-
-       BUG_ON(rw != READ && rw != WRITE);
-
-       spin_lock_irq(q->queue_lock);
-       if (gfp_mask & __GFP_WAIT) {
-               rq = get_request_wait(q, rw, NULL);
-       } else {
-               rq = get_request(q, rw, NULL, gfp_mask);
-               if (!rq)
-                       spin_unlock_irq(q->queue_lock);
-       }
-       /* q->queue_lock is unlocked at this point */
-
-       return rq;
-}
-EXPORT_SYMBOL(blk_get_request);
-
-/**
- * blk_requeue_request - put a request back on queue
- * @q:         request queue where request should be inserted
- * @rq:                request to be inserted
- *
- * Description:
- *    Drivers often keep queueing requests until the hardware cannot accept
- *    more. When that condition happens, we need to put the request back
- *    on the queue. Must be called with queue lock held.
- */
-void blk_requeue_request(request_queue_t *q, struct request *rq)
-{
-       if (blk_rq_tagged(rq))
-               blk_queue_end_tag(q, rq);
-
-       elv_requeue_request(q, rq);
-}
-
-EXPORT_SYMBOL(blk_requeue_request);
-
-/**
- * blk_insert_request - insert a special request in to a request queue
- * @q:         request queue where request should be inserted
- * @rq:                request to be inserted
- * @at_head:   insert request at head or tail of queue
- * @data:      private data
- *
- * Description:
- *    Many block devices need to execute commands asynchronously, so they don't
- *    block the whole kernel from preemption during request execution.  This is
- *    accomplished normally by inserting artificial requests tagged as
- *    REQ_SPECIAL into the corresponding request queue, and letting them be
- *    scheduled for actual execution by the request queue.
- *
- *    We have the option of inserting the head or the tail of the queue.
- *    Typically we use the tail for new ioctls and so forth.  We use the head
- *    of the queue for things like a QUEUE_FULL message from a device, or a
- *    host that is unable to accept a particular command.
- */
-void blk_insert_request(request_queue_t *q, struct request *rq,
-                       int at_head, void *data)
-{
-       int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
-       unsigned long flags;
-
-       /*
-        * tell I/O scheduler that this isn't a regular read/write (ie it
-        * must not attempt merges on this) and that it acts as a soft
-        * barrier
-        */
-       rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER;
-
-       rq->special = data;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-
-       /*
-        * If command is tagged, release the tag
-        */
-       if (blk_rq_tagged(rq))
-               blk_queue_end_tag(q, rq);
-
-       drive_stat_acct(rq, rq->nr_sectors, 1);
-       __elv_add_request(q, rq, where, 0);
-
-       if (blk_queue_plugged(q))
-               __generic_unplug_device(q);
-       else
-               q->request_fn(q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-EXPORT_SYMBOL(blk_insert_request);
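A brief, hypothetical example of the "special request" pattern described above, where disk and private_data are assumed driver state:

	struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);

	rq->rq_disk = disk;
	blk_insert_request(q, rq, 1, private_data);	/* head insert; data ends up in rq->special */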
-
-/**
- * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
- * @q:         request queue where request should be inserted
- * @rq:                request structure to fill
- * @ubuf:      the user buffer
- * @len:       length of user data
- *
- * Description:
- *    Data will be mapped directly for zero copy io, if possible. Otherwise
- *    a kernel bounce buffer is used.
- *
- *    A matching blk_rq_unmap_user() must be issued at the end of io, while
- *    still in process context.
- *
- *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
- *    before being submitted to the device, as pages mapped may be out of
- *    reach. It's the caller's responsibility to make sure this happens. The
- *    original bio must be passed back in to blk_rq_unmap_user() for proper
- *    unmapping.
- */
-int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
-                   unsigned int len)
-{
-       unsigned long uaddr;
-       struct bio *bio;
-       int reading;
-
-       if (len > (q->max_sectors << 9))
-               return -EINVAL;
-       if (!len || !ubuf)
-               return -EINVAL;
-
-       reading = rq_data_dir(rq) == READ;
-
-       /*
-        * if alignment requirement is satisfied, map in user pages for
-        * direct dma. else, set up kernel bounce buffers
-        */
-       uaddr = (unsigned long) ubuf;
-       if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
-               bio = bio_map_user(q, NULL, uaddr, len, reading);
-       else
-               bio = bio_copy_user(q, uaddr, len, reading);
-
-       if (!IS_ERR(bio)) {
-               rq->bio = rq->biotail = bio;
-               blk_rq_bio_prep(q, rq, bio);
-
-               rq->buffer = rq->data = NULL;
-               rq->data_len = len;
-               return 0;
-       }
-
-       /*
-        * bio is the err-ptr
-        */
-       return PTR_ERR(bio);
-}
-
-EXPORT_SYMBOL(blk_rq_map_user);
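A sketch of the typical ioctl-path usage (in the style of the SCSI SG_IO handling); ubuf, len and disk are assumed caller-supplied, and the original bio is saved so it can be handed back to blk_rq_unmap_user():

	struct bio *bio;
	int err;

	err = blk_rq_map_user(q, rq, ubuf, len);
	if (err)
		return err;
	bio = rq->bio;				/* keep for unmapping */

	err = blk_execute_rq(q, disk, rq, 0);	/* defined further down in this file */

	if (blk_rq_unmap_user(bio, len))
		err = -EFAULT;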
-
-/**
- * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
- * @q:         request queue where request should be inserted
- * @rq:                request to map data to
- * @iov:       pointer to the iovec
- * @iov_count: number of elements in the iovec
- *
- * Description:
- *    Data will be mapped directly for zero copy io, if possible. Otherwise
- *    a kernel bounce buffer is used.
- *
- *    A matching blk_rq_unmap_user() must be issued at the end of io, while
- *    still in process context.
- *
- *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
- *    before being submitted to the device, as pages mapped may be out of
- *    reach. It's the caller's responsibility to make sure this happens. The
- *    original bio must be passed back in to blk_rq_unmap_user() for proper
- *    unmapping.
- */
-int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
-                       struct sg_iovec *iov, int iov_count)
-{
-       struct bio *bio;
-
-       if (!iov || iov_count <= 0)
-               return -EINVAL;
-
-       /* we don't allow misaligned data like bio_map_user() does.  If the
-        * user is using sg, they're expected to know the alignment constraints
-        * and respect them accordingly */
-       bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
-       if (IS_ERR(bio))
-               return PTR_ERR(bio);
-
-       rq->bio = rq->biotail = bio;
-       blk_rq_bio_prep(q, rq, bio);
-       rq->buffer = rq->data = NULL;
-       rq->data_len = bio->bi_size;
-       return 0;
-}
-
-EXPORT_SYMBOL(blk_rq_map_user_iov);
-
-/**
- * blk_rq_unmap_user - unmap a request with user data
- * @bio:       bio to be unmapped
- * @ulen:      length of user buffer
- *
- * Description:
- *    Unmap a bio previously mapped by blk_rq_map_user().
- */
-int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
-{
-       int ret = 0;
-
-       if (bio) {
-               if (bio_flagged(bio, BIO_USER_MAPPED))
-                       bio_unmap_user(bio);
-               else
-                       ret = bio_uncopy_user(bio);
-       }
-
-       return ret;
-}
-
-EXPORT_SYMBOL(blk_rq_unmap_user);
-
-/**
- * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
- * @q:         request queue where request should be inserted
- * @rq:                request to fill
- * @kbuf:      the kernel buffer
- * @len:       length of kernel data
- * @gfp_mask:  memory allocation flags
- */
-int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
-                   unsigned int len, gfp_t gfp_mask)
-{
-       struct bio *bio;
-
-       if (len > (q->max_sectors << 9))
-               return -EINVAL;
-       if (!len || !kbuf)
-               return -EINVAL;
-
-       bio = bio_map_kern(q, kbuf, len, gfp_mask);
-       if (IS_ERR(bio))
-               return PTR_ERR(bio);
-
-       if (rq_data_dir(rq) == WRITE)
-               bio->bi_rw |= (1 << BIO_RW);
-
-       rq->bio = rq->biotail = bio;
-       blk_rq_bio_prep(q, rq, bio);
-
-       rq->buffer = rq->data = NULL;
-       rq->data_len = len;
-       return 0;
-}
-
-EXPORT_SYMBOL(blk_rq_map_kern);
-
-/**
- * blk_execute_rq_nowait - insert a request into queue for execution
- * @q:         queue to insert the request in
- * @bd_disk:   matching gendisk
- * @rq:                request to insert
- * @at_head:    insert request at head or tail of queue
- * @done:      I/O completion handler
- *
- * Description:
- *    Insert a fully prepared request at the back of the io scheduler queue
- *    for execution.  Don't wait for completion.
- */
-void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
-                          struct request *rq, int at_head,
-                          void (*done)(struct request *))
-{
-       int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
-
-       rq->rq_disk = bd_disk;
-       rq->flags |= REQ_NOMERGE;
-       rq->end_io = done;
-       elv_add_request(q, rq, where, 1);
-       generic_unplug_device(q);
-}
-
-/**
- * blk_execute_rq - insert a request into queue for execution
- * @q:         queue to insert the request in
- * @bd_disk:   matching gendisk
- * @rq:                request to insert
- * @at_head:    insert request at head or tail of queue
- *
- * Description:
- *    Insert a fully prepared request at the back of the io scheduler queue
- *    for execution and wait for completion.
- */
-int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
-                  struct request *rq, int at_head)
-{
-       DECLARE_COMPLETION(wait);
-       char sense[SCSI_SENSE_BUFFERSIZE];
-       int err = 0;
-
-       /*
-        * we need an extra reference to the request, so we can look at
-        * it after io completion
-        */
-       rq->ref_count++;
-
-       if (!rq->sense) {
-               memset(sense, 0, sizeof(sense));
-               rq->sense = sense;
-               rq->sense_len = 0;
-       }
-
-       rq->waiting = &wait;
-       blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
-       wait_for_completion(&wait);
-       rq->waiting = NULL;
-
-       if (rq->errors)
-               err = -EIO;
-
-       return err;
-}
-
-EXPORT_SYMBOL(blk_execute_rq);
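Putting the pieces together, a hedged sketch of issuing a packet-style command backed by a kernel buffer; the opcode, buffer, buf_len and disk are placeholders, not real values:

	struct request *rq = blk_get_request(q, READ, __GFP_WAIT);
	int err;

	rq->flags |= REQ_BLOCK_PC;
	rq->cmd[0] = 0x12;			/* placeholder opcode */
	rq->cmd_len = 6;

	err = blk_rq_map_kern(q, rq, buffer, buf_len, __GFP_WAIT);
	if (!err)
		err = blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);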
-
-/**
- * blkdev_issue_flush - queue a flush
- * @bdev:      blockdev to issue flush for
- * @error_sector:      error sector
- *
- * Description:
- *    Issue a flush for the block device in question. Caller can supply
- *    room for storing the error offset in case of a flush error, if they
- *    wish to.  Caller must run wait_for_completion() on its own.
- */
-int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
-{
-       request_queue_t *q;
-
-       if (bdev->bd_disk == NULL)
-               return -ENXIO;
-
-       q = bdev_get_queue(bdev);
-       if (!q)
-               return -ENXIO;
-       if (!q->issue_flush_fn)
-               return -EOPNOTSUPP;
-
-       return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
-}
-
-EXPORT_SYMBOL(blkdev_issue_flush);
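For example (sketch; bdev assumed to be an open block device), a filesystem flushing a device's write cache while tolerating queues without a flush hook:

	sector_t error_sector;
	int err = blkdev_issue_flush(bdev, &error_sector);

	if (err == -EOPNOTSUPP)
		err = 0;	/* no issue_flush_fn on this queue: nothing to do */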
-
-static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
-{
-       int rw = rq_data_dir(rq);
-
-       if (!blk_fs_request(rq) || !rq->rq_disk)
-               return;
-
-       if (!new_io) {
-               __disk_stat_inc(rq->rq_disk, merges[rw]);
-       } else {
-               disk_round_stats(rq->rq_disk);
-               rq->rq_disk->in_flight++;
-       }
-}
-
-/*
- * add-request adds a request to the linked list.
- * queue lock is held and interrupts disabled, as we muck with the
- * request queue list.
- */
-static inline void add_request(request_queue_t * q, struct request * req)
-{
-       drive_stat_acct(req, req->nr_sectors, 1);
-
-       if (q->activity_fn)
-               q->activity_fn(q->activity_data, rq_data_dir(req));
-
-       /*
-        * elevator indicated where it wants this request to be
-        * inserted at elevator_merge time
-        */
-       __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
-}
-/*
- * disk_round_stats()  - Round off the performance stats on a struct
- * disk_stats.
- *
- * The average IO queue length and utilisation statistics are maintained
- * by observing the current state of the queue length and the amount of
- * time it has been in this state.
- *
- * Normally, that accounting is done on IO completion, but that can result
- * in more than a second's worth of IO being accounted for within any one
- * second, leading to >100% utilisation.  To deal with that, we call this
- * function to do a round-off before returning the results when reading
- * /proc/diskstats.  This accounts immediately for all queue usage up to
- * the current jiffies and restarts the counters again.
- */
-void disk_round_stats(struct gendisk *disk)
-{
-       unsigned long now = jiffies;
-
-       if (now == disk->stamp)
-               return;
-
-       if (disk->in_flight) {
-               __disk_stat_add(disk, time_in_queue,
-                               disk->in_flight * (now - disk->stamp));
-               __disk_stat_add(disk, io_ticks, (now - disk->stamp));
-       }
-       disk->stamp = now;
-}
-
-/*
- * queue lock must be held
- */
-static void __blk_put_request(request_queue_t *q, struct request *req)
-{
-       struct request_list *rl = req->rl;
-
-       if (unlikely(!q))
-               return;
-       if (unlikely(--req->ref_count))
-               return;
-
-       elv_completed_request(q, req);
-
-       req->rq_status = RQ_INACTIVE;
-       req->rl = NULL;
-
-       /*
-        * Request may not have originated from ll_rw_blk. If not,
-        * it didn't come out of our reserved rq pools
-        */
-       if (rl) {
-               int rw = rq_data_dir(req);
-               int priv = req->flags & REQ_ELVPRIV;
-
-               BUG_ON(!list_empty(&req->queuelist));
-
-               blk_free_request(q, req);
-               freed_request(q, rw, priv);
-       }
-}
-
-void blk_put_request(struct request *req)
-{
-       unsigned long flags;
-       request_queue_t *q = req->q;
-
-       /*
-        * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
-        * following if (q) test.
-        */
-       if (q) {
-               spin_lock_irqsave(q->queue_lock, flags);
-               __blk_put_request(q, req);
-               spin_unlock_irqrestore(q->queue_lock, flags);
-       }
-}
-
-EXPORT_SYMBOL(blk_put_request);
-
-/**
- * blk_end_sync_rq - executes a completion event on a request
- * @rq: request to complete
- */
-void blk_end_sync_rq(struct request *rq)
-{
-       struct completion *waiting = rq->waiting;
-
-       rq->waiting = NULL;
-       __blk_put_request(rq->q, rq);
-
-       /*
-        * complete last, if this is a stack request the process (and thus
-        * the rq pointer) could be invalid right after this complete()
-        */
-       complete(waiting);
-}
-EXPORT_SYMBOL(blk_end_sync_rq);
-
-/**
- * blk_congestion_wait - wait for a queue to become uncongested
- * @rw: READ or WRITE
- * @timeout: timeout in jiffies
- *
- * Waits for up to @timeout jiffies for a queue (any queue) to exit congestion.
- * If no queues are congested then just wait for the next request to be
- * returned.
- */
-long blk_congestion_wait(int rw, long timeout)
-{
-       long ret;
-       DEFINE_WAIT(wait);
-       wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-       prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
-       ret = io_schedule_timeout(timeout);
-       finish_wait(wqh, &wait);
-       return ret;
-}
-
-EXPORT_SYMBOL(blk_congestion_wait);
-
-/*
- * Has to be called with the request spinlock acquired
- */
-static int attempt_merge(request_queue_t *q, struct request *req,
-                         struct request *next)
-{
-       if (!rq_mergeable(req) || !rq_mergeable(next))
-               return 0;
-
-       /*
-        * not contiguous
-        */
-       if (req->sector + req->nr_sectors != next->sector)
-               return 0;
-
-       if (rq_data_dir(req) != rq_data_dir(next)
-           || req->rq_disk != next->rq_disk
-           || next->waiting || next->special)
-               return 0;
-
-       /*
-        * If we are allowed to merge, then append bio list
-        * from next to rq and release next. merge_requests_fn
-        * will have updated segment counts, update sector
-        * counts here.
-        */
-       if (!q->merge_requests_fn(q, req, next))
-               return 0;
-
-       /*
-        * At this point we have either done a back merge
-        * or front merge. We need the smaller start_time of
-        * the merged requests to be the current request
-        * for accounting purposes.
-        */
-       if (time_after(req->start_time, next->start_time))
-               req->start_time = next->start_time;
-
-       req->biotail->bi_next = next->bio;
-       req->biotail = next->biotail;
-
-       req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
-
-       elv_merge_requests(q, req, next);
-
-       if (req->rq_disk) {
-               disk_round_stats(req->rq_disk);
-               req->rq_disk->in_flight--;
-       }
-
-       req->ioprio = ioprio_best(req->ioprio, next->ioprio);
-
-       __blk_put_request(q, next);
-       return 1;
-}
-
-static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
-{
-       struct request *next = elv_latter_request(q, rq);
-
-       if (next)
-               return attempt_merge(q, rq, next);
-
-       return 0;
-}
-
-static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
-{
-       struct request *prev = elv_former_request(q, rq);
-
-       if (prev)
-               return attempt_merge(q, prev, rq);
-
-       return 0;
-}
-
-/**
- * blk_attempt_remerge  - attempt to remerge active head with next request
- * @q:    The &request_queue_t belonging to the device
- * @rq:   The head request (usually)
- *
- * Description:
- *    For head-active devices, the queue can easily be unplugged so quickly
- *    that proper merging is not done on the front request. This may hurt
- *    performance greatly for some devices. The block layer cannot safely
- *    do merging on that first request for these queues, but the driver can
- *    call this function and make it happen any way. Only the driver knows
- *    call this function and make it happen anyway. Only the driver knows
- **/
-void blk_attempt_remerge(request_queue_t *q, struct request *rq)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-       attempt_back_merge(q, rq);
-       spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-EXPORT_SYMBOL(blk_attempt_remerge);
-
-static int __make_request(request_queue_t *q, struct bio *bio)
-{
-       struct request *req;
-       int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync;
-       unsigned short prio;
-       sector_t sector;
-
-       sector = bio->bi_sector;
-       nr_sectors = bio_sectors(bio);
-       cur_nr_sectors = bio_cur_sectors(bio);
-       prio = bio_prio(bio);
-
-       rw = bio_data_dir(bio);
-       sync = bio_sync(bio);
-
-       /*
-        * low level driver can indicate that it wants pages above a
-        * certain limit bounced to low memory (ie for highmem, or even
-        * ISA dma in theory)
-        */
-       blk_queue_bounce(q, &bio);
-
-       spin_lock_prefetch(q->queue_lock);
-
-       barrier = bio_barrier(bio);
-       if (unlikely(barrier) && (q->ordered == QUEUE_ORDERED_NONE)) {
-               err = -EOPNOTSUPP;
-               goto end_io;
-       }
-
-       spin_lock_irq(q->queue_lock);
-
-       if (unlikely(barrier) || elv_queue_empty(q))
-               goto get_rq;
-
-       el_ret = elv_merge(q, &req, bio);
-       switch (el_ret) {
-               case ELEVATOR_BACK_MERGE:
-                       BUG_ON(!rq_mergeable(req));
-
-                       if (!q->back_merge_fn(q, req, bio))
-                               break;
-
-                       req->biotail->bi_next = bio;
-                       req->biotail = bio;
-                       req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-                       req->ioprio = ioprio_best(req->ioprio, prio);
-                       drive_stat_acct(req, nr_sectors, 0);
-                       if (!attempt_back_merge(q, req))
-                               elv_merged_request(q, req);
-                       goto out;
-
-               case ELEVATOR_FRONT_MERGE:
-                       BUG_ON(!rq_mergeable(req));
-
-                       if (!q->front_merge_fn(q, req, bio))
-                               break;
-
-                       bio->bi_next = req->bio;
-                       req->bio = bio;
-
-                       /*
-                        * may not be valid. if the low level driver said
-                        * it didn't need a bounce buffer then it better
-                        * not touch req->buffer either...
-                        */
-                       req->buffer = bio_data(bio);
-                       req->current_nr_sectors = cur_nr_sectors;
-                       req->hard_cur_sectors = cur_nr_sectors;
-                       req->sector = req->hard_sector = sector;
-                       req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-                       req->ioprio = ioprio_best(req->ioprio, prio);
-                       drive_stat_acct(req, nr_sectors, 0);
-                       if (!attempt_front_merge(q, req))
-                               elv_merged_request(q, req);
-                       goto out;
-
-               /* ELV_NO_MERGE: elevator says don't/can't merge. */
-               default:
-                       ;
-       }
-
-get_rq:
-       /*
-        * Grab a free request. This might sleep but cannot fail.
-        * Returns with the queue unlocked.
-        */
-       req = get_request_wait(q, rw, bio);
-
-       /*
-        * After dropping the lock and possibly sleeping here, our request
-        * may now be mergeable after it had proven unmergeable (above).
-        * We don't worry about that case for efficiency. It won't happen
-        * often, and the elevators are able to handle it.
-        */
-
-       req->flags |= REQ_CMD;
-
-       /*
-        * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
-        */
-       if (bio_rw_ahead(bio) || bio_failfast(bio))
-               req->flags |= REQ_FAILFAST;
-
-       /*
-        * REQ_BARRIER implies no merging, but let's make it explicit
-        */
-       if (unlikely(barrier))
-               req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
-
-       req->errors = 0;
-       req->hard_sector = req->sector = sector;
-       req->hard_nr_sectors = req->nr_sectors = nr_sectors;
-       req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
-       req->nr_phys_segments = bio_phys_segments(q, bio);
-       req->nr_hw_segments = bio_hw_segments(q, bio);
-       req->buffer = bio_data(bio);    /* see ->buffer comment above */
-       req->waiting = NULL;
-       req->bio = req->biotail = bio;
-       req->ioprio = prio;
-       req->rq_disk = bio->bi_bdev->bd_disk;
-       req->start_time = jiffies;
-
-       spin_lock_irq(q->queue_lock);
-       if (elv_queue_empty(q))
-               blk_plug_device(q);
-       add_request(q, req);
-out:
-       if (sync)
-               __generic_unplug_device(q);
-
-       spin_unlock_irq(q->queue_lock);
-       return 0;
-
-end_io:
-       bio_endio(bio, nr_sectors << 9, err);
-       return 0;
-}
-
-/*
- * If bio->bi_dev is a partition, remap the location
- */
-static inline void blk_partition_remap(struct bio *bio)
-{
-       struct block_device *bdev = bio->bi_bdev;
-
-       if (bdev != bdev->bd_contains) {
-               struct hd_struct *p = bdev->bd_part;
-               const int rw = bio_data_dir(bio);
-
-               p->sectors[rw] += bio_sectors(bio);
-               p->ios[rw]++;
-
-               bio->bi_sector += p->start_sect;
-               bio->bi_bdev = bdev->bd_contains;
-       }
-}
-
-static void handle_bad_sector(struct bio *bio)
-{
-       char b[BDEVNAME_SIZE];
-
-       printk(KERN_INFO "attempt to access beyond end of device\n");
-       printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
-                       bdevname(bio->bi_bdev, b),
-                       bio->bi_rw,
-                       (unsigned long long)bio->bi_sector + bio_sectors(bio),
-                       (long long)(bio->bi_bdev->bd_inode->i_size >> 9));
-
-       set_bit(BIO_EOF, &bio->bi_flags);
-}
-
-/**
- * generic_make_request: hand a buffer to its device driver for I/O
- * @bio:  The bio describing the location in memory and on the device.
- *
- * generic_make_request() is used to make I/O requests of block
- * devices. It is passed a &struct bio, which describes the I/O that needs
- * to be done.
- *
- * generic_make_request() does not return any status.  The
- * success/failure status of the request, along with notification of
- * completion, is delivered asynchronously through the bio->bi_end_io
- * function described (one day) elsewhere.
- *
- * The caller of generic_make_request must make sure that bi_io_vec
- * is set to describe the memory buffer, that bi_bdev and bi_sector are
- * set to describe the device address, and that
- * bi_end_io and optionally bi_private are set to describe how
- * completion notification should be signaled.
- *
- * generic_make_request and the drivers it calls may use bi_next if this
- * bio happens to be merged with someone else, and may change bi_bdev and
- * bi_sector for remaps as they see fit.  So the values of these fields
- * should NOT be depended on after the call to generic_make_request.
- */
-void generic_make_request(struct bio *bio)
-{
-       request_queue_t *q;
-       sector_t maxsector;
-       int ret, nr_sectors = bio_sectors(bio);
-
-       might_sleep();
-       /* Test device or partition size, when known. */
-       maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
-       if (maxsector) {
-               sector_t sector = bio->bi_sector;
-
-               if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
-                       /*
-                        * This may well happen - the kernel calls bread()
-                        * without checking the size of the device, e.g., when
-                        * mounting a device.
-                        */
-                       handle_bad_sector(bio);
-                       goto end_io;
-               }
-       }
-
-       /*
-        * Resolve the mapping until finished. (drivers are
-        * still free to implement/resolve their own stacking
-        * by explicitly returning 0)
-        *
-        * NOTE: we don't repeat the blk_size check for each new device.
-        * Stacking drivers are expected to know what they are doing.
-        */
-       do {
-               char b[BDEVNAME_SIZE];
-
-               q = bdev_get_queue(bio->bi_bdev);
-               if (!q) {
-                       printk(KERN_ERR
-                              "generic_make_request: Trying to access "
-                               "nonexistent block-device %s (%Lu)\n",
-                               bdevname(bio->bi_bdev, b),
-                               (long long) bio->bi_sector);
-end_io:
-                       bio_endio(bio, bio->bi_size, -EIO);
-                       break;
-               }
-
-               if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
-                       printk("bio too big device %s (%u > %u)\n", 
-                               bdevname(bio->bi_bdev, b),
-                               bio_sectors(bio),
-                               q->max_hw_sectors);
-                       goto end_io;
-               }
-
-               if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
-                       goto end_io;
-
-               /*
-                * If this device has partitions, remap block n
-                * of partition p to block n+start(p) of the disk.
-                */
-               blk_partition_remap(bio);
-
-               ret = q->make_request_fn(q, bio);
-       } while (ret);
-}
-
-EXPORT_SYMBOL(generic_make_request);
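
The resolve loop above resubmits a bio whenever ->make_request_fn returns non-zero, which is how a simple remapping (stacking) driver can avoid recursing into generic_make_request() itself. A minimal sketch under that assumption; my_target_bdev and my_offset are illustrative state, and a real driver would register the function with blk_queue_make_request().

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Illustrative state for a hypothetical linear remapping driver. */
static struct block_device *my_target_bdev;
static sector_t my_offset;

/*
 * Remap the bio onto the underlying device and return non-zero so that
 * generic_make_request()'s loop re-fetches the queue for the new
 * bi_bdev and submits the bio there.  Returning 0 would mean the bio
 * was fully handled here.
 */
static int my_remap_make_request(request_queue_t *q, struct bio *bio)
{
	bio->bi_bdev = my_target_bdev;
	bio->bi_sector += my_offset;
	return 1;
}
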
-
-/**
- * submit_bio: submit a bio to the block device layer for I/O
- * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
- * @bio: The &struct bio which describes the I/O
- *
- * submit_bio() is very similar in purpose to generic_make_request(), and
- * uses that function to do most of the work. Both are fairly rough
- * interfaces; @bio must be set up and ready for I/O.
- *
- */
-void submit_bio(int rw, struct bio *bio)
-{
-       int count = bio_sectors(bio);
-
-       BIO_BUG_ON(!bio->bi_size);
-       BIO_BUG_ON(!bio->bi_io_vec);
-       bio->bi_rw |= rw;
-       if (rw & WRITE)
-               mod_page_state(pgpgout, count);
-       else
-               mod_page_state(pgpgin, count);
-
-       if (unlikely(block_dump)) {
-               char b[BDEVNAME_SIZE];
-               printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
-                       current->comm, current->pid,
-                       (rw & WRITE) ? "WRITE" : "READ",
-                       (unsigned long long)bio->bi_sector,
-                       bdevname(bio->bi_bdev,b));
-       }
-
-       generic_make_request(bio);
-}
-
-EXPORT_SYMBOL(submit_bio);
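
As the two descriptions above require, the submitter fills in the memory segments, the device address, and the completion callback before handing the bio over. A minimal sketch that reads one page synchronously, assuming an already-open struct block_device; my_read_end_io() and my_read_page() are illustrative names and error handling is abbreviated.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/mm.h>

/* 2.6-era completion callback: may be called more than once as bytes finish. */
static int my_read_end_io(struct bio *bio, unsigned int bytes_done, int error)
{
	if (bio->bi_size)			/* not fully completed yet */
		return 1;
	complete((struct completion *)bio->bi_private);
	return 0;
}

/* Read one page at 'sector' from 'bdev' and wait for it to complete. */
static int my_read_page(struct block_device *bdev, sector_t sector, struct page *page)
{
	DECLARE_COMPLETION(done);
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);

	bio->bi_bdev = bdev;			/* device address ... */
	bio->bi_sector = sector;		/* ... and starting sector */
	bio_add_page(bio, page, PAGE_SIZE, 0);	/* memory segment (bi_io_vec) */
	bio->bi_end_io = my_read_end_io;	/* completion notification */
	bio->bi_private = &done;

	submit_bio(READ, bio);
	wait_for_completion(&done);
	bio_put(bio);
	return 0;
}
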
-
-static void blk_recalc_rq_segments(struct request *rq)
-{
-       struct bio *bio, *prevbio = NULL;
-       int nr_phys_segs, nr_hw_segs;
-       unsigned int phys_size, hw_size;
-       request_queue_t *q = rq->q;
-
-       if (!rq->bio)
-               return;
-
-       phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
-       rq_for_each_bio(bio, rq) {
-               /* Force bio hw/phys segs to be recalculated. */
-               bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-
-               nr_phys_segs += bio_phys_segments(q, bio);
-               nr_hw_segs += bio_hw_segments(q, bio);
-               if (prevbio) {
-                       int pseg = phys_size + prevbio->bi_size + bio->bi_size;
-                       int hseg = hw_size + prevbio->bi_size + bio->bi_size;
-
-                       if (blk_phys_contig_segment(q, prevbio, bio) &&
-                           pseg <= q->max_segment_size) {
-                               nr_phys_segs--;
-                               phys_size += prevbio->bi_size + bio->bi_size;
-                       } else
-                               phys_size = 0;
-
-                       if (blk_hw_contig_segment(q, prevbio, bio) &&
-                           hseg <= q->max_segment_size) {
-                               nr_hw_segs--;
-                               hw_size += prevbio->bi_size + bio->bi_size;
-                       } else
-                               hw_size = 0;
-               }
-               prevbio = bio;
-       }
-
-       rq->nr_phys_segments = nr_phys_segs;
-       rq->nr_hw_segments = nr_hw_segs;
-}
-
-static void blk_recalc_rq_sectors(struct request *rq, int nsect)
-{
-       if (blk_fs_request(rq)) {
-               rq->hard_sector += nsect;
-               rq->hard_nr_sectors -= nsect;
-
-               /*
-                * Move the I/O submission pointers ahead if required.
-                */
-               if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
-                   (rq->sector <= rq->hard_sector)) {
-                       rq->sector = rq->hard_sector;
-                       rq->nr_sectors = rq->hard_nr_sectors;
-                       rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
-                       rq->current_nr_sectors = rq->hard_cur_sectors;
-                       rq->buffer = bio_data(rq->bio);
-               }
-
-               /*
-                * if the total number of sectors is less than the first segment
-                * size, something has gone terribly wrong
-                */
-               if (rq->nr_sectors < rq->current_nr_sectors) {
-                       printk("blk: request botched\n");
-                       rq->nr_sectors = rq->current_nr_sectors;
-               }
-       }
-}
-
-static int __end_that_request_first(struct request *req, int uptodate,
-                                   int nr_bytes)
-{
-       int total_bytes, bio_nbytes, error, next_idx = 0;
-       struct bio *bio;
-
-       /*
-        * extend uptodate bool to allow < 0 value to be direct io error
-        */
-       error = 0;
-       if (end_io_error(uptodate))
-               error = !uptodate ? -EIO : uptodate;
-
-       /*
-        * for a REQ_BLOCK_PC request, we want to carry any eventual
-        * sense key with us all the way through
-        */
-       if (!blk_pc_request(req))
-               req->errors = 0;
-
-       if (!uptodate) {
-               if (blk_fs_request(req) && !(req->flags & REQ_QUIET))
-                       printk("end_request: I/O error, dev %s, sector %llu\n",
-                               req->rq_disk ? req->rq_disk->disk_name : "?",
-                               (unsigned long long)req->sector);
-       }
-
-       if (blk_fs_request(req) && req->rq_disk) {
-               const int rw = rq_data_dir(req);
-
-               __disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
-       }
-
-       total_bytes = bio_nbytes = 0;
-       while ((bio = req->bio) != NULL) {
-               int nbytes;
-
-               if (nr_bytes >= bio->bi_size) {
-                       req->bio = bio->bi_next;
-                       nbytes = bio->bi_size;
-                       bio_endio(bio, nbytes, error);
-                       next_idx = 0;
-                       bio_nbytes = 0;
-               } else {
-                       int idx = bio->bi_idx + next_idx;
-
-                       if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
-                               blk_dump_rq_flags(req, "__end_that");
-                               printk("%s: bio idx %d >= vcnt %d\n",
-                                               __FUNCTION__,
-                                               bio->bi_idx, bio->bi_vcnt);
-                               break;
-                       }
-
-                       nbytes = bio_iovec_idx(bio, idx)->bv_len;
-                       BIO_BUG_ON(nbytes > bio->bi_size);
-
-                       /*
-                        * not a complete bvec done
-                        */
-                       if (unlikely(nbytes > nr_bytes)) {
-                               bio_nbytes += nr_bytes;
-                               total_bytes += nr_bytes;
-                               break;
-                       }
-
-                       /*
-                        * advance to the next vector
-                        */
-                       next_idx++;
-                       bio_nbytes += nbytes;
-               }
-
-               total_bytes += nbytes;
-               nr_bytes -= nbytes;
-
-               if ((bio = req->bio)) {
-                       /*
-                        * end more in this run, or just return 'not-done'
-                        */
-                       if (unlikely(nr_bytes <= 0))
-                               break;
-               }
-       }
-
-       /*
-        * completely done
-        */
-       if (!req->bio)
-               return 0;
-
-       /*
-        * if the request wasn't completed, update state
-        */
-       if (bio_nbytes) {
-               bio_endio(bio, bio_nbytes, error);
-               bio->bi_idx += next_idx;
-               bio_iovec(bio)->bv_offset += nr_bytes;
-               bio_iovec(bio)->bv_len -= nr_bytes;
-       }
-
-       blk_recalc_rq_sectors(req, total_bytes >> 9);
-       blk_recalc_rq_segments(req);
-       return 1;
-}
-
-/**
- * end_that_request_first - end I/O on a request
- * @req:      the request being processed
- * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
- * @nr_sectors: number of sectors to end I/O on
- *
- * Description:
- *     Ends I/O on a number of sectors attached to @req, and sets it up
- *     for the next range of segments (if any) in the cluster.
- *
- * Return:
- *     0 - we are done with this request, call end_that_request_last()
- *     1 - still buffers pending for this request
- **/
-int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
-{
-       return __end_that_request_first(req, uptodate, nr_sectors << 9);
-}
-
-EXPORT_SYMBOL(end_that_request_first);
-
-/**
- * end_that_request_chunk - end I/O on a request
- * @req:      the request being processed
- * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
- * @nr_bytes: number of bytes to complete
- *
- * Description:
- *     Ends I/O on a number of bytes attached to @req, and sets it up
- *     for the next range of segments (if any). Like end_that_request_first(),
- *     but deals with bytes instead of sectors.
- *
- * Return:
- *     0 - we are done with this request, call end_that_request_last()
- *     1 - still buffers pending for this request
- **/
-int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
-{
-       return __end_that_request_first(req, uptodate, nr_bytes);
-}
-
-EXPORT_SYMBOL(end_that_request_chunk);
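
Together these two helpers give drivers the usual two-step completion: retire some sectors (or bytes), and once nothing is left pending, dequeue the request and finish it with end_that_request_last(), which follows below along with end_request(), a wrapper for the common case. A minimal sketch of that pattern for a hypothetical driver, assumed to run with q->queue_lock held:

#include <linux/blkdev.h>

/*
 * Hypothetical interrupt-time completion: the hardware just finished
 * 'nsectors' sectors of 'rq'.  Assumed to run with q->queue_lock held,
 * as end_that_request_last() requires.
 */
static void my_complete_sectors(struct request *rq, int uptodate, int nsectors)
{
	if (end_that_request_first(rq, uptodate, nsectors))
		return;				/* more segments still pending */

	/* all done: take it off the queue and retire it */
	blkdev_dequeue_request(rq);
	end_that_request_last(rq);
}
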
-
-/*
- * queue lock must be held
- */
-void end_that_request_last(struct request *req)
-{
-       struct gendisk *disk = req->rq_disk;
-
-       if (unlikely(laptop_mode) && blk_fs_request(req))
-               laptop_io_completion();
-
-       if (disk && blk_fs_request(req)) {
-               unsigned long duration = jiffies - req->start_time;
-               const int rw = rq_data_dir(req);
-
-               __disk_stat_inc(disk, ios[rw]);
-               __disk_stat_add(disk, ticks[rw], duration);
-               disk_round_stats(disk);
-               disk->in_flight--;
-       }
-       if (req->end_io)
-               req->end_io(req);
-       else
-               __blk_put_request(req->q, req);
-}
-
-EXPORT_SYMBOL(end_that_request_last);
-
-void end_request(struct request *req, int uptodate)
-{
-       if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
-               add_disk_randomness(req->rq_disk);
-               blkdev_dequeue_request(req);
-               end_that_request_last(req);
-       }
-}
-
-EXPORT_SYMBOL(end_request);
-
-void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
-{
-       /* first three bits are identical in rq->flags and bio->bi_rw */
-       rq->flags |= (bio->bi_rw & 7);
-
-       rq->nr_phys_segments = bio_phys_segments(q, bio);
-       rq->nr_hw_segments = bio_hw_segments(q, bio);
-       rq->current_nr_sectors = bio_cur_sectors(bio);
-       rq->hard_cur_sectors = rq->current_nr_sectors;
-       rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
-       rq->buffer = bio_data(bio);
-
-       rq->bio = rq->biotail = bio;
-}
-
-EXPORT_SYMBOL(blk_rq_bio_prep);
-
-int kblockd_schedule_work(struct work_struct *work)
-{
-       return queue_work(kblockd_workqueue, work);
-}
-
-EXPORT_SYMBOL(kblockd_schedule_work);
-
-void kblockd_flush(void)
-{
-       flush_workqueue(kblockd_workqueue);
-}
-EXPORT_SYMBOL(kblockd_flush);
-
-int __init blk_dev_init(void)
-{
-       kblockd_workqueue = create_workqueue("kblockd");
-       if (!kblockd_workqueue)
-               panic("Failed to create kblockd\n");
-
-       request_cachep = kmem_cache_create("blkdev_requests",
-                       sizeof(struct request), 0, SLAB_PANIC, NULL, NULL);
-
-       requestq_cachep = kmem_cache_create("blkdev_queue",
-                       sizeof(request_queue_t), 0, SLAB_PANIC, NULL, NULL);
-
-       iocontext_cachep = kmem_cache_create("blkdev_ioc",
-                       sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
-
-       blk_max_low_pfn = max_low_pfn;
-       blk_max_pfn = max_pfn;
-
-       return 0;
-}
-
-/*
- * IO Context helper functions
- */
-void put_io_context(struct io_context *ioc)
-{
-       if (ioc == NULL)
-               return;
-
-       BUG_ON(atomic_read(&ioc->refcount) == 0);
-
-       if (atomic_dec_and_test(&ioc->refcount)) {
-               if (ioc->aic && ioc->aic->dtor)
-                       ioc->aic->dtor(ioc->aic);
-               if (ioc->cic && ioc->cic->dtor)
-                       ioc->cic->dtor(ioc->cic);
-
-               kmem_cache_free(iocontext_cachep, ioc);
-       }
-}
-EXPORT_SYMBOL(put_io_context);
-
-/* Called by the exiting task */
-void exit_io_context(void)
-{
-       unsigned long flags;
-       struct io_context *ioc;
-
-       local_irq_save(flags);
-       task_lock(current);
-       ioc = current->io_context;
-       current->io_context = NULL;
-       ioc->task = NULL;
-       task_unlock(current);
-       local_irq_restore(flags);
-
-       if (ioc->aic && ioc->aic->exit)
-               ioc->aic->exit(ioc->aic);
-       if (ioc->cic && ioc->cic->exit)
-               ioc->cic->exit(ioc->cic);
-
-       put_io_context(ioc);
-}
-
-/*
- * If the current task has no IO context then create one and initialise it.
- * Otherwise, return its existing IO context.
- *
- * This returned IO context doesn't have a specifically elevated refcount,
- * but since the current task itself holds a reference, the context can be
- * used in general code, so long as it stays within `current` context.
- */
-struct io_context *current_io_context(gfp_t gfp_flags)
-{
-       struct task_struct *tsk = current;
-       struct io_context *ret;
-
-       ret = tsk->io_context;
-       if (likely(ret))
-               return ret;
-
-       ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
-       if (ret) {
-               atomic_set(&ret->refcount, 1);
-               ret->task = current;
-               ret->set_ioprio = NULL;
-               ret->last_waited = jiffies; /* doesn't matter... */
-               ret->nr_batch_requests = 0; /* because this is 0 */
-               ret->aic = NULL;
-               ret->cic = NULL;
-               tsk->io_context = ret;
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL(current_io_context);
-
-/*
- * If the current task has no IO context then create one and initialise it.
- * If it does have a context, take a ref on it.
- *
- * This is always called in the context of the task which submitted the I/O.
- */
-struct io_context *get_io_context(gfp_t gfp_flags)
-{
-       struct io_context *ret;
-       ret = current_io_context(gfp_flags);
-       if (likely(ret))
-               atomic_inc(&ret->refcount);
-       return ret;
-}
-EXPORT_SYMBOL(get_io_context);
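
The comments above distinguish the two lifetimes: current_io_context() piggybacks on the task's own reference, while get_io_context() takes an extra reference that the caller must drop with put_io_context(). A minimal sketch of the latter pairing; struct my_tracker is purely illustrative.

#include <linux/blkdev.h>

/* Hypothetical object that remembers which task's IO context submitted the I/O. */
struct my_tracker {
	struct io_context *ioc;
};

/* Take a reference of our own: the context stays valid even after the task exits. */
static void my_tracker_init(struct my_tracker *t)
{
	t->ioc = get_io_context(GFP_KERNEL);
}

/* Drop the reference taken above. */
static void my_tracker_exit(struct my_tracker *t)
{
	put_io_context(t->ioc);
	t->ioc = NULL;
}
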
-
-void copy_io_context(struct io_context **pdst, struct io_context **psrc)
-{
-       struct io_context *src = *psrc;
-       struct io_context *dst = *pdst;
-
-       if (src) {
-               BUG_ON(atomic_read(&src->refcount) == 0);
-               atomic_inc(&src->refcount);
-               put_io_context(dst);
-               *pdst = src;
-       }
-}
-EXPORT_SYMBOL(copy_io_context);
-
-void swap_io_context(struct io_context **ioc1, struct io_context **ioc2)
-{
-       struct io_context *temp;
-       temp = *ioc1;
-       *ioc1 = *ioc2;
-       *ioc2 = temp;
-}
-EXPORT_SYMBOL(swap_io_context);
-
-/*
- * sysfs parts below
- */
-struct queue_sysfs_entry {
-       struct attribute attr;
-       ssize_t (*show)(struct request_queue *, char *);
-       ssize_t (*store)(struct request_queue *, const char *, size_t);
-};
-
-static ssize_t
-queue_var_show(unsigned int var, char *page)
-{
-       return sprintf(page, "%d\n", var);
-}
-
-static ssize_t
-queue_var_store(unsigned long *var, const char *page, size_t count)
-{
-       char *p = (char *) page;
-
-       *var = simple_strtoul(p, &p, 10);
-       return count;
-}
-
-static ssize_t queue_requests_show(struct request_queue *q, char *page)
-{
-       return queue_var_show(q->nr_requests, (page));
-}
-
-static ssize_t
-queue_requests_store(struct request_queue *q, const char *page, size_t count)
-{
-       struct request_list *rl = &q->rq;
-
-       int ret = queue_var_store(&q->nr_requests, page, count);
-       if (q->nr_requests < BLKDEV_MIN_RQ)
-               q->nr_requests = BLKDEV_MIN_RQ;
-       blk_queue_congestion_threshold(q);
-
-       if (rl->count[READ] >= queue_congestion_on_threshold(q))
-               set_queue_congested(q, READ);
-       else if (rl->count[READ] < queue_congestion_off_threshold(q))
-               clear_queue_congested(q, READ);
-
-       if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
-               set_queue_congested(q, WRITE);
-       else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
-               clear_queue_congested(q, WRITE);
-
-       if (rl->count[READ] >= q->nr_requests) {
-               blk_set_queue_full(q, READ);
-       } else if (rl->count[READ]+1 <= q->nr_requests) {
-               blk_clear_queue_full(q, READ);
-               wake_up(&rl->wait[READ]);
-       }
-
-       if (rl->count[WRITE] >= q->nr_requests) {
-               blk_set_queue_full(q, WRITE);
-       } else if (rl->count[WRITE]+1 <= q->nr_requests) {
-               blk_clear_queue_full(q, WRITE);
-               wake_up(&rl->wait[WRITE]);
-       }
-       return ret;
-}
-
-static ssize_t queue_ra_show(struct request_queue *q, char *page)
-{
-       int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
-
-       return queue_var_show(ra_kb, (page));
-}
-
-static ssize_t
-queue_ra_store(struct request_queue *q, const char *page, size_t count)
-{
-       unsigned long ra_kb;
-       ssize_t ret = queue_var_store(&ra_kb, page, count);
-
-       spin_lock_irq(q->queue_lock);
-       if (ra_kb > (q->max_sectors >> 1))
-               ra_kb = (q->max_sectors >> 1);
-
-       q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
-       spin_unlock_irq(q->queue_lock);
-
-       return ret;
-}
-
-static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
-{
-       int max_sectors_kb = q->max_sectors >> 1;
-
-       return queue_var_show(max_sectors_kb, (page));
-}
-
-static ssize_t
-queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
-{
-       unsigned long max_sectors_kb,
-                       max_hw_sectors_kb = q->max_hw_sectors >> 1,
-                       page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
-       ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
-       int ra_kb;
-
-       if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
-               return -EINVAL;
-       /*
-        * Take the queue lock to update the readahead and max_sectors
-        * values synchronously:
-        */
-       spin_lock_irq(q->queue_lock);
-       /*
-        * Trim readahead window as well, if necessary:
-        */
-       ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
-       if (ra_kb > max_sectors_kb)
-               q->backing_dev_info.ra_pages =
-                               max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);
-
-       q->max_sectors = max_sectors_kb << 1;
-       spin_unlock_irq(q->queue_lock);
-
-       return ret;
-}
-
-static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
-{
-       int max_hw_sectors_kb = q->max_hw_sectors >> 1;
-
-       return queue_var_show(max_hw_sectors_kb, (page));
-}
-
-
-static struct queue_sysfs_entry queue_requests_entry = {
-       .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
-       .show = queue_requests_show,
-       .store = queue_requests_store,
-};
-
-static struct queue_sysfs_entry queue_ra_entry = {
-       .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
-       .show = queue_ra_show,
-       .store = queue_ra_store,
-};
-
-static struct queue_sysfs_entry queue_max_sectors_entry = {
-       .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
-       .show = queue_max_sectors_show,
-       .store = queue_max_sectors_store,
-};
-
-static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
-       .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
-       .show = queue_max_hw_sectors_show,
-};
-
-static struct queue_sysfs_entry queue_iosched_entry = {
-       .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
-       .show = elv_iosched_show,
-       .store = elv_iosched_store,
-};
-
-static struct attribute *default_attrs[] = {
-       &queue_requests_entry.attr,
-       &queue_ra_entry.attr,
-       &queue_max_hw_sectors_entry.attr,
-       &queue_max_sectors_entry.attr,
-       &queue_iosched_entry.attr,
-       NULL,
-};
-
-#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
-
-static ssize_t
-queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
-       struct queue_sysfs_entry *entry = to_queue(attr);
-       struct request_queue *q;
-
-       q = container_of(kobj, struct request_queue, kobj);
-       if (!entry->show)
-               return -EIO;
-
-       return entry->show(q, page);
-}
-
-static ssize_t
-queue_attr_store(struct kobject *kobj, struct attribute *attr,
-                   const char *page, size_t length)
-{
-       struct queue_sysfs_entry *entry = to_queue(attr);
-       struct request_queue *q;
-
-       q = container_of(kobj, struct request_queue, kobj);
-       if (!entry->store)
-               return -EIO;
-
-       return entry->store(q, page, length);
-}
-
-static struct sysfs_ops queue_sysfs_ops = {
-       .show   = queue_attr_show,
-       .store  = queue_attr_store,
-};
-
-static struct kobj_type queue_ktype = {
-       .sysfs_ops      = &queue_sysfs_ops,
-       .default_attrs  = default_attrs,
-};
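
The queue attributes follow the usual kobject pattern: one queue_sysfs_entry per file under /sys/block/<disk>/queue, listed in default_attrs[] and dispatched through queue_attr_show()/queue_attr_store(). As a sketch of what one more read-only entry would look like in this scheme (the name and the value it reports are illustrative only, and the entry would also have to be added to default_attrs[] above):

/*
 * Purely illustrative read-only entry, written as if it were added to
 * this file: it would show up as /sys/block/<disk>/queue/example_kb
 * once listed in default_attrs[].
 */
static ssize_t queue_example_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->max_hw_sectors >> 1, (page));
}

static struct queue_sysfs_entry queue_example_entry = {
	.attr = {.name = "example_kb", .mode = S_IRUGO },
	.show = queue_example_show,
};
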
-
-int blk_register_queue(struct gendisk *disk)
-{
-       int ret;
-
-       request_queue_t *q = disk->queue;
-
-       if (!q || !q->request_fn)
-               return -ENXIO;
-
-       q->kobj.parent = kobject_get(&disk->kobj);
-       if (!q->kobj.parent)
-               return -EBUSY;
-
-       snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
-       q->kobj.ktype = &queue_ktype;
-
-       ret = kobject_register(&q->kobj);
-       if (ret < 0)
-               return ret;
-
-       ret = elv_register_queue(q);
-       if (ret) {
-               kobject_unregister(&q->kobj);
-               return ret;
-       }
-
-       return 0;
-}
-
-void blk_unregister_queue(struct gendisk *disk)
-{
-       request_queue_t *q = disk->queue;
-
-       if (q && q->request_fn) {
-               elv_unregister_queue(q);
-
-               kobject_unregister(&q->kobj);
-               kobject_put(&disk->kobj);
-       }
-}
diff --git a/drivers/block/noop-iosched.c b/drivers/block/noop-iosched.c
deleted file mode 100644 (file)
index e54f006..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * elevator noop
- */
-#include <linux/blkdev.h>
-#include <linux/elevator.h>
-#include <linux/bio.h>
-#include <linux/module.h>
-#include <linux/init.h>
-
-static void elevator_noop_add_request(request_queue_t *q, struct request *rq)
-{
-       rq->flags |= REQ_NOMERGE;
-       elv_dispatch_add_tail(q, rq);
-}
-
-static int elevator_noop_dispatch(request_queue_t *q, int force)
-{
-       return 0;
-}
-
-static struct elevator_type elevator_noop = {
-       .ops = {
-               .elevator_dispatch_fn           = elevator_noop_dispatch,
-               .elevator_add_req_fn            = elevator_noop_add_request,
-       },
-       .elevator_name = "noop",
-       .elevator_owner = THIS_MODULE,
-};
-
-static int __init noop_init(void)
-{
-       return elv_register(&elevator_noop);
-}
-
-static void __exit noop_exit(void)
-{
-       elv_unregister(&elevator_noop);
-}
-
-module_init(noop_init);
-module_exit(noop_exit);
-
-
-MODULE_AUTHOR("Jens Axboe");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("No-op IO scheduler");
diff --git a/drivers/block/scsi_ioctl.c b/drivers/block/scsi_ioctl.c
deleted file mode 100644 (file)
index 382dea7..0000000
+++ /dev/null
@@ -1,589 +0,0 @@
-/*
- * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-
- *
- */
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <linux/completion.h>
-#include <linux/cdrom.h>
-#include <linux/slab.h>
-#include <linux/times.h>
-#include <asm/uaccess.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_ioctl.h>
-#include <scsi/scsi_cmnd.h>
-
-/* Command group 3 is reserved and should never be used.  */
-const unsigned char scsi_command_size[8] =
-{
-       6, 10, 10, 12,
-       16, 12, 10, 10
-};
-
-EXPORT_SYMBOL(scsi_command_size);
-
-#define BLK_DEFAULT_TIMEOUT    (60 * HZ)
-
-#include <scsi/sg.h>
-
-static int sg_get_version(int __user *p)
-{
-       static int sg_version_num = 30527;
-       return put_user(sg_version_num, p);
-}
-
-static int scsi_get_idlun(request_queue_t *q, int __user *p)
-{
-       return put_user(0, p);
-}
-
-static int scsi_get_bus(request_queue_t *q, int __user *p)
-{
-       return put_user(0, p);
-}
-
-static int sg_get_timeout(request_queue_t *q)
-{
-       return q->sg_timeout / (HZ / USER_HZ);
-}
-
-static int sg_set_timeout(request_queue_t *q, int __user *p)
-{
-       int timeout, err = get_user(timeout, p);
-
-       if (!err)
-               q->sg_timeout = timeout * (HZ / USER_HZ);
-
-       return err;
-}
-
-static int sg_get_reserved_size(request_queue_t *q, int __user *p)
-{
-       return put_user(q->sg_reserved_size, p);
-}
-
-static int sg_set_reserved_size(request_queue_t *q, int __user *p)
-{
-       int size, err = get_user(size, p);
-
-       if (err)
-               return err;
-
-       if (size < 0)
-               return -EINVAL;
-       if (size > (q->max_sectors << 9))
-               size = q->max_sectors << 9;
-
-       q->sg_reserved_size = size;
-       return 0;
-}
-
-/*
- * will always return that we are ATAPI even for a real SCSI drive; I'm not
- * so sure this is worth doing anything about (why would you care??)
- */
-static int sg_emulated_host(request_queue_t *q, int __user *p)
-{
-       return put_user(1, p);
-}
-
-#define CMD_READ_SAFE  0x01
-#define CMD_WRITE_SAFE 0x02
-#define CMD_WARNED     0x04
-#define safe_for_read(cmd)     [cmd] = CMD_READ_SAFE
-#define safe_for_write(cmd)    [cmd] = CMD_WRITE_SAFE
-
-static int verify_command(struct file *file, unsigned char *cmd)
-{
-       static unsigned char cmd_type[256] = {
-
-               /* Basic read-only commands */
-               safe_for_read(TEST_UNIT_READY),
-               safe_for_read(REQUEST_SENSE),
-               safe_for_read(READ_6),
-               safe_for_read(READ_10),
-               safe_for_read(READ_12),
-               safe_for_read(READ_16),
-               safe_for_read(READ_BUFFER),
-               safe_for_read(READ_DEFECT_DATA),
-               safe_for_read(READ_LONG),
-               safe_for_read(INQUIRY),
-               safe_for_read(MODE_SENSE),
-               safe_for_read(MODE_SENSE_10),
-               safe_for_read(LOG_SENSE),
-               safe_for_read(START_STOP),
-               safe_for_read(GPCMD_VERIFY_10),
-               safe_for_read(VERIFY_16),
-
-               /* Audio CD commands */
-               safe_for_read(GPCMD_PLAY_CD),
-               safe_for_read(GPCMD_PLAY_AUDIO_10),
-               safe_for_read(GPCMD_PLAY_AUDIO_MSF),
-               safe_for_read(GPCMD_PLAY_AUDIO_TI),
-               safe_for_read(GPCMD_PAUSE_RESUME),
-
-               /* CD/DVD data reading */
-               safe_for_read(GPCMD_READ_BUFFER_CAPACITY),
-               safe_for_read(GPCMD_READ_CD),
-               safe_for_read(GPCMD_READ_CD_MSF),
-               safe_for_read(GPCMD_READ_DISC_INFO),
-               safe_for_read(GPCMD_READ_CDVD_CAPACITY),
-               safe_for_read(GPCMD_READ_DVD_STRUCTURE),
-               safe_for_read(GPCMD_READ_HEADER),
-               safe_for_read(GPCMD_READ_TRACK_RZONE_INFO),
-               safe_for_read(GPCMD_READ_SUBCHANNEL),
-               safe_for_read(GPCMD_READ_TOC_PMA_ATIP),
-               safe_for_read(GPCMD_REPORT_KEY),
-               safe_for_read(GPCMD_SCAN),
-               safe_for_read(GPCMD_GET_CONFIGURATION),
-               safe_for_read(GPCMD_READ_FORMAT_CAPACITIES),
-               safe_for_read(GPCMD_GET_EVENT_STATUS_NOTIFICATION),
-               safe_for_read(GPCMD_GET_PERFORMANCE),
-               safe_for_read(GPCMD_SEEK),
-               safe_for_read(GPCMD_STOP_PLAY_SCAN),
-
-               /* Basic writing commands */
-               safe_for_write(WRITE_6),
-               safe_for_write(WRITE_10),
-               safe_for_write(WRITE_VERIFY),
-               safe_for_write(WRITE_12),
-               safe_for_write(WRITE_VERIFY_12),
-               safe_for_write(WRITE_16),
-               safe_for_write(WRITE_LONG),
-               safe_for_write(WRITE_LONG_2),
-               safe_for_write(ERASE),
-               safe_for_write(GPCMD_MODE_SELECT_10),
-               safe_for_write(MODE_SELECT),
-               safe_for_write(LOG_SELECT),
-               safe_for_write(GPCMD_BLANK),
-               safe_for_write(GPCMD_CLOSE_TRACK),
-               safe_for_write(GPCMD_FLUSH_CACHE),
-               safe_for_write(GPCMD_FORMAT_UNIT),
-               safe_for_write(GPCMD_REPAIR_RZONE_TRACK),
-               safe_for_write(GPCMD_RESERVE_RZONE_TRACK),
-               safe_for_write(GPCMD_SEND_DVD_STRUCTURE),
-               safe_for_write(GPCMD_SEND_EVENT),
-               safe_for_write(GPCMD_SEND_KEY),
-               safe_for_write(GPCMD_SEND_OPC),
-               safe_for_write(GPCMD_SEND_CUE_SHEET),
-               safe_for_write(GPCMD_SET_SPEED),
-               safe_for_write(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL),
-               safe_for_write(GPCMD_LOAD_UNLOAD),
-               safe_for_write(GPCMD_SET_STREAMING),
-       };
-       unsigned char type = cmd_type[cmd[0]];
-
-       /* Anybody who can open the device can do a read-safe command */
-       if (type & CMD_READ_SAFE)
-               return 0;
-
-       /* Write-safe commands just require a writable open.. */
-       if (type & CMD_WRITE_SAFE) {
-               if (file->f_mode & FMODE_WRITE)
-                       return 0;
-       }
-
-       /* And root can do any command.. */
-       if (capable(CAP_SYS_RAWIO))
-               return 0;
-
-       if (!type) {
-               cmd_type[cmd[0]] = CMD_WARNED;
-               printk(KERN_WARNING "scsi: unknown opcode 0x%02x\n", cmd[0]);
-       }
-
-       /* Otherwise fail it with an "Operation not permitted" */
-       return -EPERM;
-}
-
-static int sg_io(struct file *file, request_queue_t *q,
-               struct gendisk *bd_disk, struct sg_io_hdr *hdr)
-{
-       unsigned long start_time;
-       int writing = 0, ret = 0;
-       struct request *rq;
-       struct bio *bio;
-       char sense[SCSI_SENSE_BUFFERSIZE];
-       unsigned char cmd[BLK_MAX_CDB];
-
-       if (hdr->interface_id != 'S')
-               return -EINVAL;
-       if (hdr->cmd_len > BLK_MAX_CDB)
-               return -EINVAL;
-       if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
-               return -EFAULT;
-       if (verify_command(file, cmd))
-               return -EPERM;
-
-       if (hdr->dxfer_len > (q->max_sectors << 9))
-               return -EIO;
-
-       if (hdr->dxfer_len)
-               switch (hdr->dxfer_direction) {
-               default:
-                       return -EINVAL;
-               case SG_DXFER_TO_FROM_DEV:
-               case SG_DXFER_TO_DEV:
-                       writing = 1;
-                       break;
-               case SG_DXFER_FROM_DEV:
-                       break;
-               }
-
-       rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
-       if (!rq)
-               return -ENOMEM;
-
-       if (hdr->iovec_count) {
-               const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
-               struct sg_iovec *iov;
-
-               iov = kmalloc(size, GFP_KERNEL);
-               if (!iov) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
-
-               if (copy_from_user(iov, hdr->dxferp, size)) {
-                       kfree(iov);
-                       ret = -EFAULT;
-                       goto out;
-               }
-
-               ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count);
-               kfree(iov);
-       } else if (hdr->dxfer_len)
-               ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
-
-       if (ret)
-               goto out;
-
-       /*
-        * fill in request structure
-        */
-       rq->cmd_len = hdr->cmd_len;
-       memcpy(rq->cmd, cmd, hdr->cmd_len);
-       if (sizeof(rq->cmd) != hdr->cmd_len)
-               memset(rq->cmd + hdr->cmd_len, 0, sizeof(rq->cmd) - hdr->cmd_len);
-
-       memset(sense, 0, sizeof(sense));
-       rq->sense = sense;
-       rq->sense_len = 0;
-
-       rq->flags |= REQ_BLOCK_PC;
-       bio = rq->bio;
-
-       /*
-        * bounce this after holding a reference to the original bio, it's
-        * needed for proper unmapping
-        */
-       if (rq->bio)
-               blk_queue_bounce(q, &rq->bio);
-
-       rq->timeout = (hdr->timeout * HZ) / 1000;
-       if (!rq->timeout)
-               rq->timeout = q->sg_timeout;
-       if (!rq->timeout)
-               rq->timeout = BLK_DEFAULT_TIMEOUT;
-
-       start_time = jiffies;
-
-       /* ignore return value. All information is passed back to caller
-        * (if he doesn't check, that is his problem).
-        * N.B. a non-zero SCSI status is _not_ necessarily an error.
-        */
-       blk_execute_rq(q, bd_disk, rq, 0);
-
-       /* write to all output members */
-       hdr->status = 0xff & rq->errors;
-       hdr->masked_status = status_byte(rq->errors);
-       hdr->msg_status = msg_byte(rq->errors);
-       hdr->host_status = host_byte(rq->errors);
-       hdr->driver_status = driver_byte(rq->errors);
-       hdr->info = 0;
-       if (hdr->masked_status || hdr->host_status || hdr->driver_status)
-               hdr->info |= SG_INFO_CHECK;
-       hdr->resid = rq->data_len;
-       hdr->duration = ((jiffies - start_time) * 1000) / HZ;
-       hdr->sb_len_wr = 0;
-
-       if (rq->sense_len && hdr->sbp) {
-               int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);
-
-               if (!copy_to_user(hdr->sbp, rq->sense, len))
-                       hdr->sb_len_wr = len;
-       }
-
-       if (blk_rq_unmap_user(bio, hdr->dxfer_len))
-               ret = -EFAULT;
-
-       /* may not have succeeded, but output values written to control
-        * structure (struct sg_io_hdr).  */
-out:
-       blk_put_request(rq);
-       return ret;
-}
-
-#define OMAX_SB_LEN 16          /* For backward compatibility */
-
-static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
-                        struct gendisk *bd_disk, Scsi_Ioctl_Command __user *sic)
-{
-       struct request *rq;
-       int err;
-       unsigned int in_len, out_len, bytes, opcode, cmdlen;
-       char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
-
-       /*
-        * get the in and out lengths, and verify they don't exceed a page worth of data
-        */
-       if (get_user(in_len, &sic->inlen))
-               return -EFAULT;
-       if (get_user(out_len, &sic->outlen))
-               return -EFAULT;
-       if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
-               return -EINVAL;
-       if (get_user(opcode, sic->data))
-               return -EFAULT;
-
-       bytes = max(in_len, out_len);
-       if (bytes) {
-               buffer = kmalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN);
-               if (!buffer)
-                       return -ENOMEM;
-
-               memset(buffer, 0, bytes);
-       }
-
-       rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
-
-       cmdlen = COMMAND_SIZE(opcode);
-
-       /*
-        * get command and data to send to device, if any
-        */
-       err = -EFAULT;
-       rq->cmd_len = cmdlen;
-       if (copy_from_user(rq->cmd, sic->data, cmdlen))
-               goto error;
-
-       if (copy_from_user(buffer, sic->data + cmdlen, in_len))
-               goto error;
-
-       err = verify_command(file, rq->cmd);
-       if (err)
-               goto error;
-
-       switch (opcode) {
-               case SEND_DIAGNOSTIC:
-               case FORMAT_UNIT:
-                       rq->timeout = FORMAT_UNIT_TIMEOUT;
-                       break;
-               case START_STOP:
-                       rq->timeout = START_STOP_TIMEOUT;
-                       break;
-               case MOVE_MEDIUM:
-                       rq->timeout = MOVE_MEDIUM_TIMEOUT;
-                       break;
-               case READ_ELEMENT_STATUS:
-                       rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
-                       break;
-               case READ_DEFECT_DATA:
-                       rq->timeout = READ_DEFECT_DATA_TIMEOUT;
-                       break;
-               default:
-                       rq->timeout = BLK_DEFAULT_TIMEOUT;
-                       break;
-       }
-
-       memset(sense, 0, sizeof(sense));
-       rq->sense = sense;
-       rq->sense_len = 0;
-
-       rq->data = buffer;
-       rq->data_len = bytes;
-       rq->flags |= REQ_BLOCK_PC;
-
-       blk_execute_rq(q, bd_disk, rq, 0);
-       err = rq->errors & 0xff;        /* only 8 bit SCSI status */
-       if (err) {
-               if (rq->sense_len && rq->sense) {
-                       bytes = (OMAX_SB_LEN > rq->sense_len) ?
-                               rq->sense_len : OMAX_SB_LEN;
-                       if (copy_to_user(sic->data, rq->sense, bytes))
-                               err = -EFAULT;
-               }
-       } else {
-               if (copy_to_user(sic->data, buffer, out_len))
-                       err = -EFAULT;
-       }
-       
-error:
-       kfree(buffer);
-       blk_put_request(rq);
-       return err;
-}
-
-int scsi_cmd_ioctl(struct file *file, struct gendisk *bd_disk, unsigned int cmd, void __user *arg)
-{
-       request_queue_t *q;
-       struct request *rq;
-       int close = 0, err;
-
-       q = bd_disk->queue;
-       if (!q)
-               return -ENXIO;
-
-       if (blk_get_queue(q))
-               return -ENXIO;
-
-       switch (cmd) {
-               /*
-                * new sgv3 interface
-                */
-               case SG_GET_VERSION_NUM:
-                       err = sg_get_version(arg);
-                       break;
-               case SCSI_IOCTL_GET_IDLUN:
-                       err = scsi_get_idlun(q, arg);
-                       break;
-               case SCSI_IOCTL_GET_BUS_NUMBER:
-                       err = scsi_get_bus(q, arg);
-                       break;
-               case SG_SET_TIMEOUT:
-                       err = sg_set_timeout(q, arg);
-                       break;
-               case SG_GET_TIMEOUT:
-                       err = sg_get_timeout(q);
-                       break;
-               case SG_GET_RESERVED_SIZE:
-                       err = sg_get_reserved_size(q, arg);
-                       break;
-               case SG_SET_RESERVED_SIZE:
-                       err = sg_set_reserved_size(q, arg);
-                       break;
-               case SG_EMULATED_HOST:
-                       err = sg_emulated_host(q, arg);
-                       break;
-               case SG_IO: {
-                       struct sg_io_hdr hdr;
-
-                       err = -EFAULT;
-                       if (copy_from_user(&hdr, arg, sizeof(hdr)))
-                               break;
-                       err = sg_io(file, q, bd_disk, &hdr);
-                       if (err == -EFAULT)
-                               break;
-
-                       if (copy_to_user(arg, &hdr, sizeof(hdr)))
-                               err = -EFAULT;
-                       break;
-               }
-               case CDROM_SEND_PACKET: {
-                       struct cdrom_generic_command cgc;
-                       struct sg_io_hdr hdr;
-
-                       err = -EFAULT;
-                       if (copy_from_user(&cgc, arg, sizeof(cgc)))
-                               break;
-                       cgc.timeout = clock_t_to_jiffies(cgc.timeout);
-                       memset(&hdr, 0, sizeof(hdr));
-                       hdr.interface_id = 'S';
-                       hdr.cmd_len = sizeof(cgc.cmd);
-                       hdr.dxfer_len = cgc.buflen;
-                       err = 0;
-                       switch (cgc.data_direction) {
-                               case CGC_DATA_UNKNOWN:
-                                       hdr.dxfer_direction = SG_DXFER_UNKNOWN;
-                                       break;
-                               case CGC_DATA_WRITE:
-                                       hdr.dxfer_direction = SG_DXFER_TO_DEV;
-                                       break;
-                               case CGC_DATA_READ:
-                                       hdr.dxfer_direction = SG_DXFER_FROM_DEV;
-                                       break;
-                               case CGC_DATA_NONE:
-                                       hdr.dxfer_direction = SG_DXFER_NONE;
-                                       break;
-                               default:
-                                       err = -EINVAL;
-                       }
-                       if (err)
-                               break;
-
-                       hdr.dxferp = cgc.buffer;
-                       hdr.sbp = cgc.sense;
-                       if (hdr.sbp)
-                               hdr.mx_sb_len = sizeof(struct request_sense);
-                       hdr.timeout = cgc.timeout;
-                       hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
-                       hdr.cmd_len = sizeof(cgc.cmd);
-
-                       err = sg_io(file, q, bd_disk, &hdr);
-                       if (err == -EFAULT)
-                               break;
-
-                       if (hdr.status)
-                               err = -EIO;
-
-                       cgc.stat = err;
-                       cgc.buflen = hdr.resid;
-                       if (copy_to_user(arg, &cgc, sizeof(cgc)))
-                               err = -EFAULT;
-
-                       break;
-               }
-
-               /*
-                * old junk scsi send command ioctl
-                */
-               case SCSI_IOCTL_SEND_COMMAND:
-                       printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm);
-                       err = -EINVAL;
-                       if (!arg)
-                               break;
-
-                       err = sg_scsi_ioctl(file, q, bd_disk, arg);
-                       break;
-               case CDROMCLOSETRAY:
-                       close = 1;
-               case CDROMEJECT:
-                       rq = blk_get_request(q, WRITE, __GFP_WAIT);
-                       rq->flags |= REQ_BLOCK_PC;
-                       rq->data = NULL;
-                       rq->data_len = 0;
-                       rq->timeout = BLK_DEFAULT_TIMEOUT;
-                       memset(rq->cmd, 0, sizeof(rq->cmd));
-                       rq->cmd[0] = GPCMD_START_STOP_UNIT;
-                       rq->cmd[4] = 0x02 + (close != 0);
-                       rq->cmd_len = 6;
-                       err = blk_execute_rq(q, bd_disk, rq, 0);
-                       blk_put_request(rq);
-                       break;
-               default:
-                       err = -ENOTTY;
-       }
-
-       blk_put_queue(q);
-       return err;
-}
-
-EXPORT_SYMBOL(scsi_cmd_ioctl);
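
From userspace, the SG_IO path above is reached by filling in a struct sg_io_hdr and passing it to ioctl() on the block device node. A minimal sketch issuing a 6-byte INQUIRY; the device path is illustrative and error handling is abbreviated.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY, 96 bytes */
	unsigned char buf[96], sense[32];
	struct sg_io_hdr hdr;
	int fd = open("/dev/sda", O_RDONLY);			/* illustrative node */

	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.cmdp = cdb;
	hdr.cmd_len = sizeof(cdb);
	hdr.dxferp = buf;
	hdr.dxfer_len = sizeof(buf);
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 5000;					/* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	else
		printf("status 0x%x, resid %d\n", hdr.status, hdr.resid);
	return 0;
}
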
index 3dcbd5bfd49843168d6b45f62d3ae5a7b8cfecdc..ea097e0a9c02964abbe421d5aaf3efc1459e4010 100644 (file)
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -501,3 +501,7 @@ config STOP_MACHINE
        help
          Need stop_machine() primitive.
 endmenu
+
+menu "Block layer"
+source "block/Kconfig"
+endmenu