block: get rid of QUEUE_FLAG_REENTER
author     Jens Axboe <jaxboe@fusionio.com>
           Tue, 19 Apr 2011 11:32:46 +0000 (13:32 +0200)
committer  Jens Axboe <jaxboe@fusionio.com>
           Tue, 19 Apr 2011 11:32:46 +0000 (13:32 +0200)
We are currently using this flag to check whether it's safe
to call into ->request_fn(). If it is set, we punt to kblockd.
But we get a lot of false positives and excessive punts to
kblockd, which hurts performance.

The only real abuser of this infrastructure is SCSI. So export
the async queue run and convert SCSI over to use that. There's
room for improvement in that SCSI need not always use the async
call, but this fixes our performance issue and they can fix that
up in due time.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
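For illustration, a minimal sketch of how a caller might use the newly
exported helper. The function name my_driver_kick_queue is hypothetical;
the rest is the stock block-layer API as of this commit:

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Hypothetical caller: instead of recursing into ->request_fn() via
 * __blk_run_queue(), defer the queue run to kblockd.
 * blk_run_queue_async() schedules q->delay_work with zero delay, so
 * kblockd invokes the request_fn shortly afterwards in process
 * context. The queue lock is held here to match __blk_run_queue()'s
 * locking contract. */
static void my_driver_kick_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_run_queue_async(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}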
block/blk-core.c
block/blk.h
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_transport_fc.c
include/linux/blkdev.h

diff --git a/block/blk-core.c b/block/blk-core.c
index 580eee5743e5a1d23feb83f15e6ffd904a84698c..40725b9091f15a4191e3e5fd247bcfbfa8be32bf 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -303,15 +303,7 @@ void __blk_run_queue(struct request_queue *q)
        if (unlikely(blk_queue_stopped(q)))
                return;
 
-       /*
-        * Only recurse once to avoid overrunning the stack, let the unplug
-        * handling reinvoke the handler shortly if we already got there.
-        */
-       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-               q->request_fn(q);
-               queue_flag_clear(QUEUE_FLAG_REENTER, q);
-       } else
-               queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+       q->request_fn(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -328,6 +320,7 @@ void blk_run_queue_async(struct request_queue *q)
        if (likely(!blk_queue_stopped(q)))
                queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
+EXPORT_SYMBOL(blk_run_queue_async);
 
 /**
  * blk_run_queue - run a single device queue
diff --git a/block/blk.h b/block/blk.h
index c9df8fc3c99979de8fe21f439b82989d1361e859..61263463e38e17be7c7f742f0bbe9233eec2ecd2 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -22,7 +22,6 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
-void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ab55c2fa7ce209563f99691a7201122fb619fdd9..e9901b8f8443ec3cf6b75487cf36d57584bccd17 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -411,8 +411,6 @@ static void scsi_run_queue(struct request_queue *q)
        list_splice_init(&shost->starved_list, &starved_list);
 
        while (!list_empty(&starved_list)) {
-               int flagset;
-
                /*
                 * As long as shost is accepting commands and we have
                 * starved queues, call blk_run_queue. scsi_request_fn
@@ -435,20 +433,7 @@ static void scsi_run_queue(struct request_queue *q)
                        continue;
                }
 
-               spin_unlock(shost->host_lock);
-
-               spin_lock(sdev->request_queue->queue_lock);
-               flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
-                               !test_bit(QUEUE_FLAG_REENTER,
-                                       &sdev->request_queue->queue_flags);
-               if (flagset)
-                       queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-               __blk_run_queue(sdev->request_queue);
-               if (flagset)
-                       queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
-               spin_unlock(sdev->request_queue->queue_lock);
-
-               spin_lock(shost->host_lock);
+               blk_run_queue_async(sdev->request_queue);
        }
        /* put any unprocessed entries back */
        list_splice(&starved_list, &shost->starved_list);
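Paraphrased (not the verbatim kernel source; the host/target busy checks
are elided), the starved-list walk in scsi_run_queue() now reduces to the
shape below. The point of the change is visible in the locking: the host
lock stays held across the whole walk, because handing the queue run to
kblockd means there is no longer any need to drop it, take the per-device
queue lock, and juggle QUEUE_FLAG_REENTER for each device:

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&starved_list)) {
		struct scsi_device *sdev;

		sdev = list_entry(starved_list.next, struct scsi_device,
				  starved_entry);
		list_del_init(&sdev->starved_entry);
		/* busy checks elided; see scsi_run_queue() for the rest */
		blk_run_queue_async(sdev->request_queue);
	}
	spin_unlock_irqrestore(shost->host_lock, flags);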
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 28c33506e4ada98b560f020410f9bcff803a074e..815069d13f9b57edc5ddaf22dbb223c452d74811 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3816,28 +3816,17 @@ fail_host_msg:
 static void
 fc_bsg_goose_queue(struct fc_rport *rport)
 {
-       int flagset;
-       unsigned long flags;
-
        if (!rport->rqst_q)
                return;
 
+       /*
+        * This get/put dance makes no sense
+        */
        get_device(&rport->dev);
-
-       spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
-       flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
-                 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
-       if (flagset)
-               queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-       __blk_run_queue(rport->rqst_q);
-       if (flagset)
-               queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
-       spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
-
+       blk_run_queue_async(rport->rqst_q);
        put_device(&rport->dev);
 }
 
-
 /**
  * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
  * @q:         rport request queue
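Worth noting about the hunk above: unlike the scsi_lib.c version, which
compared the flags of two different queues, the guard removed from
fc_bsg_goose_queue() tested the same queue's flag and its negation in a
single expression:

	flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
		  !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);

A && !A is always false (barring a racing flag flip between the two
reads), so flagset was effectively never set here and __blk_run_queue()
was always called directly. Switching to blk_run_queue_async() removes
this dead logic along with the flag itself.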
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index cbbfd98ad4a3f73f9f088a996236f0ae015eef7a..2ad95fa1d130f8c8f5cfd68e5fe7a0108aec31d9 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -388,20 +388,19 @@ struct request_queue
 #define        QUEUE_FLAG_SYNCFULL     3       /* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL   4       /* write queue has been filled */
 #define QUEUE_FLAG_DEAD                5       /* queue being torn down */
-#define QUEUE_FLAG_REENTER     6       /* Re-entrancy avoidance */
-#define QUEUE_FLAG_ELVSWITCH   7       /* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_BIDI                8       /* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES     9      /* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP   10      /* force complete on same CPU */
-#define QUEUE_FLAG_FAIL_IO     11      /* fake timeout */
-#define QUEUE_FLAG_STACKABLE   12      /* supports request stacking */
-#define QUEUE_FLAG_NONROT      13      /* non-rotational device (SSD) */
+#define QUEUE_FLAG_ELVSWITCH   6       /* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI                7       /* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES     8      /* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP   9       /* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO     10      /* fake timeout */
+#define QUEUE_FLAG_STACKABLE   11      /* supports request stacking */
+#define QUEUE_FLAG_NONROT      12      /* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT     15      /* do IO stats */
-#define QUEUE_FLAG_DISCARD     16      /* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES   17      /* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM  18      /* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD  19      /* supports SECDISCARD */
+#define QUEUE_FLAG_IO_STAT     13      /* do IO stats */
+#define QUEUE_FLAG_DISCARD     14      /* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES   15      /* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM  16      /* Contributes to random pool */
+#define QUEUE_FLAG_SECDISCARD  17      /* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_STACKABLE)    |       \
@@ -699,6 +698,7 @@ extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
+extern void blk_run_queue_async(struct request_queue *q);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
                           struct rq_map_data *, void __user *, unsigned long,
                           gfp_t);
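On the renumbering in blkdev.h: the QUEUE_FLAG_* values are plain bit
indices into q->queue_flags, consumed through test_bit()/set_bit()-style
accessors, so compacting them after removing bit 6 is safe as long as
the kernel and its modules are rebuilt together; they are not a stable
ABI. A minimal sketch of such an accessor in use (my_queue_is_ssd is a
hypothetical helper; blk_queue_nonrot() is the real blkdev.h macro):

#include <linux/blkdev.h>

/* Flags are always read through accessors like this, so the numeric
 * flag values can change freely between kernel builds. */
static bool my_queue_is_ssd(struct request_queue *q)
{
	/* expands to test_bit(QUEUE_FLAG_NONROT, &q->queue_flags) */
	return blk_queue_nonrot(q);
}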