[BLOCK] mark some block/ variables const
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 5f52e30b43f812c75cef8289571e9a1a35bd2808..e02c88ca8fb5fe5778dce66500c60e80ef0178aa 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1,6 +1,4 @@
 /*
- *  linux/drivers/block/ll_rw_blk.c
- *
  * Copyright (C) 1991, 1992 Linus Torvalds
  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
  * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
@@ -241,7 +239,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
        q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
        q->backing_dev_info.state = 0;
        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
-       blk_queue_max_sectors(q, MAX_SECTORS);
+       blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
        blk_queue_hardsect_size(q, 512);
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
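
The hunk above changes the default installed by blk_queue_make_request() from MAX_SECTORS to SAFE_MAX_SECTORS, so a freshly initialised queue only advertises a conservative per-request transfer size until the driver declares its real hardware limit. A minimal driver-side sketch, assuming a hypothetical "mydev" driver on the old request_fn-based API (mydev_request and mydev_lock are made-up names, not part of this patch):

static void mydev_request(request_queue_t *q);	/* hypothetical request_fn */
static spinlock_t mydev_lock = SPIN_LOCK_UNLOCKED;

static request_queue_t *mydev_init_queue(void)
{
	request_queue_t *q = blk_init_queue(mydev_request, &mydev_lock);

	if (!q)
		return NULL;

	/*
	 * hardware can take 1024 sectors (512 KiB) per command, so
	 * raise the limit above the conservative default
	 */
	blk_queue_max_sectors(q, 1024);
	return q;
}
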
@@ -557,7 +555,12 @@ void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
                printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
        }
 
-       q->max_sectors = q->max_hw_sectors = max_sectors;
+       if (BLK_DEF_MAX_SECTORS > max_sectors)
+               q->max_hw_sectors = q->max_sectors = max_sectors;
+       else {
+               q->max_sectors = BLK_DEF_MAX_SECTORS;
+               q->max_hw_sectors = max_sectors;
+       }
 }
 
 EXPORT_SYMBOL(blk_queue_max_sectors);
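
With the rewritten blk_queue_max_sectors() the queue keeps two limits: max_hw_sectors records what the driver said the hardware can handle, while max_sectors, which governs ordinary filesystem I/O, is additionally capped at BLK_DEF_MAX_SECTORS. A small illustration of the effect (1024 is just an example value, and the comment assumes BLK_DEF_MAX_SECTORS is smaller):

	/* e.g. an HBA that accepts 1024-sector commands */
	blk_queue_max_sectors(q, 1024);

	/*
	 * afterwards, assuming BLK_DEF_MAX_SECTORS < 1024:
	 *   q->max_sectors    == BLK_DEF_MAX_SECTORS   (normal fs requests)
	 *   q->max_hw_sectors == 1024                   (hardware ceiling, used
	 *                                                for passthrough requests
	 *                                                in the hunks below)
	 */
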
@@ -659,8 +662,8 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
 void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
 {
        /* zero is "infinity" */
-       t->max_sectors = t->max_hw_sectors =
-               min_not_zero(t->max_sectors,b->max_sectors);
+       t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
+       t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
 
        t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
        t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
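
blk_queue_stack_limits() is used by stacking drivers (md, dm) to fold a component device's limits (b) into the top-level queue (t); the change combines max_sectors and max_hw_sectors independently instead of collapsing both to the same value. min_not_zero() treats 0 as "no limit yet"; the following helper is only an illustration of that semantic, not the real macro from the block headers:

static inline unsigned short stacked_limit(unsigned short t, unsigned short b)
{
	if (t == 0)		/* top-level queue has no limit yet: inherit */
		return b;
	if (b == 0)		/* component advertises no limit: keep ours */
		return t;
	return t < b ? t : b;	/* otherwise the stricter of the two wins */
}

So an md array whose queue still has max_hw_sectors == 0 simply inherits the first component disk's hardware limit, and each further component can only lower it.
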
@@ -1036,7 +1039,7 @@ void blk_queue_invalidate_tags(request_queue_t *q)
 
 EXPORT_SYMBOL(blk_queue_invalidate_tags);
 
-static char *rq_flags[] = {
+static const char * const rq_flags[] = {
        "REQ_RW",
        "REQ_FAILFAST",
        "REQ_SORTED",
@@ -1295,9 +1298,15 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 static int ll_back_merge_fn(request_queue_t *q, struct request *req, 
                            struct bio *bio)
 {
+       unsigned short max_sectors;
        int len;
 
-       if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+       if (unlikely(blk_pc_request(req)))
+               max_sectors = q->max_hw_sectors;
+       else
+               max_sectors = q->max_sectors;
+
+       if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
                req->flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
@@ -1327,9 +1336,16 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 static int ll_front_merge_fn(request_queue_t *q, struct request *req, 
                             struct bio *bio)
 {
+       unsigned short max_sectors;
        int len;
 
-       if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+       if (unlikely(blk_pc_request(req)))
+               max_sectors = q->max_hw_sectors;
+       else
+               max_sectors = q->max_sectors;
+
+
+       if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
                req->flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
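
Both merge functions (ll_back_merge_fn in the previous hunk and ll_front_merge_fn here) now pick the limit from the request type: blk_pc_request() requests (block-layer packet commands, i.e. SG_IO-style passthrough) may grow up to max_hw_sectors, while ordinary filesystem requests stay within max_sectors. The selection both hunks add boils down to the following hypothetical helper, written here only to summarise the logic:

/* Hypothetical helper, not part of the patch. */
static inline unsigned short rq_merge_limit(request_queue_t *q,
					    struct request *rq)
{
	/* passthrough (REQ_BLOCK_PC) requests may use the raw hardware limit */
	if (unlikely(blk_pc_request(rq)))
		return q->max_hw_sectors;

	return q->max_sectors;
}
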
@@ -1892,40 +1908,40 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 {
        struct request *rq = NULL;
        struct request_list *rl = &q->rq;
-       struct io_context *ioc = current_io_context(GFP_ATOMIC);
-       int priv;
+       struct io_context *ioc = NULL;
+       int may_queue, priv;
 
-       if (rl->count[rw]+1 >= q->nr_requests) {
-               /*
-                * The queue will fill after this allocation, so set it as
-                * full, and mark this process as "batching". This process
-                * will be allowed to complete a batch of requests, others
-                * will be blocked.
-                */
-               if (!blk_queue_full(q, rw)) {
-                       ioc_set_batching(q, ioc);
-                       blk_set_queue_full(q, rw);
-               }
-       }
-
-       switch (elv_may_queue(q, rw, bio)) {
-               case ELV_MQUEUE_NO:
-                       goto rq_starved;
-               case ELV_MQUEUE_MAY:
-                       break;
-               case ELV_MQUEUE_MUST:
-                       goto get_rq;
-       }
+       may_queue = elv_may_queue(q, rw, bio);
+       if (may_queue == ELV_MQUEUE_NO)
+               goto rq_starved;
 
-       if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) {
-               /*
-                * The queue is full and the allocating process is not a
-                * "batcher", and not exempted by the IO scheduler
-                */
-               goto out;
+       if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
+               if (rl->count[rw]+1 >= q->nr_requests) {
+                       ioc = current_io_context(GFP_ATOMIC);
+                       /*
+                        * The queue will fill after this allocation, so set
+                        * it as full, and mark this process as "batching".
+                        * This process will be allowed to complete a batch of
+                        * requests, others will be blocked.
+                        */
+                       if (!blk_queue_full(q, rw)) {
+                               ioc_set_batching(q, ioc);
+                               blk_set_queue_full(q, rw);
+                       } else {
+                               if (may_queue != ELV_MQUEUE_MUST
+                                               && !ioc_batching(q, ioc)) {
+                                       /*
+                                        * The queue is full and the allocating
+                                        * process is not a "batcher", and not
+                                        * exempted by the IO scheduler
+                                        */
+                                       goto out;
+                               }
+                       }
+               }
+               set_queue_congested(q, rw);
        }
 
-get_rq:
        /*
         * Only allow batching queuers to allocate up to 50% over the defined
         * limit of requests, otherwise we could have thousands of requests
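
Taken together with the hunks that follow, the rework makes get_request() consult elv_may_queue() up front, defer the current_io_context() lookup until the queue is actually about to hit nr_requests, and set the congestion flag as soon as the allocation crosses queue_congestion_on_threshold() rather than after the count has been bumped. A rough numeric illustration only, assuming the threshold arithmetic used by blk_queue_congestion_threshold() in this version of the file and the default nr_requests of 128:

	/*
	 * Illustration, not code from this patch.  With nr_requests = 128
	 * the thresholds work out to roughly:
	 *
	 *   nr_congestion_on  ~ 128 - 128/8 + 1          = 113
	 *   nr_congestion_off ~ 128 - 128/8 - 128/16 - 1 = 103
	 *
	 * so the queue is flagged congested once ~113 requests are in
	 * flight for a direction, while the io_context lookup and the
	 * "queue full"/batching bookkeeping only start once the count
	 * reaches nr_requests itself.
	 */
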
@@ -1936,8 +1952,6 @@ get_rq:
 
        rl->count[rw]++;
        rl->starved[rw] = 0;
-       if (rl->count[rw] >= queue_congestion_on_threshold(q))
-               set_queue_congested(q, rw);
 
        priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
        if (priv)
@@ -1946,7 +1960,7 @@ get_rq:
        spin_unlock_irq(q->queue_lock);
 
        rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
-       if (!rq) {
+       if (unlikely(!rq)) {
                /*
                 * Allocation failed presumably due to memory. Undo anything
                 * we might have messed up.
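
Wrapping the allocation-failure test in unlikely() is purely a branch-prediction hint, with no functional change; in the kernel the annotation conventionally expands to __builtin_expect():

/* conventional definitions, see include/linux/compiler.h */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)
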
@@ -1971,6 +1985,12 @@ rq_starved:
                goto out;
        }
 
+       /*
+        * ioc may be NULL here, and ioc_batching will be false. That's
+        * OK, if the queue is under the request limit then requests need
+        * not count toward the nr_batch_requests limit. There will always
+        * be some limit enforced by BLK_BATCH_TIME.
+        */
        if (ioc_batching(q, ioc))
                ioc->nr_batch_requests--;
        
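
The added comment explains why leaving ioc NULL on the fast path is safe: ioc_batching(), defined earlier in this file, begins with a NULL check and treats a missing io_context as "not batching". A hedged sketch of that behaviour (the exact body in the file may differ slightly):

static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;	/* no io_context: never counts as a batcher */

	/* otherwise: still inside this context's batching window? */
	return ioc->nr_batch_requests == q->nr_batch_requests ||
	       (ioc->nr_batch_requests > 0 &&
		time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}
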
@@ -2146,7 +2166,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
        struct bio *bio;
        int reading;
 
-       if (len > (q->max_sectors << 9))
+       if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !ubuf)
                return -EINVAL;
@@ -2261,7 +2281,7 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
 {
        struct bio *bio;
 
-       if (len > (q->max_sectors << 9))
+       if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;
@@ -2308,6 +2328,8 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
        generic_unplug_device(q);
 }
 
+EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
+
 /**
  * blk_execute_rq - insert a request into queue for execution
  * @q:         queue to insert the request in
@@ -2446,7 +2468,7 @@ void disk_round_stats(struct gendisk *disk)
 /*
  * queue lock must be held
  */
-static void __blk_put_request(request_queue_t *q, struct request *req)
+void __blk_put_request(request_queue_t *q, struct request *req)
 {
        struct request_list *rl = req->rl;
 
@@ -2475,6 +2497,8 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
        }
 }
 
+EXPORT_SYMBOL_GPL(__blk_put_request);
+
 void blk_put_request(struct request *req)
 {
        unsigned long flags;
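
Dropping the static and adding EXPORT_SYMBOL_GPL lets GPL modules that already hold the queue lock release a request directly; blk_put_request(), whose start is visible above, remains the wrapper for callers that do not hold it. A caller-side sketch of the contrast (not from the patch):

	/* queue lock NOT held: use the public wrapper */
	blk_put_request(rq);

	/*
	 * queue lock already held (e.g. inside the driver's request_fn or
	 * another q->queue_lock-protected path): free the request directly
	 */
	__blk_put_request(q, rq);
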