block: optionally merge discontiguous discard bios into a single request
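
With this change, a discard bio that is not contiguous with an existing discard request can still be appended to it when the queue advertises a discard-segment limit above one; each merged bio consumes one discard segment, and the new bio_attempt_discard_merge() in the diff below refuses the merge once the request would exceed the queue's discard-segment or sector budget. As a rough, user-space sketch of that decision only (fake_queue, fake_request, can_merge_discard and the numeric limits are simplified stand-ins invented here, not the kernel types):

/*
 * Simplified, user-space model of the merge checks made by
 * bio_attempt_discard_merge() in the hunk below.  The structs and
 * numeric limits are illustrative stand-ins, not the kernel types.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_queue {
	unsigned short max_discard_segments;	/* cf. queue_max_discard_segments() */
	unsigned int max_sectors;		/* cf. blk_rq_get_max_sectors() */
};

struct fake_request {
	unsigned short nr_discard_segments;	/* cf. blk_rq_nr_discard_segments() */
	unsigned int sectors;			/* cf. blk_rq_sectors() */
};

/*
 * A discontiguous discard bio may join an existing discard request only
 * while the request stays within the queue's discard-segment and sector
 * budgets; each merged bio counts as one more discard segment.
 */
static bool can_merge_discard(const struct fake_queue *q,
			      const struct fake_request *req,
			      unsigned int bio_sectors)
{
	if (req->nr_discard_segments >= q->max_discard_segments)
		return false;
	if (req->sectors + bio_sectors > q->max_sectors)
		return false;
	return true;
}

int main(void)
{
	struct fake_queue q = { .max_discard_segments = 64, .max_sectors = 1 << 16 };
	struct fake_request req = { .nr_discard_segments = 1, .sectors = 2048 };

	printf("merge allowed: %d\n", can_merge_discard(&q, &req, 2048));
	return 0;
}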

diff --git a/block/blk-core.c b/block/blk-core.c
index dcac0352c14c09acd6309bc8456ec02ca8633bf1..c0e4d41d3d3336d8a40c8d5fdd29325b0fce9fe2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -33,6 +33,7 @@
 #include <linux/ratelimit.h>
 #include <linux/pm_runtime.h>
 #include <linux/blk-cgroup.h>
+#include <linux/debugfs.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
@@ -42,6 +43,10 @@
 #include "blk-mq-sched.h"
 #include "blk-wbt.h"
 
+#ifdef CONFIG_DEBUG_FS
+struct dentry *blk_debugfs_root;
+#endif
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
@@ -105,22 +110,6 @@ void blk_queue_congestion_threshold(struct request_queue *q)
        q->nr_congestion_off = nr;
 }
 
-/**
- * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
- * @bdev:      device
- *
- * Locates the passed device's request queue and returns the address of its
- * backing_dev_info.  This function can only be called if @bdev is opened
- * and the return value is never NULL.
- */
-struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
-{
-       struct request_queue *q = bdev_get_queue(bdev);
-
-       return q->backing_dev_info;
-}
-EXPORT_SYMBOL(blk_get_backing_dev_info);
-
 void blk_rq_init(struct request_queue *q, struct request *rq)
 {
        memset(rq, 0, sizeof(*rq));
@@ -588,6 +577,7 @@ void blk_cleanup_queue(struct request_queue *q)
        spin_unlock_irq(lock);
 
        bdi_unregister(q->backing_dev_info);
+       put_disk_devt(q->disk_devt);
 
        /* @q is and will stay empty, shutdown and put */
        blk_put_queue(q);
@@ -713,7 +703,6 @@ static void blk_rq_timed_out_timer(unsigned long data)
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
        struct request_queue *q;
-       int err;
 
        q = kmem_cache_alloc_node(blk_requestq_cachep,
                                gfp_mask | __GFP_ZERO, node_id);
@@ -728,17 +717,16 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        if (!q->bio_split)
                goto fail_id;
 
-       q->backing_dev_info = &q->_backing_dev_info;
+       q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
+       if (!q->backing_dev_info)
+               goto fail_split;
+
        q->backing_dev_info->ra_pages =
                        (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
        q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
        q->backing_dev_info->name = "block";
        q->node = node_id;
 
-       err = bdi_init(q->backing_dev_info);
-       if (err)
-               goto fail_split;
-
        setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
                    laptop_mode_timer_fn, (unsigned long) q);
        setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
@@ -789,7 +777,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 fail_ref:
        percpu_ref_exit(&q->q_usage_counter);
 fail_bdi:
-       bdi_destroy(q->backing_dev_info);
+       bdi_put(q->backing_dev_info);
 fail_split:
        bioset_free(q->bio_split);
 fail_id:
@@ -1495,6 +1483,30 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
        return true;
 }
 
+bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
+               struct bio *bio)
+{
+       unsigned short segments = blk_rq_nr_discard_segments(req);
+
+       if (segments >= queue_max_discard_segments(q))
+               goto no_merge;
+       if (blk_rq_sectors(req) + bio_sectors(bio) >
+           blk_rq_get_max_sectors(req, blk_rq_pos(req)))
+               goto no_merge;
+
+       req->biotail->bi_next = bio;
+       req->biotail = bio;
+       req->__data_len += bio->bi_iter.bi_size;
+       req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
+       req->nr_phys_segments = segments + 1;
+
+       blk_account_io_start(req, false);
+       return true;
+no_merge:
+       req_set_nomerge(q, req);
+       return false;
+}
+
 /**
  * blk_attempt_plug_merge - try to merge with %current's plugged list
  * @q: request_queue new bio is being queued at
@@ -1523,12 +1535,11 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 {
        struct blk_plug *plug;
        struct request *rq;
-       bool ret = false;
        struct list_head *plug_list;
 
        plug = current->plug;
        if (!plug)
-               goto out;
+               return false;
        *request_count = 0;
 
        if (q->mq_ops)
@@ -1537,7 +1548,7 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
                plug_list = &plug->list;
 
        list_for_each_entry_reverse(rq, plug_list, queuelist) {
-               int el_ret;
+               bool merged = false;
 
                if (rq->q == q) {
                        (*request_count)++;
@@ -1553,19 +1564,25 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
                if (rq->q != q || !blk_rq_merge_ok(rq, bio))
                        continue;
 
-               el_ret = blk_try_merge(rq, bio);
-               if (el_ret == ELEVATOR_BACK_MERGE) {
-                       ret = bio_attempt_back_merge(q, rq, bio);
-                       if (ret)
-                               break;
-               } else if (el_ret == ELEVATOR_FRONT_MERGE) {
-                       ret = bio_attempt_front_merge(q, rq, bio);
-                       if (ret)
-                               break;
+               switch (blk_try_merge(rq, bio)) {
+               case ELEVATOR_BACK_MERGE:
+                       merged = bio_attempt_back_merge(q, rq, bio);
+                       break;
+               case ELEVATOR_FRONT_MERGE:
+                       merged = bio_attempt_front_merge(q, rq, bio);
+                       break;
+               case ELEVATOR_DISCARD_MERGE:
+                       merged = bio_attempt_discard_merge(q, rq, bio);
+                       break;
+               default:
+                       break;
                }
+
+               if (merged)
+                       return true;
        }
-out:
-       return ret;
+
+       return false;
 }
 
 unsigned int blk_plug_queued_count(struct request_queue *q)
@@ -1607,8 +1624,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
        struct blk_plug *plug;
-       int el_ret, where = ELEVATOR_INSERT_SORT;
-       struct request *req;
+       int where = ELEVATOR_INSERT_SORT;
+       struct request *req, *free;
        unsigned int request_count = 0;
        unsigned int wb_acct;
 
@@ -1645,21 +1662,29 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 
        spin_lock_irq(q->queue_lock);
 
-       el_ret = elv_merge(q, &req, bio);
-       if (el_ret == ELEVATOR_BACK_MERGE) {
-               if (bio_attempt_back_merge(q, req, bio)) {
-                       elv_bio_merged(q, req, bio);
-                       if (!attempt_back_merge(q, req))
-                               elv_merged_request(q, req, el_ret);
-                       goto out_unlock;
-               }
-       } else if (el_ret == ELEVATOR_FRONT_MERGE) {
-               if (bio_attempt_front_merge(q, req, bio)) {
-                       elv_bio_merged(q, req, bio);
-                       if (!attempt_front_merge(q, req))
-                               elv_merged_request(q, req, el_ret);
-                       goto out_unlock;
-               }
+       switch (elv_merge(q, &req, bio)) {
+       case ELEVATOR_BACK_MERGE:
+               if (!bio_attempt_back_merge(q, req, bio))
+                       break;
+               elv_bio_merged(q, req, bio);
+               free = attempt_back_merge(q, req);
+               if (free)
+                       __blk_put_request(q, free);
+               else
+                       elv_merged_request(q, req, ELEVATOR_BACK_MERGE);
+               goto out_unlock;
+       case ELEVATOR_FRONT_MERGE:
+               if (!bio_attempt_front_merge(q, req, bio))
+                       break;
+               elv_bio_merged(q, req, bio);
+               free = attempt_front_merge(q, req);
+               if (free)
+                       __blk_put_request(q, free);
+               else
+                       elv_merged_request(q, req, ELEVATOR_FRONT_MERGE);
+               goto out_unlock;
+       default:
+               break;
        }
 
 get_rq:
@@ -3458,5 +3483,9 @@ int __init blk_dev_init(void)
        blk_requestq_cachep = kmem_cache_create("request_queue",
                        sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
 
+#ifdef CONFIG_DEBUG_FS
+       blk_debugfs_root = debugfs_create_dir("block", NULL);
+#endif
+
        return 0;
 }