diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 6714507aa6c75b716d34a53c708952bf5d0ae619..c0d0493c6f0ceede572c7b9476f8503ec36d4dcd 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -23,6 +23,9 @@ bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
 
 /*
  * If a previously inactive queue goes active, bump the active user count.
+ * We need to do this before trying to allocate a driver tag, so that even
+ * if the first attempt to get a tag fails, the other shared-tag users can
+ * still reserve budget for it.
  */
 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
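
For context on the ordering the new comment describes, here is a minimal illustrative sketch, not the actual blk-mq dispatch path: BLK_MQ_F_TAG_SHARED and __blk_mq_tag_busy() exist in this code base, while example_get_driver_tag() and example_alloc_tag() are hypothetical names used only to show the intended call order.

/*
 * Illustrative sketch: account the hctx as an active shared-tag user
 * *before* attempting the driver-tag allocation, so that even when the
 * first allocation attempt fails, the other shared-tag users already
 * reserve budget for this queue.
 */
static int example_get_driver_tag(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_SHARED)
		__blk_mq_tag_busy(hctx);	/* bump the active user count first */

	return example_alloc_tag(hctx);		/* hypothetical allocation step */
}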
@@ -298,12 +301,12 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 }
 EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
 
-int blk_mq_reinit_tagset(struct blk_mq_tag_set *set,
-                        int (reinit_request)(void *, struct request *))
+int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
+                        int (fn)(void *, struct request *))
 {
        int i, j, ret = 0;
 
-       if (WARN_ON_ONCE(!reinit_request))
+       if (WARN_ON_ONCE(!fn))
                goto out;
 
        for (i = 0; i < set->nr_hw_queues; i++) {
@@ -316,8 +319,7 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set,
                        if (!tags->static_rqs[j])
                                continue;
 
-                       ret = reinit_request(set->driver_data,
-                                            tags->static_rqs[j]);
+                       ret = fn(data, tags->static_rqs[j]);
                        if (ret)
                                goto out;
                }
@@ -326,7 +328,7 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set,
 out:
        return ret;
 }
-EXPORT_SYMBOL_GPL(blk_mq_reinit_tagset);
+EXPORT_SYMBOL_GPL(blk_mq_tagset_iter);
 
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                void *priv)
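
The rename from blk_mq_reinit_tagset() to blk_mq_tagset_iter() also changes the calling convention: the opaque pointer handed to the callback is now chosen by the caller instead of always being set->driver_data. A hedged usage sketch follows; struct example_ctrl, example_reinit_request() and example_reinit_all() are hypothetical driver-side names, only blk_mq_tagset_iter() itself comes from this patch.

/* Hypothetical driver callback; only the signature matters here. */
static int example_reinit_request(void *data, struct request *rq)
{
	struct example_ctrl *ctrl = data;	/* caller-chosen context */

	/* re-initialize per-request driver state for rq here, using ctrl */
	return 0;
}

static int example_reinit_all(struct blk_mq_tag_set *set, struct example_ctrl *ctrl)
{
	/* iterate over all statically allocated requests in the tag set */
	return blk_mq_tagset_iter(set, ctrl, example_reinit_request);
}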
@@ -416,8 +418,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
        if (tdepth <= tags->nr_reserved_tags)
                return -EINVAL;
 
-       tdepth -= tags->nr_reserved_tags;
-
        /*
         * If we are allowed to grow beyond the original size, allocate
         * a new set of tags before freeing the old one.
@@ -437,7 +437,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                if (tdepth > 16 * BLKDEV_MAX_RQ)
                        return -EINVAL;
 
-               new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
+               new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
+                               tags->nr_reserved_tags);
                if (!new)
                        return -ENOMEM;
                ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
@@ -454,7 +455,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                 * Don't need (or can't) update reserved tags here, they
                 * remain static and should never need resizing.
                 */
-               sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
+               sbitmap_queue_resize(&tags->bitmap_tags,
+                               tdepth - tags->nr_reserved_tags);
        }
 
        return 0;
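
The last two hunks fix the depth accounting in blk_mq_tag_update_depth(): tdepth keeps covering reserved plus normal tags for the whole function, a newly allocated rq map gets the existing reserved count, and only the normal portion is passed to sbitmap_queue_resize(). A minimal sketch of that split, with example_normal_tag_depth() as an assumed helper name and the numbers chosen purely for illustration:

/*
 * Illustrative arithmetic only: with tdepth = 64 and nr_reserved_tags = 4,
 * the resize path now sizes the normal-tag bitmap to 60 entries, while a
 * grown rq map is allocated with depth 64 and 4 reserved tags, instead of
 * the reserved count being dropped as before this change.
 */
static inline unsigned int example_normal_tag_depth(unsigned int tdepth,
						    unsigned int nr_reserved_tags)
{
	return tdepth - nr_reserved_tags;	/* value handed to sbitmap_queue_resize() */
}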