nbd: make sure request completion won't concurrent
[mirror_ubuntu-jammy-kernel.git] / block / blk-mq.c
index 108a352051be5fea9c7a5c132576844c3f3ee8a7..82de39926a9f6e20364d83600edaf46edf6b0614 100644
@@ -188,9 +188,11 @@ void blk_mq_freeze_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
-void blk_mq_unfreeze_queue(struct request_queue *q)
+void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
 {
        mutex_lock(&q->mq_freeze_lock);
+       if (force_atomic)
+               q->q_usage_counter.data->force_atomic = true;
        q->mq_freeze_depth--;
        WARN_ON_ONCE(q->mq_freeze_depth < 0);
        if (!q->mq_freeze_depth) {
@@ -199,6 +201,11 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
        }
        mutex_unlock(&q->mq_freeze_lock);
 }
+
+void blk_mq_unfreeze_queue(struct request_queue *q)
+{
+       __blk_mq_unfreeze_queue(q, false);
+}
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
 
 /*
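
This hunk splits the unfreeze path so block core can request an atomic re-init
of q_usage_counter via the new force_atomic flag, while the exported
blk_mq_unfreeze_queue() keeps its previous behaviour. For context, a minimal
sketch of the usual freeze/unfreeze pairing around a queue-wide update
(example_update_queue() is a hypothetical caller, not part of this patch):

    /* Hedged sketch: drain in-flight I/O, change queue state, resume. */
    static void example_update_queue(struct request_queue *q)
    {
            blk_mq_freeze_queue(q);    /* waits for q_usage_counter to drain */
            /* ... update state that must not race with requests ... */
            blk_mq_unfreeze_queue(q);  /* wakes waiters, lets new I/O enter */
    }
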
@@ -756,7 +763,6 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
        /* this request will be re-inserted to io scheduler queue */
        blk_mq_sched_requeue_request(rq);
 
-       BUG_ON(!list_empty(&rq->queuelist));
        blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
 }
 EXPORT_SYMBOL(blk_mq_requeue_request);
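
For reference, the typical driver-side use of blk_mq_requeue_request() is in a
completion or error path that decides to retry rather than fail a request. A
hedged sketch (example_complete() and the retry policy are illustrative, not
taken from this patch):

    static void example_complete(struct request *rq, blk_status_t status)
    {
            if (status == BLK_STS_RESOURCE) {
                    /* transient failure: hand the request back for a retry */
                    blk_mq_requeue_request(rq, true);
                    return;
            }
            blk_mq_end_request(rq, status);
    }
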
@@ -1318,6 +1324,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
        int errors, queued;
        blk_status_t ret = BLK_STS_OK;
        LIST_HEAD(zone_list);
+       bool needs_resource = false;
 
        if (list_empty(list))
                return false;
@@ -1363,6 +1370,8 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
                        queued++;
                        break;
                case BLK_STS_RESOURCE:
+                       needs_resource = true;
+                       fallthrough;
                case BLK_STS_DEV_RESOURCE:
                        blk_mq_handle_dev_resource(rq, list);
                        goto out;
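
The distinction the two status codes draw, and that the new needs_resource flag
now tracks, is roughly: BLK_STS_DEV_RESOURCE promises that an in-flight
completion will rerun the queue, while BLK_STS_RESOURCE makes no such promise,
so the block layer has to schedule the rerun itself. A hedged driver-side
sketch (the example_* helpers are hypothetical):

    static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
                                         const struct blk_mq_queue_data *bd)
    {
            struct request *rq = bd->rq;

            if (!example_reserve_device_slot(hctx))    /* hypothetical */
                    /* other commands are in flight; their completion reruns us */
                    return BLK_STS_DEV_RESOURCE;

            if (!example_alloc_bounce_buffer(rq))      /* hypothetical */
                    /* nothing pending here; block core must rerun the queue */
                    return BLK_STS_RESOURCE;

            blk_mq_start_request(rq);
            /* ... issue to hardware ... */
            return BLK_STS_OK;
    }
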
@@ -1373,6 +1382,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
                         * accept.
                         */
                        blk_mq_handle_zone_resource(rq, &zone_list);
+                       needs_resource = true;
                        break;
                default:
                        errors++;
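
BLK_STS_ZONE_RESOURCE is the zoned-device variant of the same situation: the
request failed only because a zone write lock could not be taken, so it is
parked on zone_list and, with this change, also counted as needs_resource so
the delayed rerun cannot be skipped. A hedged sketch of where that status
typically originates, assuming the standard zone write lock helpers
(example_queue_rq_zoned() is illustrative):

    static blk_status_t example_queue_rq_zoned(struct request *rq)
    {
            /* Writes to sequential zones are serialized by the zone write lock. */
            if (blk_req_needs_zone_write_lock(rq) &&
                !blk_req_zone_write_trylock(rq))
                    return BLK_STS_ZONE_RESOURCE;  /* parked, dispatched again later */

            /* ... issue the command ... */
            return BLK_STS_OK;
    }
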
@@ -1399,7 +1409,6 @@ out:
                /* For non-shared tags, the RESTART check will suffice */
                bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
                        (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
-               bool no_budget_avail = prep == PREP_DISPATCH_NO_BUDGET;
 
                if (nr_budgets)
                        blk_mq_release_budgets(q, list);
@@ -1440,14 +1449,16 @@ out:
                 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
                 * bit is set, run queue after a delay to avoid IO stalls
                 * that could otherwise occur if the queue is idle.  We'll do
-                * similar if we couldn't get budget and SCHED_RESTART is set.
+                * similar if we couldn't get budget or couldn't lock a zone
+                * and SCHED_RESTART is set.
                 */
                needs_restart = blk_mq_sched_needs_restart(hctx);
+               if (prep == PREP_DISPATCH_NO_BUDGET)
+                       needs_resource = true;
                if (!needs_restart ||
                    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
                        blk_mq_run_hw_queue(hctx, true);
-               else if (needs_restart && (ret == BLK_STS_RESOURCE ||
-                                          no_budget_avail))
+               else if (needs_restart && needs_resource)
                        blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
 
                blk_mq_update_dispatch_busy(hctx, true);
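
With this hunk, any of the three conditions -- a BLK_STS_RESOURCE return, a
failed zone write lock, or no dispatch budget -- marks needs_resource, so the
blk_mq_delay_run_hw_queue() call above is taken whenever SCHED_RESTART is set,
instead of only when the final status happened to be BLK_STS_RESOURCE or the
budget was missing. For context, a hedged sketch of the SCHED_RESTART
handshake that else-branch relies on (condensed from blk-mq-sched; the
example_* names are illustrative): the bit is set when requests are parked on
the dispatch list, and a later completion clears it and reruns the queue. If no
completion will ever arrive, the delayed run is the safety net.

    static inline void example_mark_restart(struct blk_mq_hw_ctx *hctx)
    {
            if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                    set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
    }

    static inline void example_restart_on_completion(struct blk_mq_hw_ctx *hctx)
    {
            if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                    blk_mq_run_hw_queue(hctx, true);
    }
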
@@ -2136,14 +2147,14 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 }
 
 /*
- * Allow 4x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
+ * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
  * queues. This is important for md arrays to benefit from merging
  * requests.
  */
 static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
 {
        if (plug->multiple_queues)
-               return BLK_MAX_REQUEST_COUNT * 4;
+               return BLK_MAX_REQUEST_COUNT * 2;
        return BLK_MAX_REQUEST_COUNT;
 }
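
This hunk halves the multi-queue plug cap from 4x to 2x BLK_MAX_REQUEST_COUNT.
Once a plugged list reaches the cap, the submission path flushes it to the
driver; a hedged sketch of that pattern (condensed and illustrative, not a
verbatim copy of the submit path; blk_add_rq_to_plug() is the local helper
shown above):

    static void example_plug_request(struct blk_plug *plug, struct request *rq)
    {
            if (plug->rq_count >= blk_plug_max_rq_count(plug))
                    blk_flush_plug_list(plug, false);  /* drain before adding more */
            blk_add_rq_to_plug(plug, rq);
    }
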
 
@@ -4007,6 +4018,19 @@ unsigned int blk_mq_rq_cpu(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_rq_cpu);
 
+void blk_mq_cancel_work_sync(struct request_queue *q)
+{
+       if (queue_is_mq(q)) {
+               struct blk_mq_hw_ctx *hctx;
+               int i;
+
+               cancel_delayed_work_sync(&q->requeue_work);
+
+               queue_for_each_hw_ctx(q, hctx, i)
+                       cancel_delayed_work_sync(&hctx->run_work);
+       }
+}
+
 static int __init blk_mq_init(void)
 {
        int i;
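
The new blk_mq_cancel_work_sync() helper synchronously cancels the queue's
requeue work and every hardware context's run work; its callers are added
outside this hunk. A minimal sketch of the intended use during queue or disk
teardown (example_teardown() is a hypothetical caller):

    static void example_teardown(struct request_queue *q)
    {
            /* no requeue or run work may touch the queue past this point */
            blk_mq_cancel_work_sync(q);
            /* ... proceed with releasing hctx and queue resources ... */
    }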