git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
blk-mq: fix corruption with direct issue
author     Jens Axboe <axboe@kernel.dk>
           Wed, 27 Nov 2019 20:18:18 +0000 (17:18 -0300)
committer  Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
           Fri, 17 Jan 2020 17:23:13 +0000 (14:23 -0300)
BugLink: https://bugs.launchpad.net/bugs/1848739
If we attempt a direct issue to a SCSI device and it returns BUSY, then
we queue the request up normally. However, the SCSI layer may already
have set up SG tables etc. for this particular command. If we later
merge with this request, then the old tables are no longer valid. Once
we issue the IO, we only read/write the original part of the request,
not the new state of it.
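
A hedged userspace analogy of the failure (plain C, made-up fake_request
and fake_cmd types, not kernel code): the "driver" sizes its transfer
when the command is prepared, the block layer later merges more data
into the request, and the stale size means the merged tail never
reaches the "device".

  #include <stdio.h>
  #include <string.h>

  struct fake_request { char data[32]; size_t len; };
  struct fake_cmd { size_t mapped_len; };  /* like an SG table: fixed at prep */

  int main(void)
  {
          struct fake_request rq = { .len = 0 };
          struct fake_cmd cmd;
          char device[32];

          memset(device, '?', sizeof(device));

          memcpy(rq.data, "AAAA", 4);
          rq.len = 4;
          cmd.mapped_len = rq.len;         /* direct issue fails, state kept */

          memcpy(rq.data + rq.len, "BBBB", 4);
          rq.len += 4;                     /* a later merge grows the request */

          memcpy(device, rq.data, cmd.mapped_len);   /* issue uses stale size */

          printf("wrote %zu of %zu bytes, tail on device: %.4s\n",
                 cmd.mapped_len, rq.len, device + 4);
          return 0;
  }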

This causes data corruption, and is most often noticed with the file
system complaining about just-read data being invalid:

[  235.934465] EXT4-fs error (device sda1): ext4_iget:4831: inode #7142: comm dpkg-query: bad extra_isize 24937 (inode size 256)

because most of it is garbage...

This doesn't happen from the normal issue path, as we will simply defer
the request to the hardware queue dispatch list if we fail. Once it's on
the dispatch list, we never merge with it.
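
For comparison, a condensed (not verbatim) sketch of how that normal
dispatch path parks leftover requests, adapted from
blk_mq_dispatch_rq_list():

  /*
   * Requests the driver could not take are spliced back onto
   * hctx->dispatch. Bio merging only scans the plug list, the I/O
   * scheduler and the software queues, so a request parked here can no
   * longer grow underneath the driver.
   */
  if (!list_empty(list)) {
          spin_lock(&hctx->lock);
          list_splice_init(list, &hctx->dispatch);
          spin_unlock(&hctx->lock);

          blk_mq_run_hw_queue(hctx, true);
  }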

Fix this from the direct issue path by flagging the request as
REQ_NOMERGE so we don't change the size of it before issue.
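
For context, every merge path already bails out on such a request via
rq_mergeable(); a condensed (not verbatim) form of that check:

  /*
   * Condensed from rq_mergeable() in blkdev.h: REQ_NOMERGE is part of
   * REQ_NOMERGE_FLAGS, so both bio and request merging reject this
   * request up front once the flag is set.
   */
  static inline bool rq_mergeable(struct request *rq)
  {
          if (blk_rq_is_passthrough(rq))
                  return false;

          if (req_op(rq) == REQ_OP_FLUSH)
                  return false;

          if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
                  return false;

          return true;
  }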

See also:
  https://bugzilla.kernel.org/show_bug.cgi?id=201685

Tested-by: Guenter Roeck <linux@roeck-us.net>
Fixes: 6ce3dd6eec1 ("blk-mq: issue directly if hw queue isn't busy in case of 'none'")
Cc: stable@vger.kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
(cherry picked from commit ffe81d45322cc3cb140f0db080a4727ea284661e)
Signed-off-by: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6a0ad28fa17638a63b90bfa0143d09b140201207..d419f793afed9e2b4f1cf8aacba1d13ab43ddf12 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1778,6 +1778,15 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
                break;
        case BLK_STS_RESOURCE:
        case BLK_STS_DEV_RESOURCE:
+               /*
+                * If direct dispatch fails, we cannot allow any merging on
+                * this IO. Drivers (like SCSI) may have set up permanent state
+                * for this request, like SG tables and mappings, and if we
+                * merge to it later on then we'll still only do IO to the
+                * original part.
+                */
+               rq->cmd_flags |= REQ_NOMERGE;
+
                blk_mq_update_dispatch_busy(hctx, true);
                __blk_mq_requeue_request(rq);
                break;
@@ -1790,6 +1799,18 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
        return ret;
 }
 
+/*
+ * Don't allow direct dispatch of anything but regular reads/writes,
+ * as some of the other commands can potentially share request space
+ * with data we need for the IO scheduler. If we attempt a direct dispatch
+ * on those and fail, we can't safely add it to the scheduler afterwards
+ * without potentially overwriting data that the driver has already written.
+ */
+static bool blk_rq_can_direct_dispatch(struct request *rq)
+{
+       return req_op(rq) == REQ_OP_READ || req_op(rq) == REQ_OP_WRITE;
+}
+
 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
                                                struct request *rq,
                                                blk_qc_t *cookie,
@@ -1811,7 +1832,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
                goto insert;
        }
 
-       if (q->elevator && !bypass_insert)
+       if (!blk_rq_can_direct_dispatch(rq) || (q->elevator && !bypass_insert))
                goto insert;
 
        if (!blk_mq_get_dispatch_budget(hctx))
@@ -1873,6 +1894,9 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                struct request *rq = list_first_entry(list, struct request,
                                queuelist);
 
+               if (!blk_rq_can_direct_dispatch(rq))
+                       break;
+
                list_del_init(&rq->queuelist);
                ret = blk_mq_request_issue_directly(rq);
                if (ret != BLK_STS_OK) {