io_uring: async workers should inherit the user creds
author    Jens Axboe <axboe@kernel.dk>
Mon, 25 Nov 2019 15:52:30 +0000 (08:52 -0700)
committer Seth Forshee <seth.forshee@canonical.com>
Thu, 5 Dec 2019 22:30:05 +0000 (16:30 -0600)
BugLink: https://bugs.launchpad.net/bugs/1855326
[ Upstream commit 181e448d8709e517c9c7b523fcd209f24eb38ca7 ]

If we don't inherit the original task creds, then we can confuse users
like fuse that pass creds in the request header. See link below on
identical aio issue.

Link: https://lore.kernel.org/linux-fsdevel/26f0d78e-99ca-2f1b-78b9-433088053a61@scylladb.com/T/#u
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
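
The patch applies the kernel's override_creds()/revert_creds() pattern in each async path: the worker temporarily assumes the credentials captured at ring setup, runs the request, and then restores its own. A minimal sketch of that pattern follows; the context struct and worker function are illustrative stand-ins, not the real io_uring symbols.

/* Sketch only: example_ctx and example_async_work are illustrative names. */
#include <linux/cred.h>

struct example_ctx {
	struct cred *creds;	/* snapshot of the submitting task's creds */
};

static void example_async_work(struct example_ctx *ctx)
{
	const struct cred *old_cred;

	/* Run the deferred request with the ring owner's credentials... */
	old_cred = override_creds(ctx->creds);

	/* ... issue the request here (vfs_poll(), read/write, etc.) ... */

	/* ...then restore the worker thread's own credentials. */
	revert_creds(old_cred);
}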
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 2c819c3c855d2c3600baf31aa5bea6fdef0ac385..cbe8dabb6479c456ea9f92f64f7a339da183d059 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -238,6 +238,8 @@ struct io_ring_ctx {
 
        struct user_struct      *user;
 
+       struct cred             *creds;
+
        struct completion       ctx_done;
 
        struct {
@@ -1752,8 +1754,11 @@ static void io_poll_complete_work(struct work_struct *work)
        struct io_poll_iocb *poll = &req->poll;
        struct poll_table_struct pt = { ._key = poll->events };
        struct io_ring_ctx *ctx = req->ctx;
+       const struct cred *old_cred;
        __poll_t mask = 0;
 
+       old_cred = override_creds(ctx->creds);
+
        if (!READ_ONCE(poll->canceled))
                mask = vfs_poll(poll->file, &pt) & poll->events;
 
@@ -1768,7 +1773,7 @@ static void io_poll_complete_work(struct work_struct *work)
        if (!mask && !READ_ONCE(poll->canceled)) {
                add_wait_queue(poll->head, &poll->wait);
                spin_unlock_irq(&ctx->completion_lock);
-               return;
+               goto out;
        }
        list_del_init(&req->list);
        io_poll_complete(ctx, req, mask);
@@ -1776,6 +1781,8 @@ static void io_poll_complete_work(struct work_struct *work)
 
        io_cqring_ev_posted(ctx);
        io_put_req(req);
+out:
+       revert_creds(old_cred);
 }
 
 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
@@ -2147,10 +2154,12 @@ static void io_sq_wq_submit_work(struct work_struct *work)
        struct io_ring_ctx *ctx = req->ctx;
        struct mm_struct *cur_mm = NULL;
        struct async_list *async_list;
+       const struct cred *old_cred;
        LIST_HEAD(req_list);
        mm_segment_t old_fs;
        int ret;
 
+       old_cred = override_creds(ctx->creds);
        async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
 restart:
        do {
@@ -2258,6 +2267,7 @@ out:
                unuse_mm(cur_mm);
                mmput(cur_mm);
        }
+       revert_creds(old_cred);
 }
 
 /*
@@ -2663,6 +2673,7 @@ static int io_sq_thread(void *data)
 {
        struct io_ring_ctx *ctx = data;
        struct mm_struct *cur_mm = NULL;
+       const struct cred *old_cred;
        mm_segment_t old_fs;
        DEFINE_WAIT(wait);
        unsigned inflight;
@@ -2672,6 +2683,7 @@ static int io_sq_thread(void *data)
 
        old_fs = get_fs();
        set_fs(USER_DS);
+       old_cred = override_creds(ctx->creds);
 
        timeout = inflight = 0;
        while (!kthread_should_park()) {
@@ -2782,6 +2794,7 @@ static int io_sq_thread(void *data)
                unuse_mm(cur_mm);
                mmput(cur_mm);
        }
+       revert_creds(old_cred);
 
        kthread_parkme();
 
@@ -3567,6 +3580,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
                io_unaccount_mem(ctx->user,
                                ring_pages(ctx->sq_entries, ctx->cq_entries));
        free_uid(ctx->user);
+       if (ctx->creds)
+               put_cred(ctx->creds);
        kfree(ctx);
 }
 
@@ -3838,6 +3853,12 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
        ctx->account_mem = account_mem;
        ctx->user = user;
 
+       ctx->creds = prepare_creds();
+       if (!ctx->creds) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
        ret = io_allocate_scq_urings(ctx, p);
        if (ret)
                goto err;
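
On the setup/teardown side, the diff takes a private copy of the creating task's credentials with prepare_creds() in io_uring_create() and drops the reference with put_cred() in io_ring_ctx_free(). A minimal sketch of that lifecycle, again with an illustrative context struct rather than the real io_uring one:

/* Sketch only: example_ctx is an illustrative name. */
#include <linux/cred.h>
#include <linux/errno.h>

struct example_ctx {
	struct cred *creds;
};

static int example_ctx_init(struct example_ctx *ctx)
{
	/* Private copy of the current (creating) task's credentials. */
	ctx->creds = prepare_creds();
	if (!ctx->creds)
		return -ENOMEM;
	return 0;
}

static void example_ctx_free(struct example_ctx *ctx)
{
	/* Release the reference when the ring context is torn down. */
	if (ctx->creds)
		put_cred(ctx->creds);
}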