fuse: handle zero sized retrieve correctly
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 17f0d05bfd4c4b4dc9a877832ac3d7f88f0acbaa..cfae27c7a828b5b080eae0663c20386ecf67afc8 100644
@@ -114,8 +114,8 @@ static void __fuse_put_request(struct fuse_req *req)
 
 static void fuse_req_init_context(struct fuse_conn *fc, struct fuse_req *req)
 {
-       req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
-       req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
+       req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
+       req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
        req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
 }
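/*
 * A minimal sketch of what the hunk above changes, with an illustrative
 * helper that is not part of the patch: credentials are now translated into
 * the connection's own fc->user_ns instead of being munged against
 * init_user_ns.  Unlike the _munged variants, from_kuid()/from_kgid()
 * return (uid_t)-1/(gid_t)-1 when the id has no mapping in the target
 * namespace, which the __fuse_get_req() hunk below rejects with -EOVERFLOW.
 */
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/uidgid.h>
#include <linux/user_namespace.h>

static int sketch_translate_fsuid(struct user_namespace *target_ns, uid_t *out)
{
        uid_t uid = from_kuid(target_ns, current_fsuid());

        if (uid == (uid_t)-1)
                return -EOVERFLOW;      /* no mapping in target_ns */
        *out = uid;
        return 0;
}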
 
@@ -131,6 +131,20 @@ static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
        return !fc->initialized || (for_background && fc->blocked);
 }
 
+static void fuse_drop_waiting(struct fuse_conn *fc)
+{
+       /*
+        * lockless check of fc->connected is okay, because atomic_dec_and_test()
+        * provides a memory barrier matched with the one in fuse_wait_aborted()
+        * to ensure no wake-up is missed.
+        */
+       if (atomic_dec_and_test(&fc->num_waiting) &&
+           !READ_ONCE(fc->connected)) {
+               /* wake up aborters */
+               wake_up_all(&fc->blocked_waitq);
+       }
+}
+
 static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
                                       bool for_background)
 {
@@ -167,11 +181,15 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
        __set_bit(FR_WAITING, &req->flags);
        if (for_background)
                __set_bit(FR_BACKGROUND, &req->flags);
+       if (req->in.h.uid == (uid_t)-1 || req->in.h.gid == (gid_t)-1) {
+               fuse_put_request(fc, req);
+               return ERR_PTR(-EOVERFLOW);
+       }
 
        return req;
 
  out:
-       atomic_dec(&fc->num_waiting);
+       fuse_drop_waiting(fc);
        return ERR_PTR(err);
 }
 
@@ -278,7 +296,7 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 
                if (test_bit(FR_WAITING, &req->flags)) {
                        __clear_bit(FR_WAITING, &req->flags);
-                       atomic_dec(&fc->num_waiting);
+                       fuse_drop_waiting(fc);
                }
 
                if (req->stolen_file)
@@ -364,7 +382,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
        struct fuse_iqueue *fiq = &fc->iq;
 
        if (test_and_set_bit(FR_FINISHED, &req->flags))
-               return;
+               goto put_request;
 
        spin_lock(&fiq->waitq.lock);
        list_del_init(&req->intr_entry);
@@ -374,15 +392,21 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
        if (test_bit(FR_BACKGROUND, &req->flags)) {
                spin_lock(&fc->lock);
                clear_bit(FR_BACKGROUND, &req->flags);
-               if (fc->num_background == fc->max_background)
+               if (fc->num_background == fc->max_background) {
                        fc->blocked = 0;
-
-               /* Wake up next waiter, if any */
-               if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
                        wake_up(&fc->blocked_waitq);
+               } else if (!fc->blocked) {
+                       /*
+                        * Wake up next waiter, if any.  It's okay to use
+                        * waitqueue_active(), as we've already synced up
+                        * fc->blocked with waiters with the wake_up() call
+                        * above.
+                        */
+                       if (waitqueue_active(&fc->blocked_waitq))
+                               wake_up(&fc->blocked_waitq);
+               }
 
-               if (fc->num_background == fc->congestion_threshold &&
-                   fc->connected && fc->sb) {
+               if (fc->num_background == fc->congestion_threshold && fc->sb) {
                        clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
                        clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
                }
@@ -394,6 +418,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
        wake_up(&req->waitq);
        if (req->end)
                req->end(fc, req);
+put_request:
        fuse_put_request(fc, req);
 }
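/*
 * Note on the FR_FINISHED change above: the abort path (a later hunk) now
 * takes its own reference with __fuse_get_request() on each request it is
 * about to end, so when request_end() finds the request already finished by
 * a racing completion it must still drop that reference.  Hence
 * "goto put_request" rather than the old early return.
 */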
 
@@ -1222,6 +1247,9 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
        struct fuse_in *in;
        unsigned reqsize;
 
+       if (current_user_ns() != fc->user_ns)
+               return -EIO;
+
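/*
 * The -EIO check above (mirrored in fuse_dev_do_write() further down) means
 * the device can only be serviced from the user namespace the connection was
 * set up in.  Request headers now carry ids translated into fc->user_ns (see
 * the fuse_req_init_context() hunk at the top), so a daemon reading from a
 * different namespace would interpret those ids against the wrong mapping.
 */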
  restart:
        spin_lock(&fiq->waitq.lock);
        err = -EAGAIN;
@@ -1299,12 +1327,14 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
                goto out_end;
        }
        list_move_tail(&req->list, &fpq->processing);
-       spin_unlock(&fpq->lock);
+       __fuse_get_request(req);
        set_bit(FR_SENT, &req->flags);
+       spin_unlock(&fpq->lock);
        /* matches barrier in request_wait_answer() */
        smp_mb__after_atomic();
        if (test_bit(FR_INTERRUPTED, &req->flags))
                queue_interrupt(fiq, req);
+       fuse_put_request(fc, req);
 
        return reqsize;
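/*
 * A minimal sketch of the pin-across-unlock pattern introduced above, with
 * illustrative types and helpers: once fpq->lock is dropped and FR_SENT is
 * visible, a concurrent reply (or abort) may finish and free the request, so
 * an extra reference is taken while still locked and dropped again after the
 * unlocked work (the FR_INTERRUPTED check and queue_interrupt()) is done.
 */
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct sketch_obj {
        refcount_t      count;
        spinlock_t      lock;
};

static void sketch_obj_put(struct sketch_obj *obj)
{
        if (refcount_dec_and_test(&obj->count))
                kfree(obj);                     /* last reference frees it */
}

/* Caller is assumed to already hold one reference (the request sits on
 * fpq->processing), otherwise refcount_inc() would not be safe. */
static void sketch_use_after_unlock(struct sketch_obj *obj)
{
        spin_lock(&obj->lock);
        refcount_inc(&obj->count);              /* pin before unlocking */
        spin_unlock(&obj->lock);

        /* obj stays valid here even if another CPU drops its reference */

        sketch_obj_put(obj);                    /* unpin; may free obj */
}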
 
@@ -1672,7 +1702,6 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
        req->in.h.nodeid = outarg->nodeid;
        req->in.numargs = 2;
        req->in.argpages = 1;
-       req->page_descs[0].offset = offset;
        req->end = fuse_retrieve_end;
 
        index = outarg->offset >> PAGE_SHIFT;
@@ -1687,6 +1716,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
 
                this_num = min_t(unsigned, num, PAGE_SIZE - offset);
                req->pages[req->num_pages] = page;
+               req->page_descs[req->num_pages].offset = offset;
                req->page_descs[req->num_pages].length = this_num;
                req->num_pages++;
 
@@ -1702,8 +1732,10 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
        req->in.args[1].size = total_len;
 
        err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
-       if (err)
+       if (err) {
                fuse_retrieve_end(fc, req);
+               fuse_put_request(fc, req);
+       }
 
        return err;
 }
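/*
 * The zero sized retrieve fix named in the subject line is the change above:
 * the page offset is now recorded per page, in the same loop iteration that
 * adds the page, so a retrieve of zero bytes (the loop never runs and
 * num_pages stays 0) writes no descriptor at all.  Independently of that,
 * the error path now drops the request with fuse_put_request() after
 * fuse_retrieve_end(), so a failed notify reply no longer leaks it.  A
 * minimal sketch of the fill pattern, with illustrative types:
 */
struct sketch_desc {
        unsigned int offset;
        unsigned int length;
};

static unsigned int sketch_fill(struct sketch_desc *descs,
                                unsigned int max_slots,
                                unsigned int offset, unsigned int num)
{
        unsigned int slots = 0;

        while (num && slots < max_slots) {
                /* 4096 stands in for PAGE_SIZE */
                unsigned int this_num = num < 4096 - offset ? num : 4096 - offset;

                descs[slots].offset = offset;   /* was set once, for slot 0 only */
                descs[slots].length = this_num;
                slots++;
                offset = 0;
                num -= this_num;
        }
        return slots;                           /* 0 for a zero sized retrieve */
}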
@@ -1827,6 +1859,9 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
        struct fuse_req *req;
        struct fuse_out_header oh;
 
+       if (current_user_ns() != fc->user_ns)
+               return -EIO;
+
        if (nbytes < sizeof(struct fuse_out_header))
                return -EINVAL;
 
@@ -1862,16 +1897,20 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
 
        /* Is it an interrupt reply? */
        if (req->intr_unique == oh.unique) {
+               __fuse_get_request(req);
                spin_unlock(&fpq->lock);
 
                err = -EINVAL;
-               if (nbytes != sizeof(struct fuse_out_header))
+               if (nbytes != sizeof(struct fuse_out_header)) {
+                       fuse_put_request(fc, req);
                        goto err_finish;
+               }
 
                if (oh.error == -ENOSYS)
                        fc->no_interrupt = 1;
                else if (oh.error == -EAGAIN)
                        queue_interrupt(&fc->iq, req);
+               fuse_put_request(fc, req);
 
                fuse_copy_finish(cs);
                return nbytes;
@@ -1942,11 +1981,14 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
        if (!fud)
                return -EPERM;
 
+       pipe_lock(pipe);
+
        bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
-       if (!bufs)
+       if (!bufs) {
+               pipe_unlock(pipe);
                return -ENOMEM;
+       }
 
-       pipe_lock(pipe);
        nbuf = 0;
        rem = 0;
        for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
@@ -2101,6 +2143,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
                                set_bit(FR_ABORTED, &req->flags);
                                if (!test_bit(FR_LOCKED, &req->flags)) {
                                        set_bit(FR_PRIVATE, &req->flags);
+                                       __fuse_get_request(req);
                                        list_move(&req->list, &to_end1);
                                }
                                spin_unlock(&req->waitq.lock);
@@ -2127,7 +2170,6 @@ void fuse_abort_conn(struct fuse_conn *fc)
 
                while (!list_empty(&to_end1)) {
                        req = list_first_entry(&to_end1, struct fuse_req, list);
-                       __fuse_get_request(req);
                        list_del_init(&req->list);
                        request_end(fc, req);
                }
@@ -2138,6 +2180,13 @@ void fuse_abort_conn(struct fuse_conn *fc)
 }
 EXPORT_SYMBOL_GPL(fuse_abort_conn);
 
+void fuse_wait_aborted(struct fuse_conn *fc)
+{
+       /* matches implicit memory barrier in fuse_drop_waiting() */
+       smp_mb();
+       wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
+}
+
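/*
 * fuse_wait_aborted() pairs with fuse_drop_waiting() added earlier in this
 * diff: the dropping side relies on the full barrier implied by
 * atomic_dec_and_test(), the waiting side on the explicit smp_mb(), so
 * either the dropper sees the connection as dead and issues the wake-up, or
 * the aborter sees the counter already at zero; no wake-up is lost.  A
 * minimal sketch of the pattern, with illustrative names:
 */
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/wait.h>

static atomic_t sketch_users = ATOMIC_INIT(1);
static int sketch_alive = 1;
static DECLARE_WAIT_QUEUE_HEAD(sketch_drain_waitq);

static void sketch_put_user(void)
{
        /* atomic_dec_and_test() implies a full barrier around the decrement */
        if (atomic_dec_and_test(&sketch_users) && !READ_ONCE(sketch_alive))
                wake_up_all(&sketch_drain_waitq);
}

static void sketch_drain_users(void)
{
        WRITE_ONCE(sketch_alive, 0);
        smp_mb();       /* pairs with the barrier in sketch_put_user() */
        wait_event(sketch_drain_waitq, atomic_read(&sketch_users) == 0);
}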
 int fuse_dev_release(struct inode *inode, struct file *file)
 {
        struct fuse_dev *fud = fuse_get_dev(file);
@@ -2145,9 +2194,15 @@ int fuse_dev_release(struct inode *inode, struct file *file)
        if (fud) {
                struct fuse_conn *fc = fud->fc;
                struct fuse_pqueue *fpq = &fud->pq;
+               LIST_HEAD(to_end);
 
+               spin_lock(&fpq->lock);
                WARN_ON(!list_empty(&fpq->io));
-               end_requests(fc, &fpq->processing);
+               list_splice_init(&fpq->processing, &to_end);
+               spin_unlock(&fpq->lock);
+
+               end_requests(fc, &to_end);
+
                /* Are we the last open device? */
                if (atomic_dec_and_test(&fc->dev_count)) {
                        WARN_ON(fc->iq.fasync != NULL);
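/*
 * A minimal sketch of the drain-under-lock pattern this last hunk switches
 * to, with illustrative names: the shared processing list is detached onto a
 * private list while fpq->lock is held, and only the private copy is walked,
 * so end_requests() no longer touches a list that concurrent device users
 * may still be modifying.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct sketch_item {
        struct list_head entry;
};

static void sketch_drain(struct list_head *shared, spinlock_t *lock)
{
        LIST_HEAD(private);
        struct sketch_item *item, *next;

        spin_lock(lock);
        list_splice_init(shared, &private);     /* detach while holding the lock */
        spin_unlock(lock);

        list_for_each_entry_safe(item, next, &private, entry) {
                list_del_init(&item->entry);
                kfree(item);                    /* stand-in for ending the request */
        }
}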