// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

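/*
 * Convert open(2) flags to their on-wire (CEPH_O_*) representation.
 * Flags with no wire equivalent are dropped (and reported via dout).
 */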
static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x", flags);

	return cpu_to_le32(wire_flags);
}

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * Calculate the length sum of direct io vectors that can
 * be combined into one page vector.
 */
static size_t dio_get_pagev_size(const struct iov_iter *it)
{
	const struct iovec *iov = it->iov;
	const struct iovec *iovend = iov + it->nr_segs;
	size_t size;

	size = iov->iov_len - it->iov_offset;
	/*
	 * An iov can be page vectored when both the current tail
	 * and the next base are page aligned.
	 */
	while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) &&
	       (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) {
		size += iov->iov_len;
	}
	dout("dio_get_pagevlen len = %zu\n", size);
	return size;
}

/*
 * Allocate a page vector based on (@it, @nbytes).
 * The return value is the tuple describing a page vector,
 * that is (@pages, @page_align, @num_pages).
 */
static struct page **
dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
		    size_t *page_align, int *num_pages)
{
	struct iov_iter tmp_it = *it;
	size_t align;
	struct page **pages;
	int ret = 0, idx, npages;

	align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
		(PAGE_SIZE - 1);
	npages = calc_pages_for(align, nbytes);
	pages = kvmalloc(sizeof(*pages) * npages, GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

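	/*
	 * iov_iter_get_pages() may pin fewer pages than requested, so
	 * keep pinning until the whole @nbytes range is covered.
	 */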
	for (idx = 0; idx < npages; ) {
		size_t start;
		ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes,
					 npages - idx, &start);
		if (ret < 0)
			goto fail;

		iov_iter_advance(&tmp_it, ret);
		nbytes -= ret;
		idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	BUG_ON(nbytes != 0);
	*num_pages = npages;
	*page_align = align;
	dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align);
	return pages;
fail:
	ceph_put_page_vector(pages, idx, false);
	return ERR_PTR(ret);
}

/*
 * Prepare an open request. Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_register_inode_cookie(inode);
		ceph_fscache_file_set_cookie(inode, file);
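		/* fall through: regular files share the setup below with dirs */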
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!cf) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;

		spin_lock_init(&cf->rw_contexts_lock);
		INIT_LIST_HEAD(&cf->rw_contexts);

		cf->next_offset = 2;
		cf->readdir_cache_idx = -1;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * try to renew caps after the session gets killed.
 */
int ceph_renew_caps(struct inode *inode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_fmode = -1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}


/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode,
		     int *opened)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acls_info acls = {};
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	if (flags & O_CREAT) {
		if (ceph_quota_is_max_files_exceeded(dir))
			return -EDQUOT;
		err = ceph_pre_init_acls(dir, &mode, &acls);
		if (err < 0)
			return err;
	}

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_acl;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (acls.pagelist) {
			req->r_pagelist = acls.pagelist;
			acls.pagelist = NULL;
		}
	}

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);

	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out_req;

	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
			ceph_init_inode_acls(d_inode(dentry), &acls);
			*opened |= FILE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open, opened);
	}
out_req:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
out_acl:
	ceph_release_acls_info(&acls);
	dout("atomic_open result=%d\n", err);
	return err;
}

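/*
 * Tear down per-file state on the last close: drop the fmode
 * reference, free readdir state, and wake any cap waiters.
 */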
int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	WARN_ON(!list_empty(&cf->rw_contexts));
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

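/*
 * retry_op states used by the sync/inline read paths below.
 */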
enum {
	HAVE_RETRIED = 1,
	CHECK_EOF = 2,
	READ_INLINE = 3,
};

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 pos, u64 len,
			struct page **pages, int num_pages,
			int page_align, int *checkeof)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 this_len;
	loff_t i_size;
	int page_idx;
	int ret, read = 0;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
more:
	this_len = len;
	page_idx = (page_align + read) >> PAGE_SHIFT;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  pages + page_idx, num_pages - page_idx,
				  ((page_align + read) & ~PAGE_MASK));
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < len;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	i_size = i_size_read(inode);
	if (ret >= 0) {
		if (was_short && (pos + ret < i_size)) {
			int zlen = min(this_len - ret, i_size - pos - ret);
			int zoff = page_align + read + ret;
			dout(" zero gap %llu to %llu\n",
			     pos + ret, pos + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		read += ret;
		pos += ret;
		len -= ret;
		/* hit a stripe boundary and need to continue */
		if (len && hit_stripe && pos < i_size)
			goto more;
	}

	if (read > 0) {
		ret = read;
		/* did we bounce off eof? */
		if (pos + len > i_size)
			*checkeof = CHECK_EOF;
	}

	dout("striped_read returns %d\n", ret);
	return ret;
}

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *checkeof)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = iocb->ki_pos;
	int num_pages;
	ssize_t ret;
	size_t len = iov_iter_count(to);

	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off,
					   off + len);
	if (ret < 0)
		return ret;

	if (unlikely(to->type & ITER_PIPE)) {
		size_t page_off;
		ret = iov_iter_get_pages_alloc(to, &pages, len,
					       &page_off);
		if (ret <= 0)
			return ret < 0 ? ret : -ENOMEM;
		num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);

		ret = striped_read(inode, off, ret, pages, num_pages,
				   page_off, checkeof);
		if (ret > 0) {
			iov_iter_advance(to, ret);
			off += ret;
		} else {
			iov_iter_advance(to, 0);
		}
		ceph_put_page_vector(pages, num_pages, false);
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages))
			return PTR_ERR(pages);

		ret = striped_read(inode, off, len, pages, num_pages,
				   (off & ~PAGE_MASK), checkeof);
		if (ret > 0) {
			int l, k = 0;
			size_t left = ret;

			while (left) {
				size_t page_off = off & ~PAGE_MASK;
				size_t copy = min_t(size_t, left,
						    PAGE_SIZE - page_off);
				l = copy_page_to_iter(pages[k++], page_off,
						      copy, to);
				off += l;
				left -= l;
				if (l < copy)
					break;
			}
		}
		ceph_release_page_vector(pages, num_pages);
	}

	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %zd\n", ret);
	return ret;
}

struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

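/*
 * Called when the last pending OSD request of an AIO completes:
 * propagate size/dirty-cap updates for writes, drop the cap refs
 * taken at submission, and complete the iocb.
 */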
static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);

	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}

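/*
 * Per-OSD-request completion callback.  Writes that raced with a
 * snapshot (-EOLDSNAPC) are retried from a workqueue, short reads
 * are zero-filled, then control passes to ceph_aio_complete().
 */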
static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	int num_pages = calc_pages_for((u64)osd_data->alignment,
				       osd_data->length);

	dout("ceph_aio_complete_req %p rc %d bytes %llu\n",
	     inode, rc, osd_data->length);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->inode_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && osd_data->length > rc) {
			int zoff = osd_data->alignment + rc;
			int zlen = osd_data->length - rc;
			/*
			 * If read is satisfied by single OSD request,
			 * it can pass EOF.  Otherwise read is within
			 * i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			if (zlen > 0)
				ceph_zero_page_vector_range(zoff, zlen,
							    osd_data->pages);
		}
	}

	ceph_put_page_vector(osd_data->pages, num_pages, aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}

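/*
 * Workqueue context for resubmitting a write that hit -EOLDSNAPC:
 * pick up the newest snap context and reissue the same extent op.
 */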
static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
				      false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;
	req->r_abort_on_full = true;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}

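/*
 * Common O_DIRECT read/write path: map the user iovecs to page
 * vectors and issue one OSD request per object extent, either
 * synchronously or (when safe) as queued AIO.
 */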
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret;
	struct timespec mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && iter_is_iovec(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	if (write) {
		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
							 pos >> PAGE_SHIFT,
							 (pos + count) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = dio_get_pagev_size(iter);
		size_t start = 0;
		ssize_t len;

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = size;
		pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
		if (IS_ERR(pages)) {
			ceph_osdc_put_request(req);
			ret = PTR_ERR(pages);
			break;
		}

		/*
		 * To simplify error handling, allow AIO only when the IO is
		 * within i_size or can be satisfied by a single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range.  this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
						   (pos+len) | (PAGE_SIZE - 1));

			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
						 false, false);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);

			pos += len;
			iov_iter_advance(iter, len);
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);
				ceph_zero_page_vector_range(start + ret, zlen,
							    pages);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		ceph_put_page_vector(pages, num_pages, should_dirty);

		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		iov_iter_advance(iter, len);

		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_unsafe_item);
			list_del_init(&req->r_unsafe_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int ret;
	bool check_caps = false;
	struct timespec mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
	     file, pos, (unsigned)count, snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret != 0) {
			ceph_set_error_write(ci);
			break;
		}

		ceph_clear_error_write(ci);
		pos += len;
		written += len;
		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}

	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *pinned_page = NULL;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		ceph_add_rw_context(fi, &rw_ctx);
		ret = generic_file_read_iter(iocb, to);
		ceph_del_rw_context(fi, &rw_ctx);
	}
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
		put_page(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);
	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	loff_t pos;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

retry_snap:
	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

	/* FIXME: not complete since it doesn't account for being at quota */
	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
			    &got, NULL);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;
		inode_unlock(inode);

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex: the MDS revokes
		 * Fwb caps before sending a truncate message to us, and we
		 * cannot get the Fwb cap while a vmtruncate is pending, so
		 * write and vmtruncate cannot run at the same time.
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		inode_unlock(inode);
	}

	if (written >= 0) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written == -EOLDSNAPC) {
1439 dout("aio_write %p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n",
1440 inode, ceph_vinop(inode), pos, (unsigned)count);
		goto retry_snap;
	}

	if (written >= 0) {
		if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
			iocb->ki_flags |= IOCB_DSYNC;
		written = generic_write_sync(iocb, written);
	}

	goto out_unlocked;

out:
	inode_unlock(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t i_size;
	loff_t ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			goto out;
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			ret = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	inode_unlock(inode);
	return ret;
}

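/*
 * Zero a sub-page range of the page cache, if the page is present.
 */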
static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}

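/*
 * Zero an arbitrary page cache range: partial head page, whole
 * pages via truncation, then a partial tail page.
 */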
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}

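/*
 * Zero the portion of a single RADOS object that falls within the
 * given range (a NULL @length means the whole object, which is then
 * deleted or truncated instead of zeroed).
 */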
static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode),
				    offset, length,
				    0, 1, op,
				    CEPH_OSD_FLAG_WRITE,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}

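/*
 * Zero a file range object by object: handle the partial lead-in up
 * to the next stripe-period boundary, drop whole object sets, then
 * zero the remainder.
 */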
static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}

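/*
 * fallocate(2) handler.  Only FALLOC_FL_KEEP_SIZE and
 * FALLOC_FL_PUNCH_HOLE are supported; hole punching zeroes both the
 * page cache and the backing objects.
 */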
static long ceph_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
		ret = -ENOSPC;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		endoff = offset + length;
		ret = inode_newsize_ok(inode, endoff);
		if (ret)
			goto unlock;
	}

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
	if (ret < 0)
		goto unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset < size)
			ceph_zero_pagecache_range(inode, offset, length);
		ret = ceph_zero_objects(inode, offset, length);
	} else if (endoff > size) {
		truncate_pagecache_range(inode, size, -1);
		if (ceph_inode_set_size(inode, endoff))
			ceph_check_caps(ceph_inode(inode),
					CHECK_CAPS_AUTHONLY, NULL);
	}

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
	.fallocate = ceph_fallocate,
};