// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

	flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x", flags);

	return cpu_to_le32(wire_flags);
}
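/*
 * A quick sketch of the conversion above (values assumed for
 * illustration):
 *
 *	__le32 wire = ceph_flags_sys2wire(O_WRONLY | O_CREAT | O_TRUNC);
 *	// wire == cpu_to_le32(CEPH_O_WRONLY | CEPH_O_CREAT | CEPH_O_TRUNC)
 *
 * Bits with no CEPH_O_* counterpart are left in @flags and show up in
 * the "unused open flags" dout() above.
 */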
/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
/*
 * Calculate the length sum of direct io vectors that can
 * be combined into one page vector.
 */
static size_t dio_get_pagev_size(const struct iov_iter *it)
{
	const struct iovec *iov = it->iov;
	const struct iovec *iovend = iov + it->nr_segs;
	size_t size;

	size = iov->iov_len - it->iov_offset;
	/*
	 * An iov can be page vectored when both the current tail
	 * and the next base are page aligned.
	 */
	while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) &&
	       (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) {
		size += iov->iov_len;
	}
	dout("dio_get_pagevlen len = %zu\n", size);
	return size;
}
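/*
 * Coalescing sketch for dio_get_pagev_size(), assuming PAGE_SIZE == 4096
 * and iov_offset == 0: for iovecs A = {base 0x1000, len 0x2000} and
 * B = {base 0x5000, len 100}, A's tail (0x3000) and B's base are both
 * page aligned, so the loop combines them and returns 0x2000 + 100.
 * If B started at 0x5010 instead, only A's 0x2000 bytes would combine.
 */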
/*
 * Allocate a page vector based on (@it, @nbytes).
 * The return value is the tuple describing a page vector,
 * that is (@pages, @page_align, @num_pages).
 */
static struct page **
dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
		    size_t *page_align, int *num_pages)
{
	struct iov_iter tmp_it = *it;
	size_t align;
	struct page **pages;
	int ret = 0, idx, npages;

	align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
		(PAGE_SIZE - 1);
	npages = calc_pages_for(align, nbytes);
	pages = kvmalloc(sizeof(*pages) * npages, GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	for (idx = 0; idx < npages; ) {
		size_t start;
		ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes,
					 npages - idx, &start);
		if (ret < 0)
			goto fail;
		iov_iter_advance(&tmp_it, ret);
		nbytes -= ret;
		idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	BUG_ON(nbytes != 0);
	*num_pages = npages;
	*page_align = align;
	dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align);
	return pages;
fail:
	ceph_put_page_vector(pages, idx, false);
	return ERR_PTR(ret);
}
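/*
 * Page-count sketch for the allocation above, assuming PAGE_SIZE == 4096:
 * an 8192-byte request whose first iovec starts 4094 bytes into a page
 * has align == 4094, and calc_pages_for(4094, 8192) must cover
 * [4094, 12286), i.e. 3 pages rather than 2.
 */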
/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}
/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_register_inode_cookie(inode);
		ceph_fscache_file_set_cookie(inode, file);
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!cf) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
		cf->next_offset = 2;
		cf->readdir_cache_idx = -1;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}
/*
 * try renew caps after session gets killed.
 */
int ceph_renew_caps(struct inode *inode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_fmode = -1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}
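/*
 * The wanted-caps to open-flags mapping used above, spelled out
 * (a sketch; cap names from the CEPH_CAP_FILE_* set):
 *
 *	CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR  ->  O_RDWR
 *	CEPH_CAP_FILE_RD                     ->  O_RDONLY
 *	CEPH_CAP_FILE_WR                     ->  O_WRONLY
 *
 * so the re-issued open asks the MDS for at least what the currently
 * open file modes already want.
 */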
/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}
/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode,
		     int *opened)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acls_info acls = {};
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	if (flags & O_CREAT) {
		err = ceph_pre_init_acls(dir, &mode, &acls);
		if (err < 0)
			return err;
	}

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_acl;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (acls.pagelist) {
			req->r_pagelist = acls.pagelist;
			acls.pagelist = NULL;
		}
	}

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);

	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out_req;

	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
			ceph_init_inode_acls(d_inode(dentry), &acls);
			*opened |= FILE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open, opened);
	}
out_req:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
out_acl:
	ceph_release_acls_info(&acls);
	dout("atomic_open result=%d\n", err);
	return err;
}
int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}
/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 pos, u64 len,
			struct page **pages, int num_pages,
			int page_align, int *checkeof)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 this_len;
	loff_t i_size;
	int page_idx;
	int ret, read = 0;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
more:
	this_len = len;
	page_idx = (page_align + read) >> PAGE_SHIFT;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  pages + page_idx, num_pages - page_idx,
				  ((page_align + read) & ~PAGE_MASK));
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < len;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	i_size = i_size_read(inode);
	if (ret >= 0) {
		if (was_short && (pos + ret < i_size)) {
			int zlen = min(this_len - ret, i_size - pos - ret);
			int zoff = page_align + read + ret;
			dout(" zero gap %llu to %llu\n",
			     pos + ret, pos + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		read += ret;
		pos += ret;
		len -= ret;

		/* hit stripe and need continue*/
		if (len && hit_stripe && pos < i_size)
			goto more;
	}

	if (read > 0) {
		ret = read;
		/* did we bounce off eof? */
		if (pos + len > i_size)
			*checkeof = CHECK_EOF;
	}

	dout("striped_read returns %d\n", ret);
	return ret;
}
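/*
 * Short-read sketch (sizes assumed for illustration): with pos == 0,
 * len == this_len == 8192 and i_size == 6000, the OSD may return
 * ret == 4096.  pos + ret < i_size, so zlen = min(8192 - 4096,
 * 6000 - 4096) == 1904 bytes are zeroed and ret becomes 6000; since
 * pos + len still exceeds i_size, *checkeof tells the caller to
 * re-verify EOF.
 */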
/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *checkeof)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = iocb->ki_pos;
	int num_pages;
	ssize_t ret;
	size_t len = iov_iter_count(to);

	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off,
					   off + len);
	if (ret < 0)
		return ret;

	if (unlikely(to->type & ITER_PIPE)) {
		size_t page_off;
		ret = iov_iter_get_pages_alloc(to, &pages, len,
					       &page_off);
		if (ret <= 0)
			return -ENOMEM;
		num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);

		ret = striped_read(inode, off, ret, pages, num_pages,
				   page_off, checkeof);
		if (ret > 0) {
			iov_iter_advance(to, ret);
			off += ret;
		} else {
			iov_iter_advance(to, 0);
		}
		ceph_put_page_vector(pages, num_pages, false);
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages))
			return PTR_ERR(pages);

		ret = striped_read(inode, off, len, pages, num_pages,
				   (off & ~PAGE_MASK), checkeof);
		if (ret > 0) {
			int l, k = 0;
			size_t left = ret;

			while (left) {
				size_t page_off = off & ~PAGE_MASK;
				size_t copy = min_t(size_t, left,
						    PAGE_SIZE - page_off);
				l = copy_page_to_iter(pages[k++], page_off,
						      copy, to);
				off += l;
				left -= l;
				if (l < copy)
					break;
			}
		}
		ceph_release_page_vector(pages, num_pages);
	}

	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %zd\n", ret);
	return ret;
}
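/*
 * Copy-loop sketch (numbers assumed, PAGE_SIZE == 4096): for off == 4094
 * and a striped_read() result of 4100 bytes, page_off == 4094 on the
 * first pass, so copy_page_to_iter() moves just 2 bytes from pages[0];
 * the remaining 4098 bytes then come page-aligned from pages[1] and
 * pages[2].
 */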
struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};
static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
					        CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}
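/*
 * Completion-ordering sketch: ceph_direct_read_write() bumps
 * pending_reqs once per submitted OSD request, so a write split into
 * three requests completes 3 -> 2 -> 1 -> 0 and only the final
 * atomic_dec_and_test() above reports to the iocb; any earlier failure
 * is latched into aio_req->error by the cmpxchg() in
 * ceph_aio_complete_req().
 */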
static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	int num_pages = calc_pages_for((u64)osd_data->alignment,
				       osd_data->length);

	dout("ceph_aio_complete_req %p rc %d bytes %llu\n",
	     inode, rc, osd_data->length);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->wb_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && osd_data->length > rc) {
			int zoff = osd_data->alignment + rc;
			int zlen = osd_data->length - rc;
			/*
			 * If read is satisfied by single OSD request,
			 * it can pass EOF. Otherwise read is within
			 * i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			if (zlen > 0)
				ceph_zero_page_vector_range(zoff, zlen,
							    osd_data->pages);
		}
	}

	ceph_put_page_vector(osd_data->pages, num_pages, aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
}
static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
				      false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;
	req->r_abort_on_full = true;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}
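/*
 * Retry-context note: -EOLDSNAPC means the write raced with a snapshot,
 * so the work item above re-issues the same extent op under the newest
 * snap context (the last pending cap snap if one exists, otherwise
 * i_head_snapc) instead of failing the aio.
 */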
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret;
	struct timespec mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && iter_is_iovec(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc ? snapc->seq : 0);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	if (write) {
		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = dio_get_pagev_size(iter);
		size_t start = 0;
		ssize_t len;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    /*include a 'startsync' op*/
					    write ? 2 : 1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		len = size;
		pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
		if (IS_ERR(pages)) {
			ceph_osdc_put_request(req);
			ret = PTR_ERR(pages);
			break;
		}

		/*
		 * To simplify error handling, allow AIO when IO within i_size
		 * or IO can be satisfied by single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range. this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
					(pos+len) | (PAGE_SIZE - 1));

			osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
						 false, false);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);

			pos += len;
			iov_iter_advance(iter, len);
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);
				ceph_zero_page_vector_range(start + ret, zlen,
							    pages);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		ceph_put_page_vector(pages, num_pages, should_dirty);

		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		iov_iter_advance(iter, len);

		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_unsafe_item);
			list_del_init(&req->r_unsafe_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}
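/*
 * AIO-eligibility sketch for the check above (sizes assumed): a direct
 * write starting at iocb->ki_pos that the striper splits across two OSD
 * requests still qualifies while pos + count <= i_size; a write that
 * extends the file only qualifies when a single request covers it all
 * (len == count), so a partial failure can never leave an ambiguous
 * file size.
 */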
/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int ret;
	bool check_caps = false;
	struct timespec mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
	     file, pos, (unsigned)count, snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret != 0) {
			ceph_set_error_write(ci);
			break;
		}

		ceph_clear_error_write(ci);
		pos += len;
		written += len;
		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}
/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *pinned_page = NULL;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		current->journal_info = filp;
		ret = generic_file_read_iter(iocb, to);
		current->journal_info = NULL;
	}
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
		put_page(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);
	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}
/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	loff_t pos;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

retry_snap:
	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

	/* FIXME: not complete since it doesn't account for being at quota */
	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
			    &got, NULL);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;
		inode_unlock(inode);

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
					list_last_entry(&ci->i_cap_snaps,
							struct ceph_cap_snap,
							ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex. Because
		 * the MDS revokes Fwb caps before sending truncate
		 * message to us. We can't get Fwb cap while there
		 * are pending vmtruncate. So write and vmtruncate
		 * can not run at the same time
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		inode_unlock(inode);
	}

	if (written >= 0) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)count);
		goto retry_snap;
	}

	if (written >= 0) {
		if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
			iocb->ki_flags |= IOCB_DSYNC;
		written = generic_write_sync(iocb, written);
	}

	goto out_unlocked;

out:
	inode_unlock(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}
/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t i_size;
	loff_t ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			goto out;
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			ret = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	inode_unlock(inode);
	return ret;
}
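/*
 * SEEK_CUR fast-path sketch: lseek(fd, 0, SEEK_CUR) returns
 * file->f_pos without calling vfs_setpos(), so a concurrent read(),
 * write() or lseek() that moves f_pos in the meantime cannot have its
 * update clobbered by this "no-op" seek.
 */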
static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}
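/*
 * Partition sketch for the helper above, assuming PAGE_SIZE == 4096:
 * zeroing offset == 5000, length == 10000 becomes a partial head
 * [5000, 8192) of 3192 bytes, one whole truncated page [8192, 12288),
 * and a partial tail [12288, 15000) of 2712 bytes.
 */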
static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					ceph_vino(inode),
					offset, length,
					0, 1, op,
					CEPH_OSD_FLAG_WRITE,
					NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}
static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}
static long ceph_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
		ret = -ENOSPC;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		endoff = offset + length;
		ret = inode_newsize_ok(inode, endoff);
		if (ret)
			goto unlock;
	}

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
	if (ret < 0)
		goto unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset < size)
			ceph_zero_pagecache_range(inode, offset, length);
		ret = ceph_zero_objects(inode, offset, length);
	} else if (endoff > size) {
		truncate_pagecache_range(inode, size, -1);
		if (ceph_inode_set_size(inode, endoff))
			ceph_check_caps(ceph_inode(inode),
					CHECK_CAPS_AUTHONLY, NULL);
	}

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}
const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl	= ceph_ioctl,
	.fallocate	= ceph_fallocate,
};