// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/striper.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/iversion.h>
#include <linux/ktime.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "io.h"
#include "metric.h"

static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

	flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x\n", flags);

	return cpu_to_le32(wire_flags);
}

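/*
 * Note: for a single flag, the ceph_sys2wire() macro above expands to a
 * sketch like this (shown for O_CREAT):
 *
 *	if (flags & O_CREAT) {
 *		wire_flags |= CEPH_O_CREAT;
 *		flags &= ~O_CREAT;
 *	}
 *
 * Each handled bit is cleared from @flags, so whatever remains afterwards
 * is reported by the "unused open flags" debug message.
 */
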
/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES	64

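/*
 * With 4 KiB pages, one batch of ITER_GET_BVECS_PAGES pages covers up to
 * 256 KiB of user memory per iov_iter_get_pages2() call; larger I/Os are
 * simply handled over multiple iterations of the loop below.
 */
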
static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
				struct bio_vec *bvecs)
{
	size_t size = 0;
	int bvec_idx = 0;

	if (maxsize > iov_iter_count(iter))
		maxsize = iov_iter_count(iter);

	while (size < maxsize) {
		struct page *pages[ITER_GET_BVECS_PAGES];
		ssize_t bytes;
		size_t start;
		int idx = 0;

		bytes = iov_iter_get_pages2(iter, pages, maxsize - size,
					    ITER_GET_BVECS_PAGES, &start);
		if (bytes < 0)
			return size ?: bytes;

		size += bytes;

		for ( ; bytes; idx++, bvec_idx++) {
			struct bio_vec bv = {
				.bv_page = pages[idx],
				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
				.bv_offset = start,
			};

			bvecs[bvec_idx] = bv;
			bytes -= bv.bv_len;
			start = 0;
		}
	}

	return size;
}

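/*
 * Note on the loop above: only the first bvec of a batch can start at a
 * non-zero in-page offset (@start); once that page is consumed, @start is
 * reset to 0, so every following bvec begins at a page boundary.
 */
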
/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
				    struct bio_vec **bvecs, int *num_bvecs)
{
	struct bio_vec *bv;
	size_t orig_count = iov_iter_count(iter);
	ssize_t bytes;
	int npages;

	iov_iter_truncate(iter, maxsize);
	npages = iov_iter_npages(iter, INT_MAX);
	iov_iter_reexpand(iter, orig_count);

	/*
	 * __iter_get_bvecs() may populate only part of the array -- zero it
	 * out.
	 */
	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
	if (!bv)
		return -ENOMEM;

	bytes = __iter_get_bvecs(iter, maxsize, bv);
	if (bytes < 0) {
		/*
		 * No pages were pinned -- just free the array.
		 */
		kvfree(bv);
		return bytes;
	}

	*bvecs = bv;
	*num_bvecs = npages;
	return bytes;
}

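/*
 * The iov_iter_truncate()/iov_iter_reexpand() pair above only serves to
 * bound iov_iter_npages() to @maxsize worth of the iterator; the iterator
 * itself is left untouched for __iter_get_bvecs() to consume.
 */
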
static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
	int i;

	for (i = 0; i < num_bvecs; i++) {
		if (bvecs[i].bv_page) {
			if (should_dirty)
				set_page_dirty_lock(bvecs[i].bv_page);
			put_page(bvecs[i].bv_page);
		}
	}
	kvfree(bvecs);
}

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

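/*
 * Callers (ceph_renew_caps(), ceph_open(), ceph_atomic_open()) follow
 * roughly this pattern -- a sketch, not verbatim from any one caller:
 *
 *	req = prepare_open_request(inode->i_sb, flags, 0);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_inode = inode;
 *	err = ceph_mdsc_do_request(mdsc, NULL, req);
 *	ceph_mdsc_put_request(req);
 */
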
static int ceph_init_file_info(struct inode *inode, struct file *file,
					int fmode, bool isdir)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mount_options *opt =
		ceph_inode_to_client(&ci->netfs.inode)->mount_options;
	struct ceph_file_info *fi;
	int ret;

	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
			inode->i_mode, isdir ? "dir" : "regular");
	BUG_ON(inode->i_fop->release != ceph_release);

	if (isdir) {
		struct ceph_dir_file_info *dfi =
			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
		if (!dfi)
			return -ENOMEM;

		file->private_data = dfi;
		fi = &dfi->file_info;
		dfi->next_offset = 2;
		dfi->readdir_cache_idx = -1;
	} else {
		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!fi)
			return -ENOMEM;

		if (opt->flags & CEPH_MOUNT_OPT_NOPAGECACHE)
			fi->flags |= CEPH_F_SYNC;

		file->private_data = fi;
	}

	ceph_get_fmode(ci, fmode, 1);
	fi->fmode = fmode;

	spin_lock_init(&fi->rw_contexts_lock);
	INIT_LIST_HEAD(&fi->rw_contexts);
	fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);

	if ((file->f_mode & FMODE_WRITE) &&
	    ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file);
		if (ret < 0)
			goto error;
	}

	return 0;

error:
	ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
	ceph_put_fmode(ci, fi->fmode, 1);
	kmem_cache_free(ceph_file_cachep, fi);
	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return ret;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_use_cookie(inode, file->f_mode & FMODE_WRITE);
		fallthrough;
	case S_IFDIR:
		ret = ceph_init_file_info(inode, file, fmode,
						S_ISDIR(inode->i_mode));
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		BUG_ON(inode->i_fop->release == ceph_release);
		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

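/*
 * Only regular files and directories get a ceph_file_info and hold an
 * fmode reference; symlinks and special files fall through to the
 * underlying fops without one, which is why the default case above must
 * not have .release set to ceph_release.
 */
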
/*
 * try renew caps after session gets killed.
 */
int ceph_renew_caps(struct inode *inode, int fmode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	__ceph_touch_fmode(ci, mdsc, fmode);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *fi = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (fi) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR)
		return ceph_init_file(inode, file, fmode);

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}

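/*
 * In the common case the fast path above avoids an MDS round trip
 * entirely: if the caps covering the requested fmode are already held
 * (auth caps for write, any caps for read), the open is satisfied
 * locally and only the wanted-caps bookkeeping is updated.
 */
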
/* Clone the layout from a synchronous create, if the dir now has Dc caps */
static void
cache_file_layout(struct inode *dst, struct inode *src)
{
	struct ceph_inode_info *cdst = ceph_inode(dst);
	struct ceph_inode_info *csrc = ceph_inode(src);

	spin_lock(&cdst->i_ceph_lock);
	if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
	    !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
		memcpy(&cdst->i_cached_layout, &csrc->i_layout,
			sizeof(cdst->i_cached_layout));
		rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
				   ceph_try_get_string(csrc->i_layout.pool_ns));
	}
	spin_unlock(&cdst->i_ceph_lock);
}

/*
 * Try to set up an async create. We need caps, a file layout, and inode number,
 * and either a lease on the dentry or complete dir info. If any of those
 * criteria are not satisfied, then return false and the caller can go
 * synchronous.
 */
static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
				 struct ceph_file_layout *lo, u64 *pino)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
	u64 ino;

	spin_lock(&ci->i_ceph_lock);
	/* No auth cap means no chance for Dc caps */
	if (!ci->i_auth_cap)
		goto no_async;

	/* Any delegated inos? */
	if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
		goto no_async;

	if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
		goto no_async;

	if ((__ceph_caps_issued(ci, NULL) & want) != want)
		goto no_async;

	if (d_in_lookup(dentry)) {
		if (!__ceph_dir_is_complete(ci))
			goto no_async;
		spin_lock(&dentry->d_lock);
		di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
		spin_unlock(&dentry->d_lock);
	} else if (atomic_read(&ci->i_shared_gen) !=
		   READ_ONCE(di->lease_shared_gen)) {
		goto no_async;
	}

	ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
	if (!ino)
		goto no_async;

	*pino = ino;
	ceph_take_cap_refs(ci, want, false);
	memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
	rcu_assign_pointer(lo->pool_ns,
			   ceph_try_get_string(ci->i_cached_layout.pool_ns));
	got = want;
no_async:
	spin_unlock(&ci->i_ceph_lock);
	return got;
}

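/*
 * try_prep_async_create() returns the cap mask it managed to take
 * (Fx | Dc on success) or 0 if any precondition failed, in which case
 * ceph_atomic_open() falls back to a synchronous create.
 */
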
static void restore_deleg_ino(struct inode *dir, u64 ino)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_session *s = NULL;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		s = ceph_get_mds_session(ci->i_auth_cap->session);
	spin_unlock(&ci->i_ceph_lock);
	if (s) {
		int err = ceph_restore_deleg_ino(s, ino);
		if (err)
			pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
				ino, err);
		ceph_put_mds_session(s);
	}
}

static void wake_async_create_waiters(struct inode *inode,
				      struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
		ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
		wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);
	}
	ceph_kick_flushing_inode_caps(session, ci);
	spin_unlock(&ci->i_ceph_lock);
}

static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	struct dentry *dentry = req->r_dentry;
	struct inode *dinode = d_inode(dentry);
	struct inode *tinode = req->r_target_inode;
	int result = req->r_err ? req->r_err :
			le32_to_cpu(req->r_reply_info.head->result);

	WARN_ON_ONCE(dinode && tinode && dinode != tinode);

	/* MDS changed -- caller must resubmit */
	if (result == -EJUKEBOX)
		goto out;

	mapping_set_error(req->r_parent->i_mapping, result);

	if (result) {
		int pathlen = 0;
		u64 base = 0;
		char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
						  &base, 0);

		pr_warn("ceph: async create failure path=(%llx)%s result=%d!\n",
			base, IS_ERR(path) ? "<<bad>>" : path, result);
		ceph_mdsc_free_path(path, pathlen);

		ceph_dir_clear_complete(req->r_parent);
		if (!d_unhashed(dentry))
			d_drop(dentry);

		if (dinode) {
			mapping_set_error(dinode->i_mapping, result);
			ceph_inode_shutdown(dinode);
			wake_async_create_waiters(dinode, req->r_session);
		}
	}

	if (tinode) {
		u64 ino = ceph_vino(tinode).ino;

		if (req->r_deleg_ino != ino)
			pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
				__func__, req->r_err, req->r_deleg_ino, ino);

		mapping_set_error(tinode->i_mapping, result);
		wake_async_create_waiters(tinode, req->r_session);
	} else if (!result) {
		pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
			req->r_deleg_ino);
	}
out:
	ceph_mdsc_release_dir_caps(req);
}

static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
				    struct file *file, umode_t mode,
				    struct ceph_mds_request *req,
				    struct ceph_acl_sec_ctx *as_ctx,
				    struct ceph_file_layout *lo)
{
	int ret;
	char xattr_buf[4];
	struct ceph_mds_reply_inode in = { };
	struct ceph_mds_reply_info_in iinfo = { .in = &in };
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct inode *inode;
	struct timespec64 now;
	struct ceph_string *pool_ns;
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
	struct ceph_vino vino = { .ino = req->r_deleg_ino,
				  .snap = CEPH_NOSNAP };

	ktime_get_real_ts64(&now);

	inode = ceph_get_inode(dentry->d_sb, vino);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	iinfo.inline_version = CEPH_INLINE_NONE;
	iinfo.change_attr = 1;
	ceph_encode_timespec64(&iinfo.btime, &now);

	if (req->r_pagelist) {
		iinfo.xattr_len = req->r_pagelist->length;
		iinfo.xattr_data = req->r_pagelist->mapped_tail;
	} else {
		/* fake it */
		iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
		iinfo.xattr_data = xattr_buf;
		memset(iinfo.xattr_data, 0, iinfo.xattr_len);
	}

	in.ino = cpu_to_le64(vino.ino);
	in.snapid = cpu_to_le64(CEPH_NOSNAP);
	in.version = cpu_to_le64(1);	// ???
	in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
	in.cap.cap_id = cpu_to_le64(1);
	in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
	in.cap.flags = CEPH_CAP_FLAG_AUTH;
	in.ctime = in.mtime = in.atime = iinfo.btime;
	in.truncate_seq = cpu_to_le32(1);
	in.truncate_size = cpu_to_le64(-1ULL);
	in.xattr_version = cpu_to_le64(1);
	in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
	if (dir->i_mode & S_ISGID) {
		in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));

		/* Directories always inherit the setgid bit. */
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
	}
	in.mode = cpu_to_le32((u32)mode);

	in.nlink = cpu_to_le32(1);
	in.max_size = cpu_to_le64(lo->stripe_unit);

	ceph_file_layout_to_legacy(lo, &in.layout);
	/* lo is private, so pool_ns can't change */
	pool_ns = rcu_dereference_raw(lo->pool_ns);
	if (pool_ns) {
		iinfo.pool_ns_len = pool_ns->len;
		iinfo.pool_ns_data = pool_ns->str;
	}

	down_read(&mdsc->snap_rwsem);
	ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
			      req->r_fmode, NULL);
	up_read(&mdsc->snap_rwsem);
	if (ret) {
		dout("%s failed to fill inode: %d\n", __func__, ret);
		ceph_dir_clear_complete(dir);
		if (!d_unhashed(dentry))
			d_drop(dentry);
		if (inode->i_state & I_NEW)
			discard_new_inode(inode);
	} else {
		struct dentry *dn;

		dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
			vino.ino, ceph_ino(dir), dentry->d_name.name);
		ceph_dir_clear_ordered(dir);
		ceph_init_inode_acls(inode, as_ctx);
		if (inode->i_state & I_NEW) {
			/*
			 * If it's not I_NEW, then someone created this before
			 * we got here. Assume the server is aware of it at
			 * that point and don't worry about setting
			 * CEPH_I_ASYNC_CREATE.
			 */
			ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
			unlock_new_inode(inode);
		}
		if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
			if (!d_unhashed(dentry))
				d_drop(dentry);
			dn = d_splice_alias(inode, dentry);
			WARN_ON_ONCE(dn && dn != dentry);
		}
		file->f_mode |= FMODE_CREATED;
		ret = finish_open(file, dentry, ceph_open);
	}
	return ret;
}

/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acl_sec_ctx as_ctx = {};
	bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	err = 0;
	if (flags & O_CREAT) {
		if (ceph_quota_is_max_files_exceeded(dir))
			return -EDQUOT;
		err = ceph_pre_init_acls(dir, &mode, &as_ctx);
		if (err < 0)
			return err;
		err = ceph_security_init_secctx(dentry, mode, &as_ctx);
		if (err < 0)
			goto out_ctx;
		/* Async create can't handle more than a page of xattrs */
		if (as_ctx.pagelist &&
		    !list_is_singular(&as_ctx.pagelist->head))
			try_async = false;
	} else if (!d_in_lookup(dentry)) {
		/* If it's not being looked up, it's negative */
		return -ENOENT;
	}

retry:
	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_ctx;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);
	req->r_parent = dir;
	ihold(dir);

	if (flags & O_CREAT) {
		struct ceph_file_layout lo;

		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (as_ctx.pagelist) {
			req->r_pagelist = as_ctx.pagelist;
			as_ctx.pagelist = NULL;
		}
		if (try_async &&
		    (req->r_dir_caps =
		      try_prep_async_create(dir, dentry, &lo,
					    &req->r_deleg_ino))) {
			set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
			req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
			req->r_callback = ceph_async_create_cb;

			err = ceph_mdsc_submit_request(mdsc, dir, req);
			if (!err) {
				err = ceph_finish_async_create(dir, dentry,
							file, mode, req,
							&as_ctx, &lo);
			} else if (err == -EJUKEBOX) {
				restore_deleg_ino(dir, req->r_deleg_ino);
				ceph_mdsc_put_request(req);
				try_async = false;
				ceph_put_string(rcu_dereference_raw(lo.pool_ns));
				goto retry;
			}
			ceph_put_string(rcu_dereference_raw(lo.pool_ns));
			goto out_req;
		}
	}

	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	if (err == -ENOENT) {
		dentry = ceph_handle_snapdir(req, dentry);
		if (IS_ERR(dentry)) {
			err = PTR_ERR(dentry);
			goto out_req;
		}
		err = 0;
	}

	if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
			struct inode *newino = d_inode(dentry);

			cache_file_layout(dir, newino);
			ceph_init_inode_acls(newino, &as_ctx);
			file->f_mode |= FMODE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open);
	}
out_req:
	ceph_mdsc_put_request(req);
out_ctx:
	ceph_release_acl_sec_ctx(&as_ctx);
	dout("atomic_open result=%d\n", err);
	return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (S_ISDIR(inode->i_mode)) {
		struct ceph_dir_file_info *dfi = file->private_data;
		dout("release inode %p dir file %p\n", inode, file);
		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

		ceph_put_fmode(ci, dfi->file_info.fmode, 1);

		if (dfi->last_readdir)
			ceph_mdsc_put_request(dfi->last_readdir);
		kfree(dfi->last_name);
		kfree(dfi->dir_info);
		kmem_cache_free(ceph_dir_file_cachep, dfi);
	} else {
		struct ceph_file_info *fi = file->private_data;
		dout("release inode %p regular file %p\n", inode, file);
		WARN_ON(!list_empty(&fi->rw_contexts));

		ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
		ceph_put_fmode(ci, fi->fmode, 1);

		kmem_cache_free(ceph_file_cachep, fi);
	}

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

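/*
 * The ceph_put_fmode() calls above drop the reference taken by
 * ceph_get_fmode() in ceph_init_file_info(); the final wake_up_all()
 * lets any cap waiters re-evaluate now that this open file is gone.
 */
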
enum {
	HAVE_RETRIED = 1,
	CHECK_EOF =    2,
	READ_INLINE =  3,
};

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.  (That's not
 * atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *retry_op)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	ssize_t ret;
	u64 off = iocb->ki_pos;
	u64 len = iov_iter_count(to);
	u64 i_size = i_size_read(inode);

	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping,
					   off, off + len - 1);
	if (ret < 0)
		return ret;

	ret = 0;
	while ((len = iov_iter_count(to)) > 0) {
		struct ceph_osd_request *req;
		struct page **pages;
		int num_pages;
		size_t page_off;
		bool more;
		int idx;
		size_t left;

		req = ceph_osdc_new_request(osdc, &ci->i_layout,
					ci->i_vino, off, &len, 0, 1,
					CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
					NULL, ci->i_truncate_seq,
					ci->i_truncate_size, false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		more = len < iov_iter_count(to);

		num_pages = calc_pages_for(off, len);
		page_off = off & ~PAGE_MASK;
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ceph_osdc_put_request(req);
			ret = PTR_ERR(pages);
			break;
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
						 false, false);
		ret = ceph_osdc_start_request(osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(osdc, req);

		ceph_update_read_metrics(&fsc->mdsc->metric,
					 req->r_start_latency,
					 req->r_end_latency,
					 len, ret);

		ceph_osdc_put_request(req);

		i_size = i_size_read(inode);
		dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
		     off, len, ret, i_size, (more ? " MORE" : ""));

		if (ret == -ENOENT)
			ret = 0;
		if (ret >= 0 && ret < len && (off + ret < i_size)) {
			int zlen = min(len - ret, i_size - off - ret);
			int zoff = page_off + ret;
			dout("sync_read zero gap %llu~%llu\n",
			     off + ret, off + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		idx = 0;
		left = ret > 0 ? ret : 0;
		while (left > 0) {
			size_t plen, copied;

			page_off = off & ~PAGE_MASK;
			plen = min_t(size_t, left, PAGE_SIZE - page_off);
			SetPageUptodate(pages[idx]);
			copied = copy_page_to_iter(pages[idx++],
						   page_off, plen, to);
			off += copied;
			left -= copied;
			if (copied < plen) {
				ret = -EFAULT;
				break;
			}
		}
		ceph_release_page_vector(pages, num_pages);

		if (ret < 0) {
			if (ret == -EBLOCKLISTED)
				fsc->blocklisted = true;
			break;
		}

		if (off >= i_size || !more)
			break;
	}

	if (off > iocb->ki_pos) {
		if (off >= i_size) {
			*retry_op = CHECK_EOF;
			ret = i_size - iocb->ki_pos;
			iocb->ki_pos = i_size;
		} else {
			ret = off - iocb->ki_pos;
			iocb->ki_pos = off;
		}
	}

	dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
	return ret;
}

struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec64 mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

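/*
 * Lifecycle of a ceph_aio_request: ceph_direct_read_write() queues one
 * OSD request per object extent and bumps pending_reqs for each;
 * ceph_aio_complete_req() records the first error via cmpxchg() and the
 * last completion to drop pending_reqs to zero calls ceph_aio_complete(),
 * which finishes the original iocb.
 */
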
static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	if (aio_req->iocb->ki_flags & IOCB_DIRECT)
		inode_dio_end(inode);

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);

	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}

static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
	unsigned int len = osd_data->bvec_pos.iter.bi_size;

	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
	BUG_ON(!osd_data->num_bvecs);

	dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->inode_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && len > rc) {
			struct iov_iter i;
			int zlen = len - rc;

			/*
			 * If read is satisfied by single OSD request,
			 * it can pass EOF. Otherwise read is within
			 * i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
				      osd_data->num_bvecs, len);
			iov_iter_advance(&i, rc);
			iov_iter_zero(zlen, &i);
		}
	}

	/* r_start_latency == 0 means the request was not submitted */
	if (req->r_start_latency) {
		if (aio_req->write)
			ceph_update_write_metrics(metric, req->r_start_latency,
						  req->r_end_latency, len, rc);
		else
			ceph_update_read_metrics(metric, req->r_start_latency,
						 req->r_end_latency, len, rc);
	}

	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
		  aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
}

static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
			false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}

static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_client_metric *metric = &fsc->mdsc->metric;
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct bio_vec *bvecs;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	ssize_t ret = 0;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && user_backed_iter(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc ? snapc->seq : 0);

	if (write) {
		int ret2;

		ceph_fscache_invalidate(inode, true);

		ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count - 1) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n",
			     ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = iov_iter_count(iter);
		ssize_t len;

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
		if (len < 0) {
			ceph_osdc_put_request(req);
			ret = len;
			break;
		}
		if (len != size)
			osd_req_op_extent_update(req, 0, len);

		/*
		 * To simplify error handling, allow AIO when IO within i_size
		 * or IO can be satisfied by single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range. this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
						   PAGE_ALIGN(pos + len) - 1);

			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_private_item, &aio_req->osd_reqs);

			pos += len;
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		if (write)
			ceph_update_write_metrics(metric, req->r_start_latency,
						  req->r_end_latency, len, ret);
		else
			ceph_update_read_metrics(metric, req->r_start_latency,
						 req->r_end_latency, len, ret);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				struct iov_iter i;
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);

				iov_iter_bvec(&i, READ, bvecs, num_pages, len);
				iov_iter_advance(&i, ret);
				iov_iter_zero(zlen, &i);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		put_bvecs(bvecs, num_pages, should_dirty);
		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		inode_dio_begin(inode);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_private_item);
			list_del_init(&req->r_private_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}

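/*
 * Note the return convention above: for queued AIO the caller sees
 * -EIOCBQUEUED and completion is reported later through ki_complete();
 * for the synchronous path, ki_pos is advanced here and the byte count
 * is derived from it (except after -EOLDSNAPC, which the write path
 * retries with a fresh snap context).
 */
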
/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int ret;
	bool check_caps = false;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
	     file, pos, (unsigned)count, snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping,
					   pos, pos + count - 1);
	if (ret < 0)
		return ret;

	ceph_fscache_invalidate(inode, false);
	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count - 1) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size, false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
					  req->r_end_latency, len, ret);
out:
		ceph_osdc_put_request(req);
		if (ret != 0) {
			ceph_set_error_write(ci);
			break;
		}

		ceph_clear_error_write(ci);
		pos += len;
		written += len;
		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}

	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
	ssize_t ret;
	int want = 0, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (ceph_inode_is_shutdown(inode))
		return -ESTALE;

	if (direct_lock)
		ceph_start_io_direct(inode);
	else
		ceph_start_io_read(inode);

	if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
		want |= CEPH_CAP_FILE_CACHE;
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want |= CEPH_CAP_FILE_LAZYIO;

	ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
	if (ret < 0) {
		if (direct_lock)
			ceph_end_io_direct(inode);
		else
			ceph_end_io_read(inode);
		return ret;
	}

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		ceph_add_rw_context(fi, &rw_ctx);
		ret = generic_file_read_iter(iocb, to);
		ceph_del_rw_context(fi, &rw_ctx);
	}

	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);

	if (direct_lock)
		ceph_end_io_direct(inode);
	else
		ceph_end_io_read(inode);

	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;

		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want = 0, got;
	bool direct_lock = false;
	u32 map_flags;
	u64 pool_flags;
	loff_t pos;
	loff_t limit = max(i_size_read(inode), fsc->max_file_size);

	if (ceph_inode_is_shutdown(inode))
		return -ESTALE;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
		direct_lock = true;

retry_snap:
	if (direct_lock)
		ceph_start_io_direct(inode);
	else
		ceph_start_io_write(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	if (unlikely(pos >= limit)) {
		err = -EFBIG;
		goto out;
	} else {
		iov_iter_truncate(from, limit - pos);
	}

	count = iov_iter_count(from);
	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
		err = -EDQUOT;
		goto out;
	}

	down_read(&osdc->lock);
	map_flags = osdc->osdmap->flags;
	pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
	up_read(&osdc->lock);
	if ((map_flags & CEPH_OSDMAP_FULL) ||
	    (pool_flags & CEPH_POOL_FLAG_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	err = file_remove_privs(file);
	if (err)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
		want |= CEPH_CAP_FILE_BUFFER;
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want |= CEPH_CAP_FILE_LAZYIO;

	got = 0;
	err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got);
	if (err < 0)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out_caps;

	inode_inc_iversion_raw(inode);

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
					list_last_entry(&ci->i_cap_snaps,
							struct ceph_cap_snap,
							ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (direct_lock)
			ceph_end_io_direct(inode);
		else
			ceph_end_io_write(inode);
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex. Because
		 * the MDS revokes Fwb caps before sending truncate
		 * message to us. We can't get Fwb cap while there
		 * are pending vmtruncate. So write and vmtruncate
		 * can not run at the same time
		 */
		written = generic_perform_write(iocb, from);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		ceph_end_io_write(inode);
	}

	if (written >= 0) {
		int dirty;

		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
			ceph_check_caps(ci, 0, NULL);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)count);
		goto retry_snap;
	}

	if (written >= 0) {
		if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
		    (pool_flags & CEPH_POOL_FLAG_NEARFULL))
			iocb->ki_flags |= IOCB_DSYNC;
		written = generic_write_sync(iocb, written);
	}

	goto out_unlocked;
out_caps:
	ceph_put_cap_refs(ci, got);
out:
	if (direct_lock)
		ceph_end_io_direct(inode);
	else
		ceph_end_io_write(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	loff_t i_size;
	loff_t ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			goto out;
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			ret = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));

out:
	inode_unlock(inode);
	return ret;
}

static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}

static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}

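/*
 * Worked example (4 KiB pages, hypothetical numbers): offset=1000,
 * length=10000.  The head partial page zeroes bytes [1000, 4096), the
 * round_down()'d middle is dropped via truncate_pagecache_range() over
 * [4096, 8191], and the tail partial page zeroes bytes [8192, 11000).
 */
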
static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					ceph_vino(inode),
					offset, length,
					0, 1, op,
					CEPH_OSD_FLAG_WRITE,
					NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}

static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}

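/*
 * Sketch of the three phases above, assuming a hypothetical layout of
 * stripe_unit=1M, stripe_count=2, object_size=4M (object_set_size=8M):
 * a punch at offset=5M, length=20M first zeroes partial objects up to
 * the next 8M period boundary, then truncates/deletes whole objects one
 * period (8M) at a time, and finally zeroes whatever partial tail is
 * left.
 */
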
static long ceph_fallocate(struct file *file, int mode,
				loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	size = i_size_read(inode);

	/* Are we punching a hole beyond EOF? */
	if (offset >= size)
		goto unlock;
	if ((offset + length) > size)
		length = size - offset;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got);
	if (ret < 0)
		goto unlock;

	filemap_invalidate_lock(inode->i_mapping);
	ceph_fscache_invalidate(inode, false);
	ceph_zero_pagecache_range(inode, offset, length);
	ret = ceph_zero_objects(inode, offset, length);

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}
	filemap_invalidate_unlock(inode->i_mapping);

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}

/*
 * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
 * src_ci.  Two attempts are made to obtain both caps, and an error is
 * returned if this fails; zero is returned on success.
 */
static int get_rd_wr_caps(struct file *src_filp, int *src_got,
			  struct file *dst_filp,
			  loff_t dst_endoff, int *dst_got)
{
	int ret = 0;
	bool retrying = false;

retry_caps:
	ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
			    dst_endoff, dst_got);
	if (ret < 0)
		return ret;

	/*
	 * Since we're already holding the FILE_WR capability for the dst file,
	 * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
	 * retry dance instead to try to get both capabilities.
	 */
	ret = ceph_try_get_caps(file_inode(src_filp),
				CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
				false, src_got);
	if (ret <= 0) {
		/* Start by dropping dst_ci caps and getting src_ci caps */
		ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
		if (retrying) {
			if (!ret)
				/* ceph_try_get_caps masks EAGAIN */
				ret = -EAGAIN;
			return ret;
		}
		ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
				    CEPH_CAP_FILE_SHARED, -1, src_got);
		if (ret < 0)
			return ret;
		/*... drop src_ci caps too, and retry */
		ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
		retrying = true;
		goto retry_caps;
	}
	return ret;
}

static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
			   struct ceph_inode_info *dst_ci, int dst_got)
{
	ceph_put_cap_refs(src_ci, src_got);
	ceph_put_cap_refs(dst_ci, dst_got);
}

/*
 * This function does several size-related checks, returning an error if:
 *  - source file is smaller than off+len
 *  - destination file size is not OK (inode_newsize_ok())
 *  - max bytes quota is exceeded
 */
static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
			   loff_t src_off, loff_t dst_off, size_t len)
{
	loff_t size, endoff;

	size = i_size_read(src_inode);
	/*
	 * Don't copy beyond source file EOF.  Instead of simply setting length
	 * to (size - src_off), just drop to VFS default implementation, as the
	 * local i_size may be stale due to other clients writing to the source
	 * inode.
	 */
	if (src_off + len > size) {
		dout("Copy beyond EOF (%llu + %zu > %llu)\n",
		     src_off, len, size);
		return -EOPNOTSUPP;
	}
	size = i_size_read(dst_inode);

	endoff = dst_off + len;
	if (inode_newsize_ok(dst_inode, endoff))
		return -EOPNOTSUPP;

	if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
		return -EDQUOT;

	return 0;
}

static struct ceph_osd_request *
ceph_alloc_copyfrom_request(struct ceph_osd_client *osdc,
			    u64 src_snapid,
			    struct ceph_object_id *src_oid,
			    struct ceph_object_locator *src_oloc,
			    struct ceph_object_id *dst_oid,
			    struct ceph_object_locator *dst_oloc,
			    u32 truncate_seq, u64 truncate_size)
{
	struct ceph_osd_request *req;
	int ret;
	u32 src_fadvise_flags =
		CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
		CEPH_OSD_OP_FLAG_FADVISE_NOCACHE;
	u32 dst_fadvise_flags =
		CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
		CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->r_flags = CEPH_OSD_FLAG_WRITE;

	ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
	ceph_oid_copy(&req->r_t.base_oid, dst_oid);

	ret = osd_req_op_copy_from_init(req, src_snapid, 0,
					src_oid, src_oloc,
					src_fadvise_flags,
					dst_fadvise_flags,
					truncate_seq,
					truncate_size,
					CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
	if (ret)
		goto out;

	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
	if (ret)
		goto out;

	return req;

out:
	ceph_osdc_put_request(req);
	return ERR_PTR(ret);
}

static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
				    struct ceph_inode_info *dst_ci, u64 *dst_off,
				    struct ceph_fs_client *fsc,
				    size_t len, unsigned int flags)
{
	struct ceph_object_locator src_oloc, dst_oloc;
	struct ceph_object_id src_oid, dst_oid;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *req;
	size_t bytes = 0;
	u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
	u32 src_objlen, dst_objlen;
	u32 object_size = src_ci->i_layout.object_size;
	int ret;

	src_oloc.pool = src_ci->i_layout.pool_id;
	src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
	dst_oloc.pool = dst_ci->i_layout.pool_id;
	dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
	osdc = &fsc->client->osdc;

	while (len >= object_size) {
		ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
					      object_size, &src_objnum,
					      &src_objoff, &src_objlen);
		ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
					      object_size, &dst_objnum,
					      &dst_objoff, &dst_objlen);
		ceph_oid_init(&src_oid);
		ceph_oid_printf(&src_oid, "%llx.%08llx",
				src_ci->i_vino.ino, src_objnum);
		ceph_oid_init(&dst_oid);
		ceph_oid_printf(&dst_oid, "%llx.%08llx",
				dst_ci->i_vino.ino, dst_objnum);
		/* Do an object remote copy */
		req = ceph_alloc_copyfrom_request(osdc, src_ci->i_vino.snap,
						  &src_oid, &src_oloc,
						  &dst_oid, &dst_oloc,
						  dst_ci->i_truncate_seq,
						  dst_ci->i_truncate_size);
		if (IS_ERR(req))
			ret = PTR_ERR(req);
		else {
			ceph_osdc_start_request(osdc, req, false);
			ret = ceph_osdc_wait_request(osdc, req);
			ceph_update_copyfrom_metrics(&fsc->mdsc->metric,
						     req->r_start_latency,
						     req->r_end_latency,
						     object_size, ret);
			ceph_osdc_put_request(req);
		}
		if (ret) {
			if (ret == -EOPNOTSUPP) {
				fsc->have_copy_from2 = false;
				pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
			}
			dout("ceph_osdc_copy_from returned %d\n", ret);
			if (!bytes)
				bytes = ret;
			goto out;
		}
		len -= object_size;
		bytes += object_size;
		*src_off += object_size;
		*dst_off += object_size;
	}

out:
	ceph_oloc_destroy(&src_oloc);
	ceph_oloc_destroy(&dst_oloc);
	return bytes;
}

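/*
 * ceph_do_objects_copy() returns the number of bytes copied via
 * copy-from2, or a negative error only if nothing was copied at all; a
 * partial failure after some objects have been copied reports the bytes
 * that did succeed and leaves *src_off/*dst_off pointing past them.
 */
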
static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
				      struct file *dst_file, loff_t dst_off,
				      size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *dst_inode = file_inode(dst_file);
	struct ceph_inode_info *src_ci = ceph_inode(src_inode);
	struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
	struct ceph_cap_flush *prealloc_cf;
	struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
	loff_t size;
	ssize_t ret = -EIO, bytes;
	u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
	u32 src_objlen, dst_objlen;
	int src_got = 0, dst_got = 0, err, dirty;

	if (src_inode->i_sb != dst_inode->i_sb) {
		struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);

		if (ceph_fsid_compare(&src_fsc->client->fsid,
				      &dst_fsc->client->fsid)) {
			dout("Copying files across clusters: src: %pU dst: %pU\n",
			     &src_fsc->client->fsid, &dst_fsc->client->fsid);
			return -EXDEV;
		}
	}
	if (ceph_snap(dst_inode) != CEPH_NOSNAP)
		return -EROFS;

	/*
	 * Some of the checks below will return -EOPNOTSUPP, which will force a
	 * fallback to the default VFS copy_file_range implementation.  This is
	 * desirable in several cases (for ex, the 'len' is smaller than the
	 * size of the objects, or in cases where that would be more
	 * efficient).
	 */

	if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
		return -EOPNOTSUPP;

	if (!src_fsc->have_copy_from2)
		return -EOPNOTSUPP;

	/*
	 * Striped file layouts require that we copy partial objects, but the
	 * OSD copy-from operation only supports full-object copies.  Limit
	 * this to non-striped file layouts for now.
	 */
	if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
	    (src_ci->i_layout.stripe_count != 1) ||
	    (dst_ci->i_layout.stripe_count != 1) ||
	    (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
		dout("Invalid src/dst files layout\n");
		return -EOPNOTSUPP;
	}

	if (len < src_ci->i_layout.object_size)
		return -EOPNOTSUPP; /* no remote copy will be done */

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	/* Start by sync'ing the source and destination files */
	ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
	if (ret < 0) {
		dout("failed to write src file (%zd)\n", ret);
		goto out;
	}
	ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
	if (ret < 0) {
		dout("failed to write dst file (%zd)\n", ret);
		goto out;
	}

	/*
	 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
	 * clients may have dirty data in their caches.  And OSDs know nothing
	 * about caps, so they can't safely do the remote object copies.
	 */
	err = get_rd_wr_caps(src_file, &src_got,
			     dst_file, (dst_off + len), &dst_got);
	if (err < 0) {
		dout("get_rd_wr_caps returned %d\n", err);
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
	if (ret < 0)
		goto out_caps;

	/* Drop dst file cached pages */
	ceph_fscache_invalidate(dst_inode, false);
	ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
					    dst_off >> PAGE_SHIFT,
					    (dst_off + len) >> PAGE_SHIFT);
	if (ret < 0) {
		dout("Failed to invalidate inode pages (%zd)\n", ret);
		ret = 0; /* XXX */
	}
	ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
				      src_ci->i_layout.object_size,
				      &src_objnum, &src_objoff, &src_objlen);
	ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
				      dst_ci->i_layout.object_size,
				      &dst_objnum, &dst_objoff, &dst_objlen);
	/* object-level offsets need to be the same */
	if (src_objoff != dst_objoff) {
		ret = -EOPNOTSUPP;
		goto out_caps;
	}

	/*
	 * Do a manual copy if the object offset isn't object aligned.
	 * 'src_objlen' contains the bytes left until the end of the object,
	 * starting at the src_off
	 */
	if (src_objoff) {
		dout("Initial partial copy of %u bytes\n", src_objlen);

		/*
		 * we need to temporarily drop all caps as we'll be calling
		 * {read,write}_iter, which will get caps again.
		 */
		put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
		ret = do_splice_direct(src_file, &src_off, dst_file,
				       &dst_off, src_objlen, flags);
		/* Abort on short copies or on error */
		if (ret < src_objlen) {
			dout("Failed partial copy (%zd)\n", ret);
			goto out;
		}
		len -= ret;
		err = get_rd_wr_caps(src_file, &src_got,
				     dst_file, (dst_off + len), &dst_got);
		if (err < 0)
			goto out;
		err = is_file_size_ok(src_inode, dst_inode,
				      src_off, dst_off, len);
		if (err < 0)
			goto out_caps;
	}

	size = i_size_read(dst_inode);
	bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
				     src_fsc, len, flags);
	if (bytes <= 0) {
		if (!ret)
			ret = bytes;
		goto out_caps;
	}
	dout("Copied %zu bytes out of %zu\n", bytes, len);
	len -= bytes;
	ret += bytes;

	file_update_time(dst_file);
	inode_inc_iversion_raw(dst_inode);

	if (dst_off > size) {
		/* Let the MDS know about dst file size change */
		if (ceph_inode_set_size(dst_inode, dst_off) ||
		    ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
			ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY, NULL);
	}
	/* Mark Fw dirty */
	spin_lock(&dst_ci->i_ceph_lock);
	dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
	spin_unlock(&dst_ci->i_ceph_lock);
	if (dirty)
		__mark_inode_dirty(dst_inode, dirty);

out_caps:
	put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);

	/*
	 * Do the final manual copy if we still have some bytes left, unless
	 * there were errors in remote object copies (len >= object_size).
	 */
	if (len && (len < src_ci->i_layout.object_size)) {
		dout("Final partial copy of %zu bytes\n", len);
		bytes = do_splice_direct(src_file, &src_off, dst_file,
					 &dst_off, len, flags);
		if (bytes > 0)
			ret += bytes;
		else
			dout("Failed partial copy (%zd)\n", bytes);
	}

out:
	ceph_free_cap_flush(prealloc_cf);

	return ret;
}

static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
				    struct file *dst_file, loff_t dst_off,
				    size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
				     len, flags);

	if (ret == -EOPNOTSUPP || ret == -EXDEV)
		ret = generic_copy_file_range(src_file, src_off, dst_file,
					      dst_off, len, flags);
	return ret;
}

= {
2601 .release
= ceph_release
,
2602 .llseek
= ceph_llseek
,
2603 .read_iter
= ceph_read_iter
,
2604 .write_iter
= ceph_write_iter
,
2606 .fsync
= ceph_fsync
,
2608 .setlease
= simple_nosetlease
,
2609 .flock
= ceph_flock
,
2610 .splice_read
= generic_file_splice_read
,
2611 .splice_write
= iter_file_splice_write
,
2612 .unlocked_ioctl
= ceph_ioctl
,
2613 .compat_ioctl
= compat_ptr_ioctl
,
2614 .fallocate
= ceph_fallocate
,
2615 .copy_file_range
= ceph_copy_file_range
,