#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"
/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */
const struct dentry_operations ceph_dentry_ops;
/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}
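/*
 * Illustrative note (an assumption for readability, not part of the original
 * file): the lockless d_fsdata check at the top of ceph_init_dentry() relies
 * on the barrier in the setup path, so the ceph_dentry_info fields are
 * visible before the pointer is published:
 *
 *	di->lease_session = NULL;	// initialise private state first
 *	dentry->d_time = jiffies;
 *	smp_mb();			// order the stores above ...
 *	dentry->d_fsdata = di;		// ... before publishing the pointer
 */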
struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (!IS_ROOT(dentry)) {
		inode = dentry->d_parent->d_inode;
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}
/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}

static int fpos_cmp(loff_t l, loff_t r)
{
	int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
	if (v)
		return v;
	return (int)(fpos_off(l) - fpos_off(r));
}
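/*
 * Worked example of the f_pos encoding (illustrative; the packing is assumed
 * to match the ceph_make_fpos() helper used later in this file):
 *
 *	loff_t pos = ((loff_t)frag << 32) | (loff_t)off;
 *	fpos_frag(pos);		// == frag (high 32 bits)
 *	fpos_off(pos);		// == off  (low 32 bits)
 */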
/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
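/*
 * Reading aid (restating the contract visible in the code below, not new
 * behaviour): __dcache_readdir() returns -EAGAIN once the "complete,
 * ordered" state is lost mid-scan, and ceph_readdir() then falls back to a
 * normal MDS readdir for the remaining entries.
 */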
static int __dcache_readdir(struct file *file, struct dir_context *ctx,
			    u32 shared_gen)
{
	struct ceph_file_info *fi = file->private_data;
	struct dentry *parent = file->f_path.dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p v%u at %llu (last %p)\n",
	     dir, shared_gen, ctx->pos, last);

	spin_lock(&parent->d_lock);

	/* start at beginning? */
	if (ctx->pos == 2 || last == NULL ||
	    fpos_cmp(ctx->pos, ceph_dentry(last)->offset) < 0) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->flags |= CEPH_F_ATEND;
			goto out_unlock;
		}
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (di->lease_shared_gen == shared_gen &&
		    !d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    fpos_cmp(ctx->pos, di->offset) <= 0)
			break;
		dout(" skipping %p %pd at %llu (%llu)%s%s\n", dentry,
		     dentry, di->offset,
		     ctx->pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		spin_unlock(&dentry->d_lock);
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_child);
		di = ceph_dentry(dentry);
	}

	dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);

	/* make sure a dentry wasn't dropped while we didn't have parent lock */
	if (!ceph_dir_is_complete_ordered(dir)) {
		dout(" lost dir complete on %p; falling back to mds\n", dir);
		dput(dentry);
		err = -EAGAIN;
		goto out;
	}

	dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
	     dentry, dentry, dentry->d_inode);
	if (!dir_emit(ctx, dentry->d_name.name, dentry->d_name.len,
		      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
		      dentry->d_inode->i_mode >> 12)) {
		if (last) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = fpos_off(di->offset);
		}
		dput(dentry);
		return 0;
	}

	ctx->pos = di->offset + 1;

	if (last)
		dput(last);
	last = dentry;

	spin_lock(&parent->d_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&parent->d_lock);
out:
	if (last)
		dput(last);
	return err;
}
/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}
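/*
 * Usage note (drawn from ceph_readdir() below, not new behaviour): the name
 * saved here is handed to the MDS as req->r_path2 on the next READDIR
 * request, so the listing resumes after the last entry already returned.
 */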
static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(ctx->pos);
	int off = fpos_off(ctx->pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;

	dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (ctx->pos == 0) {
		dout("readdir off 0 -> '.'\n");
		if (!dir_emit(ctx, ".", 1,
			    ceph_translate_ino(inode->i_sb, inode->i_ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 1;
		off = 1;
	}
	if (ctx->pos == 1) {
		ino_t ino = parent_ino(file->f_path.dentry);
		dout("readdir off 1 -> '..'\n");
		if (!dir_emit(ctx, "..", 2,
			    ceph_translate_ino(inode->i_sb, ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if ((ctx->pos == 2 || fi->dentry) &&
	    ceph_test_mount_opt(fsc, DCACHE) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    __ceph_dir_is_complete_ordered(ci) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		u32 shared_gen = ci->i_shared_gen;
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(file, ctx, shared_gen);
		if (err != -EAGAIN)
			return err;
		frag = fpos_frag(ctx->pos);
		off = fpos_off(ctx->pos);
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

	if (ctx->pos == 2) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = atomic_read(&ci->i_release_count);
		fi->dir_ordered_count = ci->i_ordered_count;
	}

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		err = ceph_alloc_readdir_reply_buffer(req, inode);
		if (err) {
			ceph_mdsc_put_request(req);
			return err;
		}
		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(file->f_path.dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			/* preclude from marking dir complete */
			fi->dir_release_count--;
		}

		/* note next offset and last dentry name */
		rinfo = &req->r_reply_info;
		if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
			frag = le32_to_cpu(rinfo->dir_dir->frag);
			if (ceph_frag_is_leftmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
			off = fi->next_offset;
		}
		fi->frag = frag;
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);

	ctx->pos = ceph_make_fpos(frag, off);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, ctx->pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (!dir_emit(ctx,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    ceph_translate_ino(inode->i_sb, ino), ftype)) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		ctx->pos++;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		ctx->pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
		if (ci->i_ordered_count == fi->dir_ordered_count)
			dout(" marking %p complete and ordered\n", inode);
		else
			dout(" marking %p complete\n", inode);
		__ceph_dir_set_complete(ci, fi->dir_release_count,
					fi->dir_ordered_count);
	}
	spin_unlock(&ci->i_ceph_lock);

	dout("readdir %p file %p done.\n", inode, file);
	return 0;
}
static void reset_readdir(struct ceph_file_info *fi, unsigned frag)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	if (ceph_frag_is_leftmost(frag))
		fi->next_offset = 2;  /* compensate for . and .. */
	else
		fi->next_offset = 0;
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->flags &= ~CEPH_F_ATEND;
}
static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = ceph_make_fpos(fi->frag, fi->next_offset);
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_END:
		offset += inode->i_size + 2;   /* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	case SEEK_SET:
		break;
	default:
		goto out;
	}

	if (offset >= 0) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fi->frag ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi, fpos_frag(offset));
		}

		/* bump dir_release_count if we did a forward seek */
		if (fpos_cmp(offset, old_offset) > 0)
			fi->dir_release_count--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
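/*
 * Reading aid (restating the conditions above, not new behaviour): the
 * buffered readdir chunk survives a seek only when all of these hold:
 *
 *	offset != 0 &&
 *	fpos_frag(offset) == fi->frag &&
 *	fpos_off(offset) >= fi->offset
 *
 * otherwise reset_readdir() discards fi->last_readdir and fi->last_name.
 */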
/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%pd', linking to snapdir %p\n",
		     dentry, dentry, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}
/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);	/* we got spliced */
	else
		dentry = NULL;
	return dentry;
}
static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}
/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%pd'\n",
	     dir, dentry, dentry);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    ceph_test_mount_opt(fsc, DCACHE) &&
		    __ceph_dir_is_complete(ci) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}
/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced). To not
		 * confuse VFS (especially when inode is a directory),
		 * we don't link our dentry to that inode, return an
		 * error instead.
		 *
		 * This event should be rare and it happens only when
		 * we talk to old MDS. Recent MDS does not send traceless
		 * reply for request that creates new inode.
		 */
		d_drop(result);
		return 0;
	}
	return PTR_ERR(result);
}
static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		return err;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(dentry->d_inode, &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}
static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}
static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err)
		d_drop(dentry);
	return err;
}
static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%pd' dn %p\n", dir,
		     dentry, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}

	mode |= S_IFDIR;
	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		goto out;

	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err &&
	    !req->r_reply_info.head->is_target &&
	    !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(dentry->d_inode, &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}
static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(old_dentry->d_inode);
		d_instantiate(dentry, old_dentry->d_inode);
	}
	ceph_mdsc_put_request(req);
	return err;
}
/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}
/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = d_is_dir(dentry) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}
static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = old_dir;
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);

		/* d_move screws up sibling dentries' offsets */
		ceph_dir_clear_complete(old_dir);
		ceph_dir_clear_complete(new_dir);
	}
	ceph_mdsc_put_request(req);
	return err;
}
/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}
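/*
 * Reading aid (restating the two revalidation paths below, not new
 * behaviour): setting d_time to "now" defeats the
 * time_before(jiffies, dentry->d_time) check in dentry_lease_is_valid(),
 * and zeroing lease_shared_gen defeats the i_shared_gen comparison in
 * dir_lease_is_valid(), so neither path can validate this dentry again
 * without a fresh lease.
 */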
/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}
/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}
/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct inode *dir;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
	     dentry, dentry->d_inode, ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
		     dentry, dentry->d_inode);
		valid = 1;
	} else if (dentry->d_inode &&
		   ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		if (dentry->d_inode)
			valid = ceph_is_any_caps(dentry->d_inode);
		else
			valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid) {
		ceph_dentry_lru_touch(dentry);
	} else {
		ceph_dir_clear_complete(dir);
		d_drop(dentry);
	}
	iput(dir);
	return valid;
}
/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);
	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
	dentry->d_fsdata = NULL;
}
static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     unsigned int flags)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}
/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect dir's completeness */
	if (d_unhashed(dentry))
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	ceph_dir_clear_complete(dentry->d_parent->d_inode);
}
/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				"entries:   %20lld\n"
				" files:    %20lld\n"
				" subdirs:  %20lld\n"
				"rentries:  %20lld\n"
				" rfiles:   %20lld\n"
				" rsubdirs: %20lld\n"
				"rbytes:    %20lld\n"
				"rctime:    %10ld.%09ld\n",
				ci->i_files + ci->i_subdirs,
				ci->i_files,
				ci->i_subdirs,
				ci->i_rfiles + ci->i_rsubdirs,
				ci->i_rfiles,
				ci->i_rsubdirs,
				ci->i_rbytes,
				(long)ci->i_rctime.tv_sec,
				(long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size;
}
/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);

		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			unsigned long time_left = wait_for_completion_timeout(
							&req->r_safe_completion,
							req->r_timeout);
			if (time_left > 0)
				ret = 0;
			else
				ret = -EIO;  /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		ceph_mdsc_put_request(req);

		spin_lock(&ci->i_unsafe_lock);
		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	mutex_unlock(&inode->i_mutex);

	return ret;
}
/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%pd' (offset %lld)\n", di, dn, dn,
	     di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}
/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}
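/*
 * Illustrative example (hypothetical values, not from the original file):
 * with dl_dir_hash == CEPH_STR_HASH_LINUX the in-kernel d_name.hash is
 * reused as-is, while a directory layout selecting e.g.
 * CEPH_STR_HASH_RJENKINS would instead hash the raw name bytes:
 *
 *	ceph_str_hash(CEPH_STR_HASH_RJENKINS, dn->d_name.name,
 *		      dn->d_name.len);
 */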
const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct file_operations ceph_snapdir_fops = {
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct inode_operations ceph_snapdir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.mkdir = ceph_mkdir,
	.rmdir = ceph_unlink,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};