#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"
/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */
const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;
/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}
struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (!IS_ROOT(dentry)) {
		inode = dentry->d_parent->d_inode;
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}
/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}
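/*
 * For illustration: the inverse helper, ceph_make_fpos() (see super.h),
 * packs a frag into the high 32 bits and an offset into the low 32
 * bits, so for frag 0x2a00 at offset 7:
 *
 *	loff_t pos = ceph_make_fpos(0x2a00, 7);	// 0x00002a0000000007
 *	fpos_frag(pos);				// 0x2a00
 *	fpos_off(pos);				// 7
 *
 * Offsets 0 and 1 in the leftmost frag are reserved for the synthetic
 * "." and ".." entries that ceph_readdir() emits itself.
 */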
/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct dentry *parent = file->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p at %llu (last %p)\n", dir, ctx->pos,
	     last);

	spin_lock(&parent->d_lock);

	/* start at beginning? */
	if (ctx->pos == 2 || last == NULL ||
	    ctx->pos < ceph_dentry(last)->offset) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->flags |= CEPH_F_ATEND;
			goto out_unlock;
		}
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (!d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    ctx->pos <= di->offset)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     ctx->pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		spin_unlock(&dentry->d_lock);
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	ctx->pos = di->offset;
	if (!dir_emit(ctx, dentry->d_name.name,
		      dentry->d_name.len,
		      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
		      dentry->d_inode->i_mode >> 12)) {
		if (last) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = di->offset;
		}
		dput(dentry);
		return 0;
	}

	ctx->pos = di->offset + 1;

	if (last)
		dput(last);
	last = dentry;

	/* make sure a dentry wasn't dropped while we didn't have parent lock */
	if (!ceph_dir_is_complete(dir)) {
		dout(" lost dir complete on %p; falling back to mds\n", dir);
		err = -EAGAIN;
		goto out;
	}

	spin_lock(&parent->d_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&parent->d_lock);
out:
	if (last)
		dput(last);
	return err;
}
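/*
 * In short: __dcache_readdir() returns 0 once it has emitted all
 * entries (or filled the caller's buffer), and -EAGAIN if the
 * directory lost its "complete" flag mid-scan.  ceph_readdir() below
 * treats -EAGAIN as "fall back to an over-the-wire readdir" and
 * passes any other result straight through.
 */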
/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}
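/*
 * fi->last_name recorded here becomes req->r_path2 on the next
 * CEPH_MDS_OP_READDIR request (see ceph_readdir() below): if a chunk
 * ended with an entry named "foo", the MDS is asked to resume the
 * listing after "foo" rather than at a numeric offset, which keeps
 * the walk stable across concurrent directory changes.
 */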
static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(ctx->pos);
	int off = fpos_off(ctx->pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;
	const int max_entries = fsc->mount_options->max_readdir;
	const int max_bytes = fsc->mount_options->max_readdir_bytes;

	dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (ctx->pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = atomic_read(&ci->i_release_count);

		dout("readdir off 0 -> '.'\n");
		if (!dir_emit(ctx, ".", 1,
			    ceph_translate_ino(inode->i_sb, inode->i_ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 1;
		off = 1;
	}
	if (ctx->pos == 1) {
		ino_t ino = parent_ino(file->f_dentry);
		dout("readdir off 1 -> '..'\n");
		if (!dir_emit(ctx, "..", 2,
			    ceph_translate_ino(inode->i_sb, ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 2;
		off = 2;
	}
	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if ((ctx->pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    __ceph_dir_is_complete(ci) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(file, ctx);
		if (err != -EAGAIN)
			return err;
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		/* requery frag tree, as the frag topology may have changed */
		frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(file->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
		req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
		req->r_num_caps = max_entries + 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			/* preclude from marking dir complete */
			fi->dir_release_count--;
		}

		/* note next offset and last dentry name */
		rinfo = &req->r_reply_info;
		if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
			frag = le32_to_cpu(rinfo->dir_dir->frag);
			if (ceph_frag_is_leftmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
			off = fi->next_offset;
		}
		fi->frag = frag;
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}
	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);

	ctx->pos = ceph_make_fpos(frag, off);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, ctx->pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (!dir_emit(ctx,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    ceph_translate_ino(inode->i_sb, ino), ftype)) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		ctx->pos++;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		ctx->pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;
	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
		dout(" marking %p complete\n", inode);
		__ceph_dir_set_complete(ci, fi->dir_release_count);
		ci->i_max_offset = ctx->pos;
	}
	spin_unlock(&ci->i_ceph_lock);

	dout("readdir %p file %p done.\n", inode, file);
	return 0;
}
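/*
 * For illustration: an unfragmented directory lives entirely in one
 * frag, so the emit loop above runs over a single chunk sequence.  If
 * the MDS has split the directory, each frag is drained until dir_end,
 * then ceph_frag_next(frag) moves to the sibling frag and off restarts
 * at 0, with f_pos always encoding the (frag, off) pair so a telldir()
 * cookie remains meaningful across chunks.
 */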
static void reset_readdir(struct ceph_file_info *fi)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->next_offset = 2;  /* compensate for . and .. */
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->flags &= ~CEPH_F_ATEND;
}
static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = offset;
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_END:
		offset += inode->i_size + 2;   /* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	case SEEK_SET:
		break;
	default:
		goto out;
	}

	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fpos_frag(old_offset) ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi);
		}

		/* bump dir_release_count if we did a forward seek */
		if (offset > old_offset)
			fi->dir_release_count--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
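/*
 * Net effect for userspace: rewinddir() (seek to 0) or a seek across a
 * frag boundary discards the buffered MDS chunk and refetches, while a
 * seek within the current chunk is served from the buffer.  A forward
 * seek also decrements the dir_release_count snapshot so this pass can
 * no longer mark the directory complete.
 */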
/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}
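/*
 * Example: with the default mount options the snapshot directory is
 * named ".snap", so
 *
 *	ls /mnt/ceph/somedir/.snap
 *
 * takes exactly this path: the MDS answers -ENOENT (the entry does not
 * really exist in the parent) and we splice in the synthetic snapdir
 * inode from ceph_get_snapdir() instead.
 */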
/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);	/* we got spliced */
	else
		dentry = NULL;
	return dentry;
}
static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}
/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    __ceph_dir_is_complete(ci) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}
/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}
static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);

	if (!err)
		err = ceph_init_acl(dentry, dentry->d_inode, dir);
	else
		d_drop(dentry);
	return err;
}
static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}
static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}
static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		err = ceph_init_acl(dentry, dentry->d_inode, dir);
	else
		d_drop(dentry);
	return err;
}
static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
	req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(old_dentry->d_inode);
		d_instantiate(dentry, old_dentry->d_inode);
	}
	ceph_mdsc_put_request(req);
	return err;
}
/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}
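/*
 * Worked example: for a file with i_nlink > 1 we only give up
 * CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL.  When the last link is
 * going away, the drop mask becomes everything except CEPH_CAP_PIN and
 * whatever __ceph_caps_wanted() reports as still in use, so e.g. an
 * inode still open for read keeps its FILE caps and reads continue to
 * work on the unlinked file.
 */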
/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = S_ISDIR(dentry->d_inode->i_mode) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}
static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up d_subdirs order */
		ceph_dir_clear_complete(new_dir);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}
/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}
/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}
/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}
/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct inode *dir;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
	     ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
		valid = 1;
	} else if (dentry->d_inode &&
		   ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		if (dentry->d_inode)
			valid = ceph_is_any_caps(dentry->d_inode);
		else
			valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid) {
		ceph_dentry_lru_touch(dentry);
	} else {
		ceph_dir_clear_complete(dir);
		d_drop(dentry);
	}
	iput(dir);
	return valid;
}
/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);
	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
	dentry->d_fsdata = NULL;
}
static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     unsigned int flags)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}
/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect dir's completeness */
	if (d_unhashed(dentry))
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	ceph_dir_clear_complete(dentry->d_parent->d_inode);
}
/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				"entries:   %20lld\n"
				" files:    %20lld\n"
				" subdirs:  %20lld\n"
				"rentries:  %20lld\n"
				" rfiles:   %20lld\n"
				" rsubdirs: %20lld\n"
				"rbytes:    %20lld\n"
				"rctime:    %10ld.%09ld\n",
				ci->i_files + ci->i_subdirs,
				ci->i_files,
				ci->i_subdirs,
				ci->i_rfiles + ci->i_rsubdirs,
				ci->i_rfiles,
				ci->i_rsubdirs,
				ci->i_rbytes,
				(long)ci->i_rctime.tv_sec,
				(long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}
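/*
 * Example: after mounting with -o dirstat, reading a directory as if
 * it were a file, e.g.
 *
 *	cat /mnt/ceph/some/dir
 *
 * returns the entries/rentries/rbytes/rctime summary formatted above;
 * without the mount option the read fails with -EISDIR.
 */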
/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);

		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;  /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		ceph_mdsc_put_request(req);

		spin_lock(&ci->i_unsafe_lock);
		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	mutex_unlock(&inode->i_mutex);

	return ret;
}
/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}
void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
	     dn->d_name.len, dn->d_name.name, di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}
void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}
/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}
const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};
const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};
const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};