#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}

struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (!IS_ROOT(dentry)) {
		inode = dentry->d_parent->d_inode;
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}


/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}
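
/*
 * For example, frag 0x2 with entry offset 5 packs to an f_pos of
 * 0x0000000200000005 (see ceph_make_fpos(), the inverse helper used
 * in ceph_readdir() below); fpos_frag() and fpos_off() recover the
 * two halves with a shift and a mask.
 */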

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct dentry *parent = file->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p at %llu (last %p)\n", dir, ctx->pos,
	     last);

	spin_lock(&parent->d_lock);

	/* start at beginning? */
	if (ctx->pos == 2 || last == NULL ||
	    ctx->pos < ceph_dentry(last)->offset) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->flags |= CEPH_F_ATEND;
			goto out_unlock;
		}
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (!d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    ctx->pos <= di->offset)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     ctx->pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		spin_unlock(&dentry->d_lock);
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	ctx->pos = di->offset;
	if (!dir_emit(ctx, dentry->d_name.name,
		      dentry->d_name.len,
		      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
		      dentry->d_inode->i_mode >> 12)) {
		if (last) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = di->offset;
		}
		dput(dentry);
		return 0;
	}

	if (last)
		dput(last);
	last = dentry;

	ctx->pos++;

	/* make sure a dentry wasn't dropped while we didn't have parent lock */
	if (!ceph_dir_is_complete(dir)) {
		dout(" lost dir complete on %p; falling back to mds\n", dir);
		err = -EAGAIN;
		goto out;
	}

	spin_lock(&parent->d_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&parent->d_lock);
out:
	if (last)
		dput(last);
	return err;
}
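
/*
 * Contract with ceph_readdir() below: returning -EAGAIN means the
 * dir stopped being "complete" mid-scan and the caller must fall
 * back to a normal MDS readdir; returning 0 means we either reached
 * the end of the dir (CEPH_F_ATEND set) or dir_emit() filled the
 * user buffer.
 */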

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}

static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(ctx->pos);
	int off = fpos_off(ctx->pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;
	const int max_entries = fsc->mount_options->max_readdir;
	const int max_bytes = fsc->mount_options->max_readdir_bytes;

	dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (ctx->pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = atomic_read(&ci->i_release_count);

		dout("readdir off 0 -> '.'\n");
		if (!dir_emit(ctx, ".", 1,
			    ceph_translate_ino(inode->i_sb, inode->i_ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 1;
		off = 1;
	}
	if (ctx->pos == 1) {
		ino_t ino = parent_ino(file->f_dentry);
		dout("readdir off 1 -> '..'\n");
		if (!dir_emit(ctx, "..", 2,
			    ceph_translate_ino(inode->i_sb, ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if ((ctx->pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    __ceph_dir_is_complete(ci) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(file, ctx);
		if (err != -EAGAIN)
			return err;
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		/* requery frag tree, as the frag topology may have changed */
		frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(file->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
		req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
		req->r_num_caps = max_entries + 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			/* preclude from marking dir complete */
			fi->dir_release_count--;
		}

		/* note next offset and last dentry name */
		rinfo = &req->r_reply_info;
		if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
			frag = le32_to_cpu(rinfo->dir_dir->frag);
			if (ceph_frag_is_leftmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
			off = fi->next_offset;
		}
		fi->offset = fi->next_offset;
		fi->last_readdir = req;
		fi->frag = frag;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);

	ctx->pos = ceph_make_fpos(frag, off);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, ctx->pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (!dir_emit(ctx,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    ceph_translate_ino(inode->i_sb, ino), ftype)) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		ctx->pos++;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		ctx->pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
		dout(" marking %p complete\n", inode);
		__ceph_dir_set_complete(ci, fi->dir_release_count);
		ci->i_max_offset = ctx->pos;
	}
	spin_unlock(&ci->i_ceph_lock);

	dout("readdir %p file %p done.\n", inode, file);
	return 0;
}
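
/*
 * In short, ceph_readdir() above proceeds in three stages: emit the
 * synthetic "." and ".." entries at positions 0 and 1, attempt
 * __dcache_readdir() when the dir is known complete, and otherwise
 * fetch the directory a frag at a time from the MDS, walking
 * ceph_frag_next() until the rightmost frag is exhausted.
 */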

static void reset_readdir(struct ceph_file_info *fi)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->next_offset = 2;  /* compensate for . and .. */
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->flags &= ~CEPH_F_ATEND;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = offset;
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_END:
		offset += inode->i_size + 2;   /* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	case SEEK_SET:
		break;
	default:
		goto out;
	}

	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fpos_frag(old_offset) ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi);
		}

		/* bump dir_release_count if we did a forward seek */
		if (offset > old_offset)
			fi->dir_release_count--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
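
/*
 * Note the dir_release_count trick above: decrementing the count
 * cached in the ceph_file_info guarantees it will no longer match
 * ci->i_release_count when the readdir finishes, which prevents a
 * readdir that seeked forward (and so may have skipped entries) from
 * marking the dir complete.
 */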

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);   /* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    __ceph_dir_is_complete(ci) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}
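
/*
 * The "conclude ENOENT locally" path above is why a complete, cached
 * directory is valuable: for example, a stat(2) of a name absent from
 * such a dir instantiates a negative dentry without any MDS round
 * trip.
 */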

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err < 0)
		d_drop(dentry);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
	req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(old_dentry->d_inode);
		d_instantiate(dentry, old_dentry->d_inode);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}
/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = S_ISDIR(dentry->d_inode->i_mode) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up d_subdirs order */
		ceph_dir_clear_complete(new_dir);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}
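
/*
 * Two distinct mechanisms can validate a cached dentry, and
 * ceph_d_revalidate() below accepts either: a per-dentry lease granted
 * by the MDS session (dentry_lease_is_valid), or a directory-wide
 * CEPH_CAP_FILE_SHARED cap whose shared_gen still matches the one
 * recorded on the dentry (dir_lease_is_valid).
 */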

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct inode *dir;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
	     ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
		valid = 1;
	} else if (dentry->d_inode &&
		   ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid)
		ceph_dentry_lru_touch(dentry);
	else
		d_drop(dentry);
	iput(dir);
	return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);
	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
	dentry->d_fsdata = NULL;
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     unsigned int flags)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect dir's completeness */
	if (d_unhashed(dentry))
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	ceph_dir_clear_complete(dentry->d_parent->d_inode);
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				"entries:   %20lld\n"
				" files:    %20lld\n"
				" subdirs:  %20lld\n"
				"rentries:  %20lld\n"
				" rfiles:   %20lld\n"
				" rsubdirs: %20lld\n"
				"rbytes:    %20lld\n"
				"rctime:    %10ld.%09ld\n",
				ci->i_files + ci->i_subdirs,
				ci->i_files,
				ci->i_subdirs,
				ci->i_rfiles + ci->i_rsubdirs,
				ci->i_rfiles,
				ci->i_rsubdirs,
				ci->i_rbytes,
				(long)ci->i_rctime.tv_sec,
				(long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}

/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);

		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;  /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		ceph_mdsc_put_request(req);

		spin_lock(&ci->i_unsafe_lock);
		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	mutex_unlock(&inode->i_mutex);

	return ret;
}
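
/*
 * Note: a directory op sits on i_unsafe_dirops from the time the MDS
 * sends its initial (unsafe) reply until the op is committed to the
 * MDS journal; r_safe_completion fires on that second, "safe" reply
 * (see mds_client.c), which is what the loop above waits for.
 */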

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
	     dn->d_name.len, dn->d_name.name, di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};