#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path. Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino). The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM; /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}

struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (!IS_ROOT(dentry)) {
		inode = dentry->d_parent->d_inode;
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}


/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}
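
/*
 * Informal illustration: a readdir position for frag 0x2a at offset 7 is
 * the 64-bit value ((loff_t)0x2a << 32) | 7, so fpos_frag() and fpos_off()
 * recover the two halves.  ceph_make_fpos(), used below and presumably
 * defined in super.h, is assumed to be the matching encoder.
 */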

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache. We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * D_COMPLETE indicates we have all dentries in the dir. It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *filp,
			    void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct dentry *parent = filp->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
	     last);

	spin_lock(&parent->d_lock);

	/* start at beginning? */
	if (filp->f_pos == 2 || last == NULL ||
	    filp->f_pos < ceph_dentry(last)->offset) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->flags |= CEPH_F_ATEND;
			goto out_unlock;
		}
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (!d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    filp->f_pos <= di->offset)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		spin_unlock(&dentry->d_lock);
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	filp->f_pos = di->offset;
	err = filldir(dirent, dentry->d_name.name,
		      dentry->d_name.len, di->offset,
		      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
		      dentry->d_inode->i_mode >> 12);

	if (last) {
		if (err < 0) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = di->offset;
		} else {
			dput(last);
		}
	}
	last = dentry;

	if (err < 0)
		goto out;

	filp->f_pos++;

	/* make sure a dentry wasn't dropped while we didn't have parent lock */
	if (!ceph_dir_test_complete(dir)) {
		dout(" lost D_COMPLETE on %p; falling back to mds\n", dir);
		err = -EAGAIN;
		goto out;
	}

	spin_lock(&parent->d_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&parent->d_lock);
out:
	if (last)
		dput(last);
	return err;
}
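
/*
 * Note: ceph_readdir() below treats an -EAGAIN return from
 * __dcache_readdir() (D_COMPLETE lost mid-scan) as a cue to fall back to
 * a normal readdir against the MDS; any other return value is final.
 */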

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}
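
/*
 * Usage note: the name recorded in fi->last_name is handed back to the
 * MDS as req->r_path2 by ceph_readdir() below, so the next READDIR
 * request continues from the same lexicographical point.
 */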

static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(filp->f_pos);
	int off = fpos_off(filp->f_pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;
	const int max_entries = fsc->mount_options->max_readdir;
	const int max_bytes = fsc->mount_options->max_readdir_bytes;

	dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (filp->f_pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = ci->i_release_count;

		dout("readdir off 0 -> '.'\n");
		if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
			    ceph_translate_ino(inode->i_sb, inode->i_ino),
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 1;
		off = 1;
	}
	if (filp->f_pos == 1) {
		ino_t ino = parent_ino(filp->f_dentry);
		dout("readdir off 1 -> '..'\n");
		if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
			    ceph_translate_ino(inode->i_sb, ino),
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if ((filp->f_pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    ceph_dir_test_complete(inode) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(filp, dirent, filldir);
		if (err != -EAGAIN)
			return err;
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		/* requery frag tree, as the frag topology may have changed */
		frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(filp->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
		req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
		req->r_num_caps = max_entries + 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			fi->dir_release_count--; /* preclude D_COMPLETE */
		}

		/* note next offset and last dentry name */
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			rinfo = &req->r_reply_info;
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		u64 pos = ceph_make_fpos(frag, off);
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (filldir(dirent,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    pos,
			    ceph_translate_ino(inode->i_sb, ino), ftype) < 0) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		filp->f_pos = pos + 1;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		filp->f_pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_release_count == fi->dir_release_count) {
		ceph_dir_set_complete(inode);
		ci->i_max_offset = filp->f_pos;
	}
	spin_unlock(&ci->i_ceph_lock);

	dout("readdir %p filp %p done.\n", inode, filp);
	return 0;
}

static void reset_readdir(struct ceph_file_info *fi)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->next_offset = 2; /* compensate for . and .. */
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->flags &= ~CEPH_F_ATEND;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = offset;
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_END:
		offset += inode->i_size + 2; /* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	case SEEK_SET:
		break;
	default:
		goto out;
	}

	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fpos_frag(old_offset) ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi);
		}

		/* bump dir_release_count if we did a forward seek */
		if (offset > old_offset)
			fi->dir_release_count--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry); /* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry. If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    ceph_dir_test_complete(dir) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req); /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced). Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err < 0)
		d_drop(dentry);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
	req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(old_dentry->d_inode);
		d_instantiate(dentry, old_dentry->d_inode);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}
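
/*
 * The mask returned above is stored in req->r_inode_drop by ceph_unlink()
 * and ceph_rename() below; the assumption is that the MDS request then
 * releases those caps as part of the operation.
 */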

/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = S_ISDIR(dentry->d_inode->i_mode) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above). If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up d_subdirs order */
		ceph_dir_clear_complete(new_dir);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid. If not, delete the lease. Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct inode *dir;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
	     ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
		valid = 1;
	} else if (dentry->d_inode &&
		   ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid)
		ceph_dentry_lru_touch(dentry);
	else
		d_drop(dentry);
	iput(dir);
	return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);
	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
	dentry->d_fsdata = NULL;
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     unsigned int flags)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * Set/clear/test dir complete flag on the dir's dentry.
 */
void ceph_dir_set_complete(struct inode *inode)
{
	struct dentry *dentry = d_find_any_alias(inode);

	if (dentry && ceph_dentry(dentry) &&
	    ceph_test_mount_opt(ceph_sb_to_client(dentry->d_sb), DCACHE)) {
		dout(" marking %p (%p) complete\n", inode, dentry);
		set_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
	}
	dput(dentry);
}

void ceph_dir_clear_complete(struct inode *inode)
{
	struct dentry *dentry = d_find_any_alias(inode);

	if (dentry && ceph_dentry(dentry)) {
		dout(" marking %p (%p) complete\n", inode, dentry);
		set_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
	}
	dput(dentry);
}

bool ceph_dir_test_complete(struct inode *inode)
{
	struct dentry *dentry = d_find_any_alias(inode);

	if (dentry && ceph_dentry(dentry)) {
		dout(" marking %p (%p) NOT complete\n", inode, dentry);
		clear_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
	}
	dput(dentry);
	return false;
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect D_COMPLETE */
	if (d_unhashed(dentry))
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	di = ceph_dentry(dentry->d_parent);
	clear_bit(CEPH_D_COMPLETE, &di->flags);
}

/*
 * read() on a dir. This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				 "entries: %20lld\n"
				 " files: %20lld\n"
				 " subdirs: %20lld\n"
				 "rentries: %20lld\n"
				 " rfiles: %20lld\n"
				 " rsubdirs: %20lld\n"
				 "rbytes: %20lld\n"
				 "rctime: %10ld.%09ld\n",
				 ci->i_files + ci->i_subdirs,
				 ci->i_files,
				 ci->i_subdirs,
				 ci->i_rfiles + ci->i_rsubdirs,
				 ci->i_rfiles,
				 ci->i_rsubdirs,
				 ci->i_rbytes,
				 (long)ci->i_rctime.tv_sec,
				 (long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}

/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);

		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO; /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		ceph_mdsc_put_request(req);

		spin_lock(&ci->i_unsafe_lock);
		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	mutex_unlock(&inode->i_mutex);

	return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
	     dn->d_name.len, dn->d_name.name, di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry. This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.readdir = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};