#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path. Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino). The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

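/*
 * Illustrative example (not from the original source, ino values made
 * up): a getattr on an open file is addressed by its ino alone (base
 * ino 0x10000000123, no path component), while a lookup of "b" in that
 * directory is addressed as base ino 0x10000000123 plus the single
 * path component "b".
 */
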
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}

struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (!IS_ROOT(dentry)) {
		inode = dentry->d_parent->d_inode;
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}


/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}

static int fpos_cmp(loff_t l, loff_t r)
{
	int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
	if (v)
		return v;
	return (int)(fpos_off(l) - fpos_off(r));
}

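/*
 * Worked example (illustrative): an entry at offset 5 of frag 0x2 is
 * encoded as ceph_make_fpos(0x2, 5) == ((loff_t)0x2 << 32) | 5 ==
 * 0x200000005; fpos_frag() then recovers 0x2 from the high 32 bits
 * and fpos_off() recovers 5 from the low 32 bits.
 */
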
/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache. We make this work by carefully ordering dentries on
 * d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir. It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *file, struct dir_context *ctx,
			    u32 shared_gen)
{
	struct ceph_file_info *fi = file->private_data;
	struct dentry *parent = file->f_path.dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p v%u at %llu (last %p)\n",
	     dir, shared_gen, ctx->pos, last);

	spin_lock(&parent->d_lock);

	/* start at beginning? */
	if (ctx->pos == 2 || last == NULL ||
	    fpos_cmp(ctx->pos, ceph_dentry(last)->offset) < 0) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->flags |= CEPH_F_ATEND;
			goto out_unlock;
		}
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (di->lease_shared_gen == shared_gen &&
		    !d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    fpos_cmp(ctx->pos, di->offset) <= 0)
			break;
		dout(" skipping %p %pd at %llu (%llu)%s%s\n", dentry,
		     dentry, di->offset,
		     ctx->pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		spin_unlock(&dentry->d_lock);
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_child);
		di = ceph_dentry(dentry);
	}

	dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);

	/* make sure a dentry wasn't dropped while we didn't have parent lock */
	if (!ceph_dir_is_complete_ordered(dir)) {
		dout(" lost dir complete on %p; falling back to mds\n", dir);
		dput(dentry);
		err = -EAGAIN;
		goto out;
	}

	dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
	     dentry, dentry, dentry->d_inode);
	if (!dir_emit(ctx, dentry->d_name.name,
		      dentry->d_name.len,
		      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
		      dentry->d_inode->i_mode >> 12)) {
		if (last) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = fpos_off(di->offset);
		}
		dput(dentry);
		return 0;
	}

	ctx->pos = di->offset + 1;

	if (last)
		dput(last);
	last = dentry;

	spin_lock(&parent->d_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&parent->d_lock);
out:
	if (last)
		dput(last);
	return err;
}

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}

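/*
 * Illustrative example (not from the original source): if the last
 * entry in a readdir chunk was named "foo", fi->last_name becomes
 * "foo" and the next CEPH_MDS_OP_READDIR request carries it in
 * r_path2 (see ceph_readdir() below), so the MDS resumes the listing
 * after "foo" even if other entries were created or unlinked in the
 * meantime.
 */
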
static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(ctx->pos);
	int off = fpos_off(ctx->pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;

	dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (ctx->pos == 0) {
		dout("readdir off 0 -> '.'\n");
		if (!dir_emit(ctx, ".", 1,
			    ceph_translate_ino(inode->i_sb, inode->i_ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 1;
		off = 1;
	}
	if (ctx->pos == 1) {
		ino_t ino = parent_ino(file->f_path.dentry);
		dout("readdir off 1 -> '..'\n");
		if (!dir_emit(ctx, "..", 2,
			    ceph_translate_ino(inode->i_sb, ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if ((ctx->pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    __ceph_dir_is_complete_ordered(ci) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		u32 shared_gen = ci->i_shared_gen;
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(file, ctx, shared_gen);
		if (err != -EAGAIN)
			return err;
		frag = fpos_frag(ctx->pos);
		off = fpos_off(ctx->pos);
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

	if (ctx->pos == 2) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = atomic_read(&ci->i_release_count);
		fi->dir_ordered_count = ci->i_ordered_count;
	}

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		err = ceph_alloc_readdir_reply_buffer(req, inode);
		if (err) {
			ceph_mdsc_put_request(req);
			return err;
		}
		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(file->f_path.dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			/* preclude from marking dir complete */
			fi->dir_release_count--;
		}

		/* note next offset and last dentry name */
		rinfo = &req->r_reply_info;
		if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
			frag = le32_to_cpu(rinfo->dir_dir->frag);
			if (ceph_frag_is_leftmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
			off = fi->next_offset;
		}
		fi->frag = frag;
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);

	ctx->pos = ceph_make_fpos(frag, off);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, ctx->pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (!dir_emit(ctx,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    ceph_translate_ino(inode->i_sb, ino), ftype)) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		ctx->pos++;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		ctx->pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
		if (ci->i_ordered_count == fi->dir_ordered_count)
			dout(" marking %p complete and ordered\n", inode);
		else
			dout(" marking %p complete\n", inode);
		__ceph_dir_set_complete(ci, fi->dir_release_count,
					fi->dir_ordered_count);
	}
	spin_unlock(&ci->i_ceph_lock);

	dout("readdir %p file %p done.\n", inode, file);
	return 0;
}

static void reset_readdir(struct ceph_file_info *fi, unsigned frag)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	if (ceph_frag_is_leftmost(frag))
		fi->next_offset = 2;  /* compensate for . and .. */
	else
		fi->next_offset = 0;
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->flags &= ~CEPH_F_ATEND;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = ceph_make_fpos(fi->frag, fi->next_offset);
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_END:
		offset += inode->i_size + 2;   /* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	case SEEK_SET:
		break;
	default:
		goto out;
	}

	if (offset >= 0) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fi->frag ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi, fpos_frag(offset));
		}

		/* bump dir_release_count if we did a forward seek */
		if (fpos_cmp(offset, old_offset) > 0)
			fi->dir_release_count--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}

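/*
 * Illustrative example (not from the original source): seekdir(dir, 0)
 * arrives here as SEEK_SET with offset 0, which drops any buffered
 * readdir chunk via reset_readdir(); a forward seek within the current
 * frag and chunk keeps the buffer but decrements dir_release_count so
 * the directory is not later marked complete from a partial listing.
 */
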
/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%pd', linking to snapdir %p\n",
		     dentry, dentry, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);   /* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry. If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%pd'\n",
	     dir, dentry, dentry);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    __ceph_dir_is_complete(ci) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced). To not
		 * confuse VFS (especially when inode is a directory),
		 * we don't link our dentry to that inode, return an
		 * error instead.
		 *
		 * This event should be rare and it happens only when
		 * we talk to an old MDS. Recent MDSes do not send a
		 * traceless reply for a request that creates a new
		 * inode.
		 */
		d_drop(result);
		return -ESTALE;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		return err;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(dentry->d_inode, &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%pd' dn %p\n", dir,
		     dentry, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}

	mode |= S_IFDIR;
	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		goto out;

	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err &&
	    !req->r_reply_info.head->is_target &&
	    !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(dentry->d_inode, &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(old_dentry->d_inode);
		d_instantiate(dentry, old_dentry->d_inode);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}

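/*
 * Illustrative example (not from the original source): unlinking the
 * last link of a file that no process holds open (i_nlink == 1 and
 * __ceph_caps_wanted() returns 0) makes drop_caps_for_unlink() offer
 * to drop everything except CEPH_CAP_PIN, and setting CEPH_I_NODELAY
 * releases those caps immediately rather than batching the release.
 */
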
/*
 * rmdir and unlink differ only in the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = d_is_dir(dentry) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	ihold(old_dir);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = old_dir;
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above). If there is no trace, we need
		 * to do it here.
		 */
		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);

		/* d_move screws up sibling dentries' offsets */
		ceph_dir_clear_complete(old_dir);
		ceph_dir_clear_complete(new_dir);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid. If not, delete the lease. Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

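/*
 * Illustrative timeline (not from the original source): assuming a
 * lease issued at time T with lease_renew_after set near T plus half
 * the lease duration, a revalidation between that point and lease
 * expiry still returns valid = 1 but also fires a single
 * CEPH_MDS_LEASE_RENEW message; clearing lease_renew_after first
 * prevents a renewal storm from concurrent revalidations.
 */
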
/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct inode *dir;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
	     dentry, dentry->d_inode, ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
		     dentry, dentry->d_inode);
		valid = 1;
	} else if (dentry->d_inode &&
		   ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		if (dentry->d_inode)
			valid = ceph_is_any_caps(dentry->d_inode);
		else
			valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid) {
		ceph_dentry_lru_touch(dentry);
	} else {
		ceph_dir_clear_complete(dir);
	}
	iput(dir);
	return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);
	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
	dentry->d_fsdata = NULL;
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     unsigned int flags)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect dir's completeness */
	if (d_unhashed(dentry))
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	ceph_dir_clear_complete(dentry->d_parent->d_inode);
}

/*
 * read() on a dir. This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				"entries:   %20lld\n"
				" files:    %20lld\n"
				" subdirs:  %20lld\n"
				"rentries:  %20lld\n"
				" rfiles:   %20lld\n"
				" rsubdirs: %20lld\n"
				"rbytes:    %20lld\n"
				"rctime:    %10ld.%09ld\n",
				ci->i_files + ci->i_subdirs,
				ci->i_files,
				ci->i_subdirs,
				ci->i_rfiles + ci->i_rsubdirs,
				ci->i_rfiles,
				ci->i_rsubdirs,
				ci->i_rbytes,
				(long)ci->i_rctime.tv_sec,
				(long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}

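/*
 * Usage example (illustrative, paths made up): on a mount created with
 *	mount -t ceph mon:/ /mnt/ceph -o dirstat
 * a plain "cat /mnt/ceph/some/dir" returns the entries/rentries/
 * rbytes/rctime summary formatted above, instead of the usual -EISDIR.
 */
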
/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);

		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;  /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		ceph_mdsc_put_request(req);

		spin_lock(&ci->i_unsafe_lock);
		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	mutex_unlock(&inode->i_mutex);

	return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%pd' (offset %lld)\n", di, dn, dn,
	     di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry. This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}

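/*
 * Illustrative example (not from the original source): a directory
 * whose layout sets dl_dir_hash to CEPH_STR_HASH_RJENKINS has its
 * names rehashed through ceph_str_hash() with rjenkins rather than
 * reusing the VFS-computed d_name.hash; the result feeds frag
 * selection, so it must match the hash the MDS uses, not whatever the
 * local dcache happens to use.
 */
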
const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct file_operations ceph_snapdir_fops = {
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct inode_operations ceph_snapdir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.mkdir = ceph_mkdir,
	.rmdir = ceph_unlink,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};