fs/ceph/dir.c
#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}

struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (!IS_ROOT(dentry)) {
		inode = dentry->d_parent->d_inode;
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}


/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}

static int fpos_cmp(loff_t l, loff_t r)
{
	int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
	if (v)
		return v;
	return (int)(fpos_off(l) - fpos_off(r));
}
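
/*
 * Example of the encoding (matching fpos_frag()/fpos_off() above and
 * ceph_make_fpos() used below): a position at entry 3 of frag 0x1000
 * is stored as ((loff_t)0x1000 << 32) | 3, so fpos_frag() recovers
 * 0x1000 and fpos_off() recovers 3.  fpos_cmp() therefore orders
 * positions first by frag and then by offset within the frag.
 */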

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *file, struct dir_context *ctx,
			    u32 shared_gen)
{
	struct ceph_file_info *fi = file->private_data;
	struct dentry *parent = file->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p v%u at %llu (last %p)\n",
	     dir, shared_gen, ctx->pos, last);

	spin_lock(&parent->d_lock);

	/* start at beginning? */
	if (ctx->pos == 2 || last == NULL ||
	    ctx->pos < ceph_dentry(last)->offset) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->flags |= CEPH_F_ATEND;
			goto out_unlock;
		}
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (di->lease_shared_gen == shared_gen &&
		    !d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    fpos_cmp(ctx->pos, di->offset) <= 0)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     ctx->pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		spin_unlock(&dentry->d_lock);
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	ctx->pos = di->offset;
	if (!dir_emit(ctx, dentry->d_name.name,
		      dentry->d_name.len,
		      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
		      dentry->d_inode->i_mode >> 12)) {
		if (last) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = fpos_off(di->offset);
		}
		dput(dentry);
		return 0;
	}

	if (last)
		dput(last);
	last = dentry;

	ctx->pos++;

	/* make sure a dentry wasn't dropped while we didn't have parent lock */
	if (!ceph_dir_is_complete(dir)) {
		dout(" lost dir complete on %p; falling back to mds\n", dir);
		err = -EAGAIN;
		goto out;
	}

	spin_lock(&parent->d_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&parent->d_lock);
out:
	if (last)
		dput(last);
	return err;
}
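
/*
 * Note that __dcache_readdir() only runs after the caller has checked,
 * under i_ceph_lock, that the directory is complete and that we hold
 * CEPH_CAP_FILE_SHARED.  It walks parent->d_subdirs from the tail and
 * skips any dentry whose lease_shared_gen no longer matches, or that
 * has been unhashed or has lost its inode.  If the complete flag is
 * lost mid-walk, -EAGAIN tells ceph_readdir() to fall back to asking
 * the MDS.
 */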

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}
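
/*
 * fi->last_name recorded here is later copied into req->r_path2, so
 * the next CEPH_MDS_OP_READDIR request resumes listing after that
 * name instead of restarting the frag from the beginning.
 */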

static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(ctx->pos);
	int off = fpos_off(ctx->pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;

	dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (ctx->pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = atomic_read(&ci->i_release_count);

		dout("readdir off 0 -> '.'\n");
		if (!dir_emit(ctx, ".", 1,
			    ceph_translate_ino(inode->i_sb, inode->i_ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 1;
		off = 1;
	}
	if (ctx->pos == 1) {
		ino_t ino = parent_ino(file->f_dentry);
		dout("readdir off 1 -> '..'\n");
		if (!dir_emit(ctx, "..", 2,
			    ceph_translate_ino(inode->i_sb, ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if ((ctx->pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    __ceph_dir_is_complete(ci) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		u32 shared_gen = ci->i_shared_gen;
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(file, ctx, shared_gen);
		if (err != -EAGAIN)
			return err;
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		err = ceph_alloc_readdir_reply_buffer(req, inode);
		if (err) {
			ceph_mdsc_put_request(req);
			return err;
		}
		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(file->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			/* preclude from marking dir complete */
			fi->dir_release_count--;
		}

		/* note next offset and last dentry name */
		rinfo = &req->r_reply_info;
		if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
			frag = le32_to_cpu(rinfo->dir_dir->frag);
			if (ceph_frag_is_leftmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
			off = fi->next_offset;
		}
		fi->frag = frag;
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);

	ctx->pos = ceph_make_fpos(frag, off);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, ctx->pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (!dir_emit(ctx,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    ceph_translate_ino(inode->i_sb, ino), ftype)) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		ctx->pos++;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		ctx->pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
		dout(" marking %p complete\n", inode);
		__ceph_dir_set_complete(ci, fi->dir_release_count);
		ci->i_max_offset = ctx->pos;
	}
	spin_unlock(&ci->i_ceph_lock);

	dout("readdir %p file %p done.\n", inode, file);
	return 0;
}
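
/*
 * Readdir thus proceeds frag by frag: each MDS reply covers one frag
 * (possibly in several chunks, resumed by last_name), and once a
 * frag's entries have been emitted we step to ceph_frag_next(frag)
 * until the rightmost frag is done.  Only if i_release_count never
 * changed during the whole pass is the directory re-marked complete,
 * which is what lets later readdirs and lookups be served from the
 * dcache.
 */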

static void reset_readdir(struct ceph_file_info *fi, unsigned frag)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	if (ceph_frag_is_leftmost(frag))
		fi->next_offset = 2;  /* compensate for . and .. */
	else
		fi->next_offset = 0;
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->flags &= ~CEPH_F_ATEND;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = ceph_make_fpos(fi->frag, fi->next_offset);
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_END:
		offset += inode->i_size + 2;   /* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	case SEEK_SET:
		break;
	default:
		goto out;
	}

	if (offset >= 0) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fi->frag ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi, fpos_frag(offset));
		}

		/* bump dir_release_count if we did a forward seek */
		if (fpos_cmp(offset, old_offset) > 0)
			fi->dir_release_count--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
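
/*
 * The SEEK_CUR case above intentionally falls through to SEEK_SET
 * after adding file->f_pos, and a resulting negative offset leaves
 * retval at -EINVAL.  Seeking to offset 0, into a different frag, or
 * to a point before the currently buffered chunk throws away the
 * cached readdir state via reset_readdir().
 */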

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);   /* we got spliced */
	else
		dentry = NULL;
	return dentry;
}
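
/*
 * The return value follows the usual ->lookup() convention: NULL means
 * the caller's dentry was used (or instantiated) as-is, a different
 * dentry pointer means we were spliced onto an existing alias and the
 * caller must return that one instead, and ERR_PTR() carries the error.
 */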

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    __ceph_dir_is_complete(ci) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}
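
/*
 * The "conclude ENOENT locally" case above is the payoff of the
 * completeness tracking: when the directory is known complete and we
 * still hold CEPH_CAP_FILE_SHARED, a name that is not in the dcache
 * cannot exist, so we install a NULL (negative) dentry without talking
 * to the MDS and stamp it with the current i_shared_gen so that
 * dir_lease_is_valid() can later tell whether it is still trustworthy.
 */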

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);

	if (!err)
		ceph_init_acl(dentry, dentry->d_inode, dir);
	else
		d_drop(dentry);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (!err)
		ceph_init_acl(dentry, dentry->d_inode, dir);
	else
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_acl(dentry, dentry->d_inode, dir);
	else
		d_drop(dentry);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(old_dentry->d_inode);
		d_instantiate(dentry, old_dentry->d_inode);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}
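
/*
 * The mask computed here is used as req->r_inode_drop below, i.e. the
 * set of caps the client offers to release together with the unlink or
 * rename request so the MDS does not have to revoke them separately.
 */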

/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = S_ISDIR(dentry->d_inode->i_mode) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	ihold(old_dir);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = old_dir;
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up d_subdirs order */
		ceph_dir_clear_complete(new_dir);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}
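
/*
 * This per-dentry lease (granted by an MDS session and bounded by the
 * session's cap ttl) is one of two ways a dentry can stay valid; the
 * other is the directory-wide CEPH_CAP_FILE_SHARED check performed by
 * dir_lease_is_valid() below.  ceph_d_revalidate() accepts either.
 */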

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct inode *dir;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
	     ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
		valid = 1;
	} else if (dentry->d_inode &&
		   ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		if (dentry->d_inode)
			valid = ceph_is_any_caps(dentry->d_inode);
		else
			valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid) {
		ceph_dentry_lru_touch(dentry);
	} else {
		ceph_dir_clear_complete(dir);
		d_drop(dentry);
	}
	iput(dir);
	return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);
	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
	dentry->d_fsdata = NULL;
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     unsigned int flags)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect dir's completeness */
	if (d_unhashed(dentry))
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	ceph_dir_clear_complete(dentry->d_parent->d_inode);
}
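
/*
 * Clearing the complete flag here is what keeps the optimization
 * honest: once any hashed child has been pruned, neither
 * __dcache_readdir() nor the negative-lookup shortcut in ceph_lookup()
 * may assume the dcache still holds every entry of the parent dir.
 */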

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				"entries: %20lld\n"
				" files: %20lld\n"
				" subdirs: %20lld\n"
				"rentries: %20lld\n"
				" rfiles: %20lld\n"
				" rsubdirs: %20lld\n"
				"rbytes: %20lld\n"
				"rctime: %10ld.%09ld\n",
				ci->i_files + ci->i_subdirs,
				ci->i_files,
				ci->i_subdirs,
				ci->i_rfiles + ci->i_rsubdirs,
				ci->i_rfiles,
				ci->i_rsubdirs,
				ci->i_rbytes,
				(long)ci->i_rctime.tv_sec,
				(long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}
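
/*
 * With '-o dirstat', a read(2) on a directory therefore returns the
 * text built above, roughly of the form (values illustrative only):
 *
 *   entries:   42
 *    files:    30
 *    subdirs:  12
 *   rentries:  1024
 *   ...
 *
 * i.e. the recursive statistics cached in ceph_inode_info.
 */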

/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);

		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;  /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		ceph_mdsc_put_request(req);

		spin_lock(&ci->i_unsafe_lock);
		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	mutex_unlock(&inode->i_mutex);

	return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
	     dn->d_name.len, dn->d_name.name, di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}
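
/*
 * The hash algorithm is selected per directory via the dir layout
 * (i_dir_layout.dl_dir_hash): 0 and CEPH_STR_HASH_LINUX simply reuse
 * the hash the VFS already computed in d_name.hash, while anything
 * else goes through ceph_str_hash() so the client hashes names the
 * same way the MDS does when distributing them across frags.
 */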
1299
2817b000
SW
1300const struct file_operations ceph_dir_fops = {
1301 .read = ceph_read_dir,
77acfa29 1302 .iterate = ceph_readdir,
2817b000
SW
1303 .llseek = ceph_dir_llseek,
1304 .open = ceph_open,
1305 .release = ceph_release,
1306 .unlocked_ioctl = ceph_ioctl,
1307 .fsync = ceph_dir_fsync,
1308};
1309
1310const struct inode_operations ceph_dir_iops = {
1311 .lookup = ceph_lookup,
1312 .permission = ceph_permission,
1313 .getattr = ceph_getattr,
1314 .setattr = ceph_setattr,
1315 .setxattr = ceph_setxattr,
1316 .getxattr = ceph_getxattr,
1317 .listxattr = ceph_listxattr,
1318 .removexattr = ceph_removexattr,
7221fe4c 1319 .get_acl = ceph_get_acl,
72466d0b 1320 .set_acl = ceph_set_acl,
2817b000
SW
1321 .mknod = ceph_mknod,
1322 .symlink = ceph_symlink,
1323 .mkdir = ceph_mkdir,
1324 .link = ceph_link,
1325 .unlink = ceph_unlink,
1326 .rmdir = ceph_unlink,
1327 .rename = ceph_rename,
1328 .create = ceph_create,
2d83bde9 1329 .atomic_open = ceph_atomic_open,
2817b000
SW
1330};
1331
52dfb8ac 1332const struct dentry_operations ceph_dentry_ops = {
2817b000 1333 .d_revalidate = ceph_d_revalidate,
147851d2 1334 .d_release = ceph_d_release,
b58dc410 1335 .d_prune = ceph_d_prune,
2817b000
SW
1336};
1337
52dfb8ac 1338const struct dentry_operations ceph_snapdir_dentry_ops = {
2817b000 1339 .d_revalidate = ceph_snapdir_d_revalidate,
147851d2 1340 .d_release = ceph_d_release,
2817b000
SW
1341};
1342
52dfb8ac 1343const struct dentry_operations ceph_snap_dentry_ops = {
147851d2 1344 .d_release = ceph_d_release,
b58dc410 1345 .d_prune = ceph_d_prune,
2817b000 1346};