3d14c5d2 1#include <linux/ceph/ceph_debug.h>
2817b000
SW
2
3#include <linux/spinlock.h>
4#include <linux/fs_struct.h>
5#include <linux/namei.h>
5a0e3ad6 6#include <linux/slab.h>
2817b000
SW
7#include <linux/sched.h>
8
9#include "super.h"
3d14c5d2 10#include "mds_client.h"
2817b000
SW
11
12/*
13 * Directory operations: readdir, lookup, create, link, unlink,
14 * rename, etc.
15 */
16
17/*
18 * Ceph MDS operations are specified in terms of a base ino and
19 * relative path. Thus, the client can specify an operation on a
20 * specific inode (e.g., a getattr due to fstat(2)), or as a path
21 * relative to, say, the root directory.
22 *
23 * Normally, we limit ourselves to strict inode ops (no path component)
24 * or dentry operations (a single path component relative to an ino). The
25 * exception to this is open_root_dentry(), which will open the mount
26 * point by name.
27 */
28
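/*
 * Editor's sketch, not part of the original file: the two request
 * shapes described above, as they are built later in this file.  A
 * pure inode op carries only r_inode; a dentry op carries the dentry
 * (one path component) plus its locked parent directory as the base
 * ino.  lookup_request_example() is a hypothetical name that mirrors
 * ceph_lookup() below.
 */
static inline struct ceph_mds_request *
lookup_request_example(struct ceph_mds_client *mdsc, struct inode *dir,
		       struct dentry *dentry)
{
	struct ceph_mds_request *req;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP, USE_ANY_MDS);
	if (IS_ERR(req))
		return req;
	req->r_dentry = dget(dentry);	/* the single path component */
	req->r_num_caps = 2;
	req->r_locked_dir = dir;	/* base ino: the parent directory */
	return req;
}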
52dfb8ac 29const struct dentry_operations ceph_dentry_ops;
2817b000
SW
30
31/*
32 * Initialize ceph dentry state.
33 */
34int ceph_init_dentry(struct dentry *dentry)
35{
36 struct ceph_dentry_info *di;
37
38 if (dentry->d_fsdata)
39 return 0;
40
99ec2697 41 di = kmem_cache_zalloc(ceph_dentry_cachep, GFP_KERNEL);
2817b000
SW
42 if (!di)
43 return -ENOMEM; /* oh well */
44
45 spin_lock(&dentry->d_lock);
8c6efb58
SW
46 if (dentry->d_fsdata) {
47 /* lost a race */
48 kmem_cache_free(ceph_dentry_cachep, di);
2817b000 49 goto out_unlock;
8c6efb58 50 }
48d0cbd1 51
2b0143b5 52 if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP)
48d0cbd1 53 d_set_d_op(dentry, &ceph_dentry_ops);
2b0143b5 54 else if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR)
48d0cbd1
SW
55 d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
56 else
57 d_set_d_op(dentry, &ceph_snap_dentry_ops);
58
2817b000
SW
59 di->dentry = dentry;
60 di->lease_session = NULL;
2817b000 61 dentry->d_time = jiffies;
48d0cbd1
SW
62 /* avoid reordering d_fsdata setup so that the check above is safe */
63 smp_mb();
64 dentry->d_fsdata = di;
2817b000
SW
65 ceph_dentry_lru_add(dentry);
66out_unlock:
67 spin_unlock(&dentry->d_lock);
68 return 0;
69}
70
2817b000 71/*
f3c4ebe6
YZ
72 * for f_pos for readdir:
73 * - hash order:
74 * (0xff << 52) | ((24 bits hash) << 28) |
75 * (the nth entry among entries sharing that hash, to order collisions);
76 * - frag+name order:
77 * ((frag value) << 28) | (the nth entry in frag);
 * (a worked sketch of both encodings follows fpos_cmp() below)
2817b000 78 */
f3c4ebe6
YZ
79#define OFFSET_BITS 28
80#define OFFSET_MASK ((1 << OFFSET_BITS) - 1)
81#define HASH_ORDER (0xffull << (OFFSET_BITS + 24))
82loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order)
83{
84 loff_t fpos = ((loff_t)high << 28) | (loff_t)off;
85 if (hash_order)
86 fpos |= HASH_ORDER;
87 return fpos;
88}
89
90static bool is_hash_order(loff_t p)
91{
92 return (p & HASH_ORDER) == HASH_ORDER;
93}
94
2817b000
SW
95static unsigned fpos_frag(loff_t p)
96{
f3c4ebe6 97 return p >> OFFSET_BITS;
2817b000 98}
f3c4ebe6
YZ
99
100static unsigned fpos_hash(loff_t p)
101{
102 return ceph_frag_value(fpos_frag(p));
103}
104
2817b000
SW
105static unsigned fpos_off(loff_t p)
106{
f3c4ebe6 107 return p & OFFSET_MASK;
2817b000
SW
108}
109
4d5f5df6
YZ
110static int fpos_cmp(loff_t l, loff_t r)
111{
112 int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
113 if (v)
114 return v;
115 return (int)(fpos_off(l) - fpos_off(r));
116}
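
/*
 * Editor's sketch, not part of the original file: a worked example of
 * the f_pos encodings documented above, using the helpers just defined.
 * fpos_example() is a hypothetical name and the values are illustrative.
 * In frag+name order, the root frag (0) at entry 5 encodes as
 * (0 << 28) | 5 == 5; in hash order the top byte is 0xff, which is how
 * is_hash_order() tells the two encodings apart.
 */
static inline void fpos_example(void)
{
	loff_t pos = ceph_make_fpos(0, 5, false);	/* frag 0, 5th entry */

	BUG_ON(is_hash_order(pos));
	BUG_ON(fpos_frag(pos) != 0);
	BUG_ON(fpos_off(pos) != 5);

	pos = ceph_make_fpos(0x800001, 2, true);	/* 24-bit hash, 2nd entry */
	BUG_ON(!is_hash_order(pos));
	BUG_ON(fpos_hash(pos) != 0x800001);
	BUG_ON(fpos_off(pos) != 2);
}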
117
fdd4e158
YZ
118/*
119 * make note of the last dentry we read, so we can
120 * continue at the same lexicographical point,
121 * regardless of what dir changes take place on the
122 * server.
123 */
124static int note_last_dentry(struct ceph_file_info *fi, const char *name,
125 int len, unsigned next_offset)
126{
127 char *buf = kmalloc(len+1, GFP_KERNEL);
128 if (!buf)
129 return -ENOMEM;
130 kfree(fi->last_name);
131 fi->last_name = buf;
132 memcpy(fi->last_name, name, len);
133 fi->last_name[len] = 0;
134 fi->next_offset = next_offset;
135 dout("note_last_dentry '%s'\n", fi->last_name);
136 return 0;
137}
138
c530cd24
YZ
139
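/*
 * The readdir cache is an array of dentry pointers stored in the
 * directory inode's page cache; i_size tracks how many slots are
 * valid.  Index idx selects a page (PAGE_SIZE / sizeof(pointer)
 * slots per page) and a slot within that page.  For example, with
 * 4 KiB pages and 8-byte pointers, idx 513 lands on page 1, slot 1.
 */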
140static struct dentry *
141__dcache_find_get_entry(struct dentry *parent, u64 idx,
142 struct ceph_readdir_cache_control *cache_ctl)
143{
144 struct inode *dir = d_inode(parent);
145 struct dentry *dentry;
146 unsigned idx_mask = (PAGE_SIZE / sizeof(struct dentry *)) - 1;
147 loff_t ptr_pos = idx * sizeof(struct dentry *);
148 pgoff_t ptr_pgoff = ptr_pos >> PAGE_SHIFT;
149
150 if (ptr_pos >= i_size_read(dir))
151 return NULL;
152
153 if (!cache_ctl->page || ptr_pgoff != page_index(cache_ctl->page)) {
154 ceph_readdir_cache_release(cache_ctl);
155 cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
156 if (!cache_ctl->page) {
157 dout(" page %lu not found\n", ptr_pgoff);
158 return ERR_PTR(-EAGAIN);
159 }
160 /* reading/filling the cache are serialized by
161 i_mutex, no need to use page lock */
162 unlock_page(cache_ctl->page);
163 cache_ctl->dentries = kmap(cache_ctl->page);
164 }
165
166 cache_ctl->index = idx & idx_mask;
167
168 rcu_read_lock();
169 spin_lock(&parent->d_lock);
170 /* check i_size again here, because an empty directory can be
171 * marked as complete without the i_mutex being held. */
172 if (ceph_dir_is_complete_ordered(dir) && ptr_pos < i_size_read(dir))
173 dentry = cache_ctl->dentries[cache_ctl->index];
174 else
175 dentry = NULL;
176 spin_unlock(&parent->d_lock);
177 if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
178 dentry = NULL;
179 rcu_read_unlock();
180 return dentry ? : ERR_PTR(-EAGAIN);
181}
182
2817b000
SW
183/*
184 * When possible, we try to satisfy a readdir by peeking at the
185 * dcache. We make this work by carefully ordering dentries on
946e51f2 186 * d_child when we initially get results back from the MDS, and
2817b000
SW
187 * falling back to a "normal" sync readdir if any dentries in the dir
188 * are dropped.
189 *
2f276c51 190 * Complete dir indicates that we have all dentries in the dir. It is
2817b000
SW
191 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
192 * the MDS if/when the directory is modified).
193 */
a30be7cb
YZ
194static int __dcache_readdir(struct file *file, struct dir_context *ctx,
195 u32 shared_gen)
2817b000 196{
77acfa29 197 struct ceph_file_info *fi = file->private_data;
b583043e 198 struct dentry *parent = file->f_path.dentry;
2b0143b5 199 struct inode *dir = d_inode(parent);
fdd4e158 200 struct dentry *dentry, *last = NULL;
2817b000 201 struct ceph_dentry_info *di;
fdd4e158 202 struct ceph_readdir_cache_control cache_ctl = {};
c530cd24
YZ
203 u64 idx = 0;
204 int err = 0;
2817b000 205
f3c4ebe6 206 dout("__dcache_readdir %p v%u at %llx\n", dir, shared_gen, ctx->pos);
2817b000 207
c530cd24
YZ
208 /* search start position */
209 if (ctx->pos > 2) {
210 u64 count = div_u64(i_size_read(dir), sizeof(struct dentry *));
211 while (count > 0) {
212 u64 step = count >> 1;
213 dentry = __dcache_find_get_entry(parent, idx + step,
214 &cache_ctl);
215 if (!dentry) {
216 /* use linear search */
217 idx = 0;
218 break;
219 }
220 if (IS_ERR(dentry)) {
221 err = PTR_ERR(dentry);
222 goto out;
223 }
224 di = ceph_dentry(dentry);
225 spin_lock(&dentry->d_lock);
226 if (fpos_cmp(di->offset, ctx->pos) < 0) {
227 idx += step + 1;
228 count -= step + 1;
229 } else {
230 count = step;
231 }
232 spin_unlock(&dentry->d_lock);
233 dput(dentry);
234 }
235
236 dout("__dcache_readdir %p cache idx %llu\n", dir, idx);
2817b000
SW
237 }
238
fdd4e158 239
c530cd24
YZ
240 for (;;) {
241 bool emit_dentry = false;
242 dentry = __dcache_find_get_entry(parent, idx++, &cache_ctl);
243 if (!dentry) {
9cfa1098 244 fi->flags |= CEPH_F_ATEND;
fdd4e158
YZ
245 err = 0;
246 break;
2817b000 247 }
c530cd24
YZ
248 if (IS_ERR(dentry)) {
249 err = PTR_ERR(dentry);
250 goto out;
fdd4e158
YZ
251 }
252
fdd4e158
YZ
253 di = ceph_dentry(dentry);
254 spin_lock(&dentry->d_lock);
a30be7cb 255 if (di->lease_shared_gen == shared_gen &&
fdd4e158 256 d_really_is_positive(dentry) &&
fdd4e158
YZ
257 fpos_cmp(ctx->pos, di->offset) <= 0) {
258 emit_dentry = true;
259 }
da502956 260 spin_unlock(&dentry->d_lock);
2817b000 261
fdd4e158 262 if (emit_dentry) {
f3c4ebe6 263 dout(" %llx dentry %p %pd %p\n", di->offset,
fdd4e158
YZ
264 dentry, dentry, d_inode(dentry));
265 ctx->pos = di->offset;
266 if (!dir_emit(ctx, dentry->d_name.name,
267 dentry->d_name.len,
268 ceph_translate_ino(dentry->d_sb,
269 d_inode(dentry)->i_ino),
270 d_inode(dentry)->i_mode >> 12)) {
271 dput(dentry);
272 err = 0;
273 break;
274 }
275 ctx->pos++;
0081bd83 276
fdd4e158
YZ
277 if (last)
278 dput(last);
279 last = dentry;
280 } else {
281 dput(dentry);
2817b000 282 }
fdd4e158 283 }
c530cd24 284out:
fdd4e158
YZ
285 ceph_readdir_cache_release(&cache_ctl);
286 if (last) {
287 int ret;
288 di = ceph_dentry(last);
289 ret = note_last_dentry(fi, last->d_name.name, last->d_name.len,
290 fpos_off(di->offset) + 1);
291 if (ret < 0)
292 err = ret;
2817b000 293 dput(last);
fdd4e158 294 }
2817b000
SW
295 return err;
296}
297
f3c4ebe6
YZ
298static bool need_send_readdir(struct ceph_file_info *fi, loff_t pos)
299{
300 if (!fi->last_readdir)
301 return true;
302 if (is_hash_order(pos))
303 return !ceph_frag_contains_value(fi->frag, fpos_hash(pos));
304 else
305 return fi->frag != fpos_frag(pos);
306}
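
/*
 * Editor's sketch, not part of the original file: need_send_readdir()
 * in action.  need_send_readdir_example() is a hypothetical helper and
 * assumes the frag encoding from ceph_frag.h (top 8 bits = split bits,
 * low 24 bits = value).  After one split, the children are "0*"
 * (ceph_frag_make(1, 0)) and "1*"; a position whose 24-bit hash has the
 * top bit set cannot be served from a chunk buffered for "0*".
 */
static inline void need_send_readdir_example(struct ceph_file_info *fi)
{
	loff_t pos = ceph_make_fpos(0x800001, 2, true);	/* hash lies in "1*" */

	fi->frag = ceph_frag_make(1, 0);		/* buffered chunk is "0*" */
	BUG_ON(!need_send_readdir(fi, pos));		/* must refetch from MDS */
}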
307
77acfa29 308static int ceph_readdir(struct file *file, struct dir_context *ctx)
2817b000 309{
77acfa29
AV
310 struct ceph_file_info *fi = file->private_data;
311 struct inode *inode = file_inode(file);
2817b000 312 struct ceph_inode_info *ci = ceph_inode(inode);
3d14c5d2
YS
313 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
314 struct ceph_mds_client *mdsc = fsc->mdsc;
8974eebd 315 int i;
2817b000
SW
316 int err;
317 u32 ftype;
318 struct ceph_mds_reply_info_parsed *rinfo;
2817b000 319
8974eebd 320 dout("readdir %p file %p pos %llx\n", inode, file, ctx->pos);
9cfa1098 321 if (fi->flags & CEPH_F_ATEND)
2817b000
SW
322 return 0;
323
324 /* always start with . and .. */
77acfa29 325 if (ctx->pos == 0) {
2817b000 326 dout("readdir off 0 -> '.'\n");
77acfa29 327 if (!dir_emit(ctx, ".", 1,
ad1fee96 328 ceph_translate_ino(inode->i_sb, inode->i_ino),
77acfa29 329 inode->i_mode >> 12))
2817b000 330 return 0;
77acfa29 331 ctx->pos = 1;
2817b000 332 }
77acfa29 333 if (ctx->pos == 1) {
b583043e 334 ino_t ino = parent_ino(file->f_path.dentry);
2817b000 335 dout("readdir off 1 -> '..'\n");
77acfa29 336 if (!dir_emit(ctx, "..", 2,
ad1fee96 337 ceph_translate_ino(inode->i_sb, ino),
77acfa29 338 inode->i_mode >> 12))
2817b000 339 return 0;
77acfa29 340 ctx->pos = 2;
2817b000
SW
341 }
342
343 /* can we use the dcache? */
be655596 344 spin_lock(&ci->i_ceph_lock);
fdd4e158 345 if (ceph_test_mount_opt(fsc, DCACHE) &&
3d14c5d2 346 !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
a0dff78d 347 ceph_snap(inode) != CEPH_SNAPDIR &&
70db4f36 348 __ceph_dir_is_complete_ordered(ci) &&
2817b000 349 __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
a30be7cb 350 u32 shared_gen = ci->i_shared_gen;
be655596 351 spin_unlock(&ci->i_ceph_lock);
a30be7cb 352 err = __dcache_readdir(file, ctx, shared_gen);
efa4c120 353 if (err != -EAGAIN)
2817b000 354 return err;
efa4c120 355 } else {
be655596 356 spin_unlock(&ci->i_ceph_lock);
2817b000 357 }
2817b000
SW
358
359 /* proceed with a normal readdir */
2817b000
SW
360more:
361 /* do we have the correct frag content buffered? */
f3c4ebe6 362 if (need_send_readdir(fi, ctx->pos)) {
2817b000 363 struct ceph_mds_request *req;
f3c4ebe6 364 unsigned frag;
2817b000
SW
365 int op = ceph_snap(inode) == CEPH_SNAPDIR ?
366 CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;
367
368 /* discard old result, if any */
393f6620 369 if (fi->last_readdir) {
2817b000 370 ceph_mdsc_put_request(fi->last_readdir);
393f6620
SW
371 fi->last_readdir = NULL;
372 }
2817b000 373
f3c4ebe6
YZ
374 if (is_hash_order(ctx->pos)) {
375 frag = ceph_choose_frag(ci, fpos_hash(ctx->pos),
376 NULL, NULL);
377 } else {
378 frag = fpos_frag(ctx->pos);
379 }
380
2817b000
SW
381 dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
382 ceph_vinop(inode), frag, fi->last_name);
383 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
384 if (IS_ERR(req))
385 return PTR_ERR(req);
54008399
YZ
386 err = ceph_alloc_readdir_reply_buffer(req, inode);
387 if (err) {
388 ceph_mdsc_put_request(req);
389 return err;
390 }
2817b000
SW
391 /* hints to request -> mds selection code */
392 req->r_direct_mode = USE_AUTH_MDS;
393 req->r_direct_hash = ceph_frag_value(frag);
394 req->r_direct_is_hash = true;
a149bb9a 395 if (fi->last_name) {
687265e5 396 req->r_path2 = kstrdup(fi->last_name, GFP_KERNEL);
a149bb9a
SK
397 if (!req->r_path2) {
398 ceph_mdsc_put_request(req);
399 return -ENOMEM;
400 }
401 }
fdd4e158
YZ
402 req->r_dir_release_cnt = fi->dir_release_count;
403 req->r_dir_ordered_cnt = fi->dir_ordered_count;
404 req->r_readdir_cache_idx = fi->readdir_cache_idx;
2817b000
SW
405 req->r_readdir_offset = fi->next_offset;
406 req->r_args.readdir.frag = cpu_to_le32(frag);
956d39d6
YZ
407 req->r_args.readdir.flags =
408 cpu_to_le16(CEPH_READDIR_REPLY_BITFLAGS);
a149bb9a
SK
409
410 req->r_inode = inode;
411 ihold(inode);
412 req->r_dentry = dget(file->f_path.dentry);
2817b000
SW
413 err = ceph_mdsc_do_request(mdsc, NULL, req);
414 if (err < 0) {
415 ceph_mdsc_put_request(req);
416 return err;
417 }
f3c4ebe6
YZ
418 dout("readdir got and parsed readdir result=%d on "
419 "frag %x, end=%d, complete=%d, hash_order=%d\n",
420 err, frag,
2817b000 421 (int)req->r_reply_info.dir_end,
f3c4ebe6
YZ
422 (int)req->r_reply_info.dir_complete,
423 (int)req->r_reply_info.hash_order);
2817b000 424
81c6aea5
YZ
425 rinfo = &req->r_reply_info;
426 if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
427 frag = le32_to_cpu(rinfo->dir_dir->frag);
f3c4ebe6
YZ
428 if (!rinfo->hash_order) {
429 fi->next_offset = req->r_readdir_offset;
430 /* adjust ctx->pos to beginning of frag */
431 ctx->pos = ceph_make_fpos(frag,
432 fi->next_offset,
433 false);
434 }
81c6aea5 435 }
fdd4e158 436
f0494206 437 fi->frag = frag;
2817b000
SW
438 fi->last_readdir = req;
439
fdd4e158
YZ
440 if (req->r_did_prepopulate) {
441 fi->readdir_cache_idx = req->r_readdir_cache_idx;
442 if (fi->readdir_cache_idx < 0) {
443 /* preclude from marking dir ordered */
444 fi->dir_ordered_count = 0;
8974eebd
YZ
445 } else if (ceph_frag_is_leftmost(frag) &&
446 fi->next_offset == 2) {
fdd4e158
YZ
447 /* note dir version at start of readdir so
448 * we can tell if any dentries get dropped */
449 fi->dir_release_count = req->r_dir_release_cnt;
450 fi->dir_ordered_count = req->r_dir_ordered_cnt;
451 }
452 } else {
453 dout("readdir !did_prepopulate");
454 /* disable readdir cache */
455 fi->readdir_cache_idx = -1;
456 /* preclude from marking dir complete */
457 fi->dir_release_count = 0;
458 }
459
f3c4ebe6
YZ
460 /* note next offset and last dentry name */
461 if (rinfo->dir_nr > 0) {
2a5beea3
YZ
462 struct ceph_mds_reply_dir_entry *rde =
463 rinfo->dir_entries + (rinfo->dir_nr-1);
f3c4ebe6
YZ
464 unsigned next_offset = req->r_reply_info.dir_end ?
465 2 : (fpos_off(rde->offset) + 1);
2a5beea3 466 err = note_last_dentry(fi, rde->name, rde->name_len,
f3c4ebe6 467 next_offset);
2817b000
SW
468 if (err)
469 return err;
f3c4ebe6
YZ
470 } else if (req->r_reply_info.dir_end) {
471 fi->next_offset = 2;
472 /* keep last name */
2817b000
SW
473 }
474 }
475
476 rinfo = &fi->last_readdir->r_reply_info;
8974eebd 477 dout("readdir frag %x num %d pos %llx chunk first %llx\n",
f3c4ebe6 478 fi->frag, rinfo->dir_nr, ctx->pos,
8974eebd 479 rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL);
77acfa29 480
8974eebd
YZ
481 i = 0;
482 /* search start position */
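 /* lower-bound binary search: find the first cached entry whose
 * offset is >= ctx->pos, so entries already emitted are not repeated */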
483 if (rinfo->dir_nr > 0) {
484 int step, nr = rinfo->dir_nr;
485 while (nr > 0) {
486 step = nr >> 1;
487 if (rinfo->dir_entries[i + step].offset < ctx->pos) {
488 i += step + 1;
489 nr -= step + 1;
490 } else {
491 nr = step;
492 }
493 }
494 }
495 for (; i < rinfo->dir_nr; i++) {
496 struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
3105c19c
SW
497 struct ceph_vino vino;
498 ino_t ino;
499
8974eebd
YZ
500 BUG_ON(rde->offset < ctx->pos);
501
502 ctx->pos = rde->offset;
503 dout("readdir (%d/%d) -> %llx '%.*s' %p\n",
504 i, rinfo->dir_nr, ctx->pos,
2a5beea3 505 rde->name_len, rde->name, &rde->inode.in);
8974eebd 506
2a5beea3
YZ
507 BUG_ON(!rde->inode.in);
508 ftype = le32_to_cpu(rde->inode.in->mode) >> 12;
509 vino.ino = le64_to_cpu(rde->inode.in->ino);
510 vino.snap = le64_to_cpu(rde->inode.in->snapid);
3105c19c 511 ino = ceph_vino_to_ino(vino);
8974eebd 512
2a5beea3
YZ
513 if (!dir_emit(ctx, rde->name, rde->name_len,
514 ceph_translate_ino(inode->i_sb, ino), ftype)) {
2817b000
SW
515 dout("filldir stopping us...\n");
516 return 0;
517 }
77acfa29 518 ctx->pos++;
2817b000
SW
519 }
520
f3c4ebe6 521 if (fi->next_offset > 2) {
2817b000
SW
522 ceph_mdsc_put_request(fi->last_readdir);
523 fi->last_readdir = NULL;
524 goto more;
525 }
526
527 /* more frags? */
f3c4ebe6
YZ
528 if (!ceph_frag_is_rightmost(fi->frag)) {
529 unsigned frag = ceph_frag_next(fi->frag);
530 if (is_hash_order(ctx->pos)) {
531 loff_t new_pos = ceph_make_fpos(ceph_frag_value(frag),
532 fi->next_offset, true);
533 if (new_pos > ctx->pos)
534 ctx->pos = new_pos;
535 /* keep last_name */
536 } else {
537 ctx->pos = ceph_make_fpos(frag, fi->next_offset, false);
538 kfree(fi->last_name);
539 fi->last_name = NULL;
540 }
2817b000
SW
541 dout("readdir next frag is %x\n", frag);
542 goto more;
543 }
9cfa1098 544 fi->flags |= CEPH_F_ATEND;
2817b000
SW
545
546 /*
547 * if dir_release_count still matches the dir, no dentries
548 * were released during the whole readdir, and we should have
549 * the complete dir contents in our cache.
550 */
fdd4e158
YZ
551 if (atomic64_read(&ci->i_release_count) == fi->dir_release_count) {
552 spin_lock(&ci->i_ceph_lock);
553 if (fi->dir_ordered_count == atomic64_read(&ci->i_ordered_count)) {
70db4f36 554 dout(" marking %p complete and ordered\n", inode);
fdd4e158
YZ
555 /* use i_size to track number of entries in
556 * readdir cache */
557 BUG_ON(fi->readdir_cache_idx < 0);
558 i_size_write(inode, fi->readdir_cache_idx *
559 sizeof(struct dentry*));
560 } else {
70db4f36 561 dout(" marking %p complete\n", inode);
fdd4e158 562 }
70db4f36
YZ
563 __ceph_dir_set_complete(ci, fi->dir_release_count,
564 fi->dir_ordered_count);
fdd4e158 565 spin_unlock(&ci->i_ceph_lock);
2817b000 566 }
2817b000 567
77acfa29 568 dout("readdir %p file %p done.\n", inode, file);
2817b000
SW
569 return 0;
570}
571
8974eebd 572static void reset_readdir(struct ceph_file_info *fi)
2817b000
SW
573{
574 if (fi->last_readdir) {
575 ceph_mdsc_put_request(fi->last_readdir);
576 fi->last_readdir = NULL;
577 }
578 kfree(fi->last_name);
a1629c3b 579 fi->last_name = NULL;
fdd4e158
YZ
580 fi->dir_release_count = 0;
581 fi->readdir_cache_idx = -1;
a78600e7 582 fi->next_offset = 2; /* compensate for . and .. */
9cfa1098 583 fi->flags &= ~CEPH_F_ATEND;
2817b000
SW
584}
585
8974eebd
YZ
586/*
587 * discard buffered readdir content on seekdir(0), or seek to new frag,
588 * or seek prior to current chunk
589 */
590static bool need_reset_readdir(struct ceph_file_info *fi, loff_t new_pos)
591{
592 struct ceph_mds_reply_info_parsed *rinfo;
f3c4ebe6 593 loff_t chunk_offset;
8974eebd
YZ
594 if (new_pos == 0)
595 return true;
f3c4ebe6
YZ
596 if (is_hash_order(new_pos)) {
597 /* no need to reset last_name for a forward seek when
598 * dentries are sorted in hash order */
599 } else if (fi->frag != fpos_frag(new_pos)) {
8974eebd 600 return true;
f3c4ebe6 601 }
8974eebd
YZ
602 rinfo = fi->last_readdir ? &fi->last_readdir->r_reply_info : NULL;
603 if (!rinfo || !rinfo->dir_nr)
604 return true;
f3c4ebe6
YZ
605 chunk_offset = rinfo->dir_entries[0].offset;
606 return new_pos < chunk_offset ||
607 is_hash_order(new_pos) != is_hash_order(chunk_offset);
8974eebd
YZ
608}
609
965c8e59 610static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
2817b000
SW
611{
612 struct ceph_file_info *fi = file->private_data;
613 struct inode *inode = file->f_mapping->host;
2817b000
SW
614 loff_t retval;
615
5955102c 616 inode_lock(inode);
06222e49 617 retval = -EINVAL;
965c8e59 618 switch (whence) {
2817b000
SW
619 case SEEK_CUR:
620 offset += file->f_pos;
06222e49
JB
621 case SEEK_SET:
622 break;
fdd4e158
YZ
623 case SEEK_END:
624 retval = -EOPNOTSUPP;
06222e49
JB
625 default:
626 goto out;
2817b000 627 }
06222e49 628
f0494206 629 if (offset >= 0) {
f3c4ebe6
YZ
630 if (need_reset_readdir(fi, offset)) {
631 dout("dir_llseek dropping %p content\n", file);
632 reset_readdir(fi);
633 } else if (is_hash_order(offset) && offset > file->f_pos) {
634 /* for hash offset, we don't know if a forward seek
635 * is within same frag */
636 fi->dir_release_count = 0;
637 fi->readdir_cache_idx = -1;
638 }
639
2817b000
SW
640 if (offset != file->f_pos) {
641 file->f_pos = offset;
642 file->f_version = 0;
9cfa1098 643 fi->flags &= ~CEPH_F_ATEND;
2817b000
SW
644 }
645 retval = offset;
2817b000 646 }
06222e49 647out:
5955102c 648 inode_unlock(inode);
2817b000
SW
649 return retval;
650}
651
652/*
468640e3 653 * Handle lookups for the hidden .snap directory.
2817b000 654 */
468640e3
SW
655int ceph_handle_snapdir(struct ceph_mds_request *req,
656 struct dentry *dentry, int err)
2817b000 657{
3d14c5d2 658 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
2b0143b5 659 struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */
2817b000
SW
660
661 /* .snap dir? */
662 if (err == -ENOENT &&
455cec0a 663 ceph_snap(parent) == CEPH_NOSNAP &&
6b805185 664 strcmp(dentry->d_name.name,
3d14c5d2 665 fsc->mount_options->snapdir_name) == 0) {
2817b000 666 struct inode *inode = ceph_get_snapdir(parent);
a455589f
AV
667 dout("ENOENT on snapdir %p '%pd', linking to snapdir %p\n",
668 dentry, dentry, inode);
9358c6d4 669 BUG_ON(!d_unhashed(dentry));
2817b000
SW
670 d_add(dentry, inode);
671 err = 0;
672 }
468640e3
SW
673 return err;
674}
2817b000 675
468640e3
SW
676/*
677 * Figure out final result of a lookup/open request.
678 *
679 * Mainly, make sure we return the final req->r_dentry (if it already
680 * existed) in place of the original VFS-provided dentry when they
681 * differ.
682 *
683 * Gracefully handle the case where the MDS replies with -ENOENT and
684 * no trace (which it may do, at its discretion, e.g., if it doesn't
685 * care to issue a lease on the negative dentry).
686 */
687struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
688 struct dentry *dentry, int err)
689{
2817b000
SW
690 if (err == -ENOENT) {
691 /* no trace? */
692 err = 0;
693 if (!req->r_reply_info.head->is_dentry) {
694 dout("ENOENT and no trace, dentry %p inode %p\n",
2b0143b5
DH
695 dentry, d_inode(dentry));
696 if (d_really_is_positive(dentry)) {
2817b000
SW
697 d_drop(dentry);
698 err = -ENOENT;
699 } else {
700 d_add(dentry, NULL);
701 }
702 }
703 }
704 if (err)
705 dentry = ERR_PTR(err);
706 else if (dentry != req->r_dentry)
707 dentry = dget(req->r_dentry); /* we got spliced */
708 else
709 dentry = NULL;
710 return dentry;
711}
712
1d1de916
SW
713static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
714{
715 return ceph_ino(inode) == CEPH_INO_ROOT &&
716 strncmp(dentry->d_name.name, ".ceph", 5) == 0;
717}
718
2817b000
SW
719/*
720 * Look up a single dir entry. If there is a lookup intent, inform
721 * the MDS so that it gets our 'caps wanted' value in a single op.
722 */
723static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
00cd8dd3 724 unsigned int flags)
2817b000 725{
3d14c5d2
YS
726 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
727 struct ceph_mds_client *mdsc = fsc->mdsc;
2817b000
SW
728 struct ceph_mds_request *req;
729 int op;
315f2408 730 int mask;
2817b000
SW
731 int err;
732
a455589f
AV
733 dout("lookup %p dentry %p '%pd'\n",
734 dir, dentry, dentry);
2817b000
SW
735
736 if (dentry->d_name.len > NAME_MAX)
737 return ERR_PTR(-ENAMETOOLONG);
738
739 err = ceph_init_dentry(dentry);
740 if (err < 0)
741 return ERR_PTR(err);
742
2817b000 743 /* can we conclude ENOENT locally? */
2b0143b5 744 if (d_really_is_negative(dentry)) {
2817b000
SW
745 struct ceph_inode_info *ci = ceph_inode(dir);
746 struct ceph_dentry_info *di = ceph_dentry(dentry);
747
be655596 748 spin_lock(&ci->i_ceph_lock);
2817b000
SW
749 dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
750 if (strncmp(dentry->d_name.name,
3d14c5d2 751 fsc->mount_options->snapdir_name,
2817b000 752 dentry->d_name.len) &&
1d1de916 753 !is_root_ceph_dentry(dir, dentry) &&
e2c3de04 754 ceph_test_mount_opt(fsc, DCACHE) &&
2f276c51 755 __ceph_dir_is_complete(ci) &&
2817b000 756 (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
be655596 757 spin_unlock(&ci->i_ceph_lock);
2817b000
SW
758 dout(" dir %p complete, -ENOENT\n", dir);
759 d_add(dentry, NULL);
760 di->lease_shared_gen = ci->i_shared_gen;
761 return NULL;
762 }
be655596 763 spin_unlock(&ci->i_ceph_lock);
2817b000
SW
764 }
765
766 op = ceph_snap(dir) == CEPH_SNAPDIR ?
767 CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
768 req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
769 if (IS_ERR(req))
7e34bc52 770 return ERR_CAST(req);
2817b000
SW
771 req->r_dentry = dget(dentry);
772 req->r_num_caps = 2;
315f2408
YZ
773
774 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
775 if (ceph_security_xattr_wanted(dir))
776 mask |= CEPH_CAP_XATTR_SHARED;
777 req->r_args.getattr.mask = cpu_to_le32(mask);
778
2817b000
SW
779 req->r_locked_dir = dir;
780 err = ceph_mdsc_do_request(mdsc, NULL, req);
468640e3 781 err = ceph_handle_snapdir(req, dentry, err);
2817b000
SW
782 dentry = ceph_finish_lookup(req, dentry, err);
783 ceph_mdsc_put_request(req); /* will dput(dentry) */
784 dout("lookup result=%p\n", dentry);
785 return dentry;
786}
787
788/*
789 * If we do a create but get no trace back from the MDS, follow up with
790 * a lookup (the VFS expects us to link up the provided dentry).
791 */
792int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
793{
00cd8dd3 794 struct dentry *result = ceph_lookup(dir, dentry, 0);
2817b000
SW
795
796 if (result && !IS_ERR(result)) {
797 /*
798 * We created the item, then did a lookup, and found
799 * it was already linked to another inode we already
4d41cef2
YZ
800 * had in our cache (and thus got spliced). To not
801 * confuse VFS (especially when inode is a directory),
802 * we don't link our dentry to that inode; we return an
803 * error instead.
804 *
805 * This event should be rare, and it happens only when we
806 * talk to an old MDS. A recent MDS does not send a traceless
807 * reply for a request that creates a new inode.
2817b000 808 */
5cba372c 809 d_drop(result);
4d41cef2 810 return -ESTALE;
2817b000
SW
811 }
812 return PTR_ERR(result);
813}
814
815static int ceph_mknod(struct inode *dir, struct dentry *dentry,
1a67aafb 816 umode_t mode, dev_t rdev)
2817b000 817{
3d14c5d2
YS
818 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
819 struct ceph_mds_client *mdsc = fsc->mdsc;
2817b000 820 struct ceph_mds_request *req;
b1ee94aa 821 struct ceph_acls_info acls = {};
2817b000
SW
822 int err;
823
824 if (ceph_snap(dir) != CEPH_NOSNAP)
825 return -EROFS;
826
b1ee94aa
YZ
827 err = ceph_pre_init_acls(dir, &mode, &acls);
828 if (err < 0)
829 return err;
830
1a67aafb 831 dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
2817b000
SW
832 dir, dentry, mode, rdev);
833 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
834 if (IS_ERR(req)) {
b1ee94aa
YZ
835 err = PTR_ERR(req);
836 goto out;
2817b000
SW
837 }
838 req->r_dentry = dget(dentry);
839 req->r_num_caps = 2;
840 req->r_locked_dir = dir;
841 req->r_args.mknod.mode = cpu_to_le32(mode);
842 req->r_args.mknod.rdev = cpu_to_le32(rdev);
843 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
844 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
b1ee94aa
YZ
845 if (acls.pagelist) {
846 req->r_pagelist = acls.pagelist;
847 acls.pagelist = NULL;
848 }
2817b000
SW
849 err = ceph_mdsc_do_request(mdsc, dir, req);
850 if (!err && !req->r_reply_info.head->is_dentry)
851 err = ceph_handle_notrace_create(dir, dentry);
852 ceph_mdsc_put_request(req);
b1ee94aa 853out:
7221fe4c 854 if (!err)
2b0143b5 855 ceph_init_inode_acls(d_inode(dentry), &acls);
b20a95a0 856 else
2817b000 857 d_drop(dentry);
b1ee94aa 858 ceph_release_acls_info(&acls);
2817b000
SW
859 return err;
860}
861
4acdaf27 862static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
ebfc3b49 863 bool excl)
2817b000 864{
2d83bde9 865 return ceph_mknod(dir, dentry, mode, 0);
2817b000
SW
866}
867
868static int ceph_symlink(struct inode *dir, struct dentry *dentry,
869 const char *dest)
870{
3d14c5d2
YS
871 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
872 struct ceph_mds_client *mdsc = fsc->mdsc;
2817b000
SW
873 struct ceph_mds_request *req;
874 int err;
875
876 if (ceph_snap(dir) != CEPH_NOSNAP)
877 return -EROFS;
878
879 dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
880 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
881 if (IS_ERR(req)) {
b1ee94aa
YZ
882 err = PTR_ERR(req);
883 goto out;
2817b000 884 }
687265e5 885 req->r_path2 = kstrdup(dest, GFP_KERNEL);
a149bb9a
SK
886 if (!req->r_path2) {
887 err = -ENOMEM;
888 ceph_mdsc_put_request(req);
889 goto out;
890 }
2817b000 891 req->r_locked_dir = dir;
a149bb9a
SK
892 req->r_dentry = dget(dentry);
893 req->r_num_caps = 2;
2817b000
SW
894 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
895 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
896 err = ceph_mdsc_do_request(mdsc, dir, req);
897 if (!err && !req->r_reply_info.head->is_dentry)
898 err = ceph_handle_notrace_create(dir, dentry);
899 ceph_mdsc_put_request(req);
b1ee94aa
YZ
900out:
901 if (err)
2817b000
SW
902 d_drop(dentry);
903 return err;
904}
905
18bb1db3 906static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2817b000 907{
3d14c5d2
YS
908 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
909 struct ceph_mds_client *mdsc = fsc->mdsc;
2817b000 910 struct ceph_mds_request *req;
b1ee94aa 911 struct ceph_acls_info acls = {};
2817b000
SW
912 int err = -EROFS;
913 int op;
914
915 if (ceph_snap(dir) == CEPH_SNAPDIR) {
916 /* mkdir .snap/foo is a MKSNAP */
917 op = CEPH_MDS_OP_MKSNAP;
a455589f
AV
918 dout("mksnap dir %p snap '%pd' dn %p\n", dir,
919 dentry, dentry);
2817b000 920 } else if (ceph_snap(dir) == CEPH_NOSNAP) {
18bb1db3 921 dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
2817b000
SW
922 op = CEPH_MDS_OP_MKDIR;
923 } else {
924 goto out;
925 }
b1ee94aa
YZ
926
927 mode |= S_IFDIR;
928 err = ceph_pre_init_acls(dir, &mode, &acls);
929 if (err < 0)
930 goto out;
931
2817b000
SW
932 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
933 if (IS_ERR(req)) {
934 err = PTR_ERR(req);
935 goto out;
936 }
937
938 req->r_dentry = dget(dentry);
939 req->r_num_caps = 2;
940 req->r_locked_dir = dir;
941 req->r_args.mkdir.mode = cpu_to_le32(mode);
942 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
943 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
b1ee94aa
YZ
944 if (acls.pagelist) {
945 req->r_pagelist = acls.pagelist;
946 acls.pagelist = NULL;
947 }
2817b000 948 err = ceph_mdsc_do_request(mdsc, dir, req);
275dd19e
YZ
949 if (!err &&
950 !req->r_reply_info.head->is_target &&
951 !req->r_reply_info.head->is_dentry)
2817b000
SW
952 err = ceph_handle_notrace_create(dir, dentry);
953 ceph_mdsc_put_request(req);
954out:
b20a95a0 955 if (!err)
2b0143b5 956 ceph_init_inode_acls(d_inode(dentry), &acls);
b20a95a0 957 else
2817b000 958 d_drop(dentry);
b1ee94aa 959 ceph_release_acls_info(&acls);
2817b000
SW
960 return err;
961}
962
963static int ceph_link(struct dentry *old_dentry, struct inode *dir,
964 struct dentry *dentry)
965{
3d14c5d2
YS
966 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
967 struct ceph_mds_client *mdsc = fsc->mdsc;
2817b000
SW
968 struct ceph_mds_request *req;
969 int err;
970
971 if (ceph_snap(dir) != CEPH_NOSNAP)
972 return -EROFS;
973
974 dout("link in dir %p old_dentry %p dentry %p\n", dir,
975 old_dentry, dentry);
976 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
977 if (IS_ERR(req)) {
978 d_drop(dentry);
979 return PTR_ERR(req);
980 }
981 req->r_dentry = dget(dentry);
982 req->r_num_caps = 2;
4b58c9b1 983 req->r_old_dentry = dget(old_dentry);
2817b000
SW
984 req->r_locked_dir = dir;
985 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
986 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
ad88f23f
YZ
987 /* release LINK_SHARED on source inode (mds will lock it) */
988 req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
2817b000 989 err = ceph_mdsc_do_request(mdsc, dir, req);
70b666c3 990 if (err) {
2817b000 991 d_drop(dentry);
70b666c3 992 } else if (!req->r_reply_info.head->is_dentry) {
2b0143b5
DH
993 ihold(d_inode(old_dentry));
994 d_instantiate(dentry, d_inode(old_dentry));
70b666c3 995 }
2817b000
SW
996 ceph_mdsc_put_request(req);
997 return err;
998}
999
1000/*
1001 * For a soon-to-be unlinked file, drop the LINK caps. If it
1002 * looks like the link count will hit 0, drop any other caps (other
1003 * than PIN) we don't specifically want (due to the file still being
1004 * open).
1005 */
1006static int drop_caps_for_unlink(struct inode *inode)
1007{
1008 struct ceph_inode_info *ci = ceph_inode(inode);
1009 int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
1010
be655596 1011 spin_lock(&ci->i_ceph_lock);
2817b000
SW
1012 if (inode->i_nlink == 1) {
1013 drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
1014 ci->i_ceph_flags |= CEPH_I_NODELAY;
1015 }
be655596 1016 spin_unlock(&ci->i_ceph_lock);
2817b000
SW
1017 return drop;
1018}
1019
1020/*
1021 * rmdir and unlink differ only by the metadata op code
1022 */
1023static int ceph_unlink(struct inode *dir, struct dentry *dentry)
1024{
3d14c5d2
YS
1025 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
1026 struct ceph_mds_client *mdsc = fsc->mdsc;
2b0143b5 1027 struct inode *inode = d_inode(dentry);
2817b000
SW
1028 struct ceph_mds_request *req;
1029 int err = -EROFS;
1030 int op;
1031
1032 if (ceph_snap(dir) == CEPH_SNAPDIR) {
1033 /* rmdir .snap/foo is RMSNAP */
a455589f 1034 dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
2817b000
SW
1035 op = CEPH_MDS_OP_RMSNAP;
1036 } else if (ceph_snap(dir) == CEPH_NOSNAP) {
1037 dout("unlink/rmdir dir %p dn %p inode %p\n",
1038 dir, dentry, inode);
e36cb0b8 1039 op = d_is_dir(dentry) ?
2817b000
SW
1040 CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
1041 } else
1042 goto out;
1043 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
1044 if (IS_ERR(req)) {
1045 err = PTR_ERR(req);
1046 goto out;
1047 }
1048 req->r_dentry = dget(dentry);
1049 req->r_num_caps = 2;
1050 req->r_locked_dir = dir;
1051 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
1052 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
1053 req->r_inode_drop = drop_caps_for_unlink(inode);
1054 err = ceph_mdsc_do_request(mdsc, dir, req);
1055 if (!err && !req->r_reply_info.head->is_dentry)
1056 d_delete(dentry);
1057 ceph_mdsc_put_request(req);
1058out:
1059 return err;
1060}
1061
1062static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
1063 struct inode *new_dir, struct dentry *new_dentry)
1064{
3d14c5d2
YS
1065 struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
1066 struct ceph_mds_client *mdsc = fsc->mdsc;
2817b000 1067 struct ceph_mds_request *req;
0ea611a3 1068 int op = CEPH_MDS_OP_RENAME;
2817b000
SW
1069 int err;
1070
1071 if (ceph_snap(old_dir) != ceph_snap(new_dir))
1072 return -EXDEV;
0ea611a3
YZ
1073 if (ceph_snap(old_dir) != CEPH_NOSNAP) {
1074 if (old_dir == new_dir && ceph_snap(old_dir) == CEPH_SNAPDIR)
1075 op = CEPH_MDS_OP_RENAMESNAP;
1076 else
1077 return -EROFS;
1078 }
2817b000
SW
1079 dout("rename dir %p dentry %p to dir %p dentry %p\n",
1080 old_dir, old_dentry, new_dir, new_dentry);
0ea611a3 1081 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
2817b000
SW
1082 if (IS_ERR(req))
1083 return PTR_ERR(req);
180061a5 1084 ihold(old_dir);
2817b000
SW
1085 req->r_dentry = dget(new_dentry);
1086 req->r_num_caps = 2;
1087 req->r_old_dentry = dget(old_dentry);
180061a5 1088 req->r_old_dentry_dir = old_dir;
2817b000
SW
1089 req->r_locked_dir = new_dir;
1090 req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
1091 req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
1092 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
1093 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
1094 /* release LINK_RDCACHE on source inode (mds will lock it) */
1095 req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
2b0143b5
DH
1096 if (d_really_is_positive(new_dentry))
1097 req->r_inode_drop = drop_caps_for_unlink(d_inode(new_dentry));
2817b000
SW
1098 err = ceph_mdsc_do_request(mdsc, old_dir, req);
1099 if (!err && !req->r_reply_info.head->is_dentry) {
1100 /*
1101 * Normally d_move() is done by fill_trace (called by
1102 * do_request, above). If there is no trace, we need
1103 * to do it here.
1104 */
ea1409f9 1105
fdd4e158
YZ
1106 /* d_move screws up sibling dentries' offsets */
1107 ceph_dir_clear_complete(old_dir);
1108 ceph_dir_clear_complete(new_dir);
1109
2817b000 1110 d_move(old_dentry, new_dentry);
ea1409f9
SW
1111
1112 /* ensure target dentry is invalidated, despite
1113 rehashing bug in vfs_rename_dir */
81a6cf2d 1114 ceph_invalidate_dentry_lease(new_dentry);
2817b000
SW
1115 }
1116 ceph_mdsc_put_request(req);
1117 return err;
1118}
1119
81a6cf2d
SW
1120/*
1121 * Ensure a dentry lease will no longer revalidate.
1122 */
1123void ceph_invalidate_dentry_lease(struct dentry *dentry)
1124{
1125 spin_lock(&dentry->d_lock);
1126 dentry->d_time = jiffies;
1127 ceph_dentry(dentry)->lease_shared_gen = 0;
1128 spin_unlock(&dentry->d_lock);
1129}
2817b000
SW
1130
1131/*
1132 * Check if dentry lease is valid. If not, delete the lease. Try to
1133 * renew if the lease is more than half up.
1134 */
1135static int dentry_lease_is_valid(struct dentry *dentry)
1136{
1137 struct ceph_dentry_info *di;
1138 struct ceph_mds_session *s;
1139 int valid = 0;
1140 u32 gen;
1141 unsigned long ttl;
1142 struct ceph_mds_session *session = NULL;
1143 struct inode *dir = NULL;
1144 u32 seq = 0;
1145
1146 spin_lock(&dentry->d_lock);
1147 di = ceph_dentry(dentry);
3d8eb7a9 1148 if (di->lease_session) {
2817b000 1149 s = di->lease_session;
d8fb02ab 1150 spin_lock(&s->s_gen_ttl_lock);
2817b000
SW
1151 gen = s->s_cap_gen;
1152 ttl = s->s_cap_ttl;
d8fb02ab 1153 spin_unlock(&s->s_gen_ttl_lock);
2817b000
SW
1154
1155 if (di->lease_gen == gen &&
1156 time_before(jiffies, dentry->d_time) &&
1157 time_before(jiffies, ttl)) {
1158 valid = 1;
1159 if (di->lease_renew_after &&
1160 time_after(jiffies, di->lease_renew_after)) {
1161 /* we should renew */
2b0143b5 1162 dir = d_inode(dentry->d_parent);
2817b000
SW
1163 session = ceph_get_mds_session(s);
1164 seq = di->lease_seq;
1165 di->lease_renew_after = 0;
1166 di->lease_renew_from = jiffies;
1167 }
2817b000
SW
1168 }
1169 }
1170 spin_unlock(&dentry->d_lock);
1171
1172 if (session) {
1173 ceph_mdsc_lease_send_msg(session, dir, dentry,
1174 CEPH_MDS_LEASE_RENEW, seq);
1175 ceph_put_mds_session(session);
1176 }
1177 dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
1178 return valid;
1179}
1180
1181/*
1182 * Check if directory-wide content lease/cap is valid.
1183 */
1184static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
1185{
1186 struct ceph_inode_info *ci = ceph_inode(dir);
1187 struct ceph_dentry_info *di = ceph_dentry(dentry);
1188 int valid = 0;
1189
be655596 1190 spin_lock(&ci->i_ceph_lock);
2817b000
SW
1191 if (ci->i_shared_gen == di->lease_shared_gen)
1192 valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
be655596 1193 spin_unlock(&ci->i_ceph_lock);
2817b000
SW
1194 dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
1195 dir, (unsigned)ci->i_shared_gen, dentry,
1196 (unsigned)di->lease_shared_gen, valid);
1197 return valid;
1198}
1199
1200/*
1201 * Check if cached dentry can be trusted.
1202 */
0b728e19 1203static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
2817b000 1204{
bf1c6aca 1205 int valid = 0;
641235d8 1206 struct dentry *parent;
34286d66
NP
1207 struct inode *dir;
1208
0b728e19 1209 if (flags & LOOKUP_RCU)
34286d66
NP
1210 return -ECHILD;
1211
a455589f 1212 dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
2b0143b5 1213 dentry, d_inode(dentry), ceph_dentry(dentry)->offset);
2817b000 1214
641235d8
YZ
1215 parent = dget_parent(dentry);
1216 dir = d_inode(parent);
bf1c6aca 1217
2817b000
SW
1218 /* always trust cached snapped dentries, snapdir dentry */
1219 if (ceph_snap(dir) != CEPH_NOSNAP) {
a455589f 1220 dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
2b0143b5 1221 dentry, d_inode(dentry));
bf1c6aca 1222 valid = 1;
2b0143b5
DH
1223 } else if (d_really_is_positive(dentry) &&
1224 ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) {
bf1c6aca
SW
1225 valid = 1;
1226 } else if (dentry_lease_is_valid(dentry) ||
1227 dir_lease_is_valid(dir, dentry)) {
2b0143b5
DH
1228 if (d_really_is_positive(dentry))
1229 valid = ceph_is_any_caps(d_inode(dentry));
9215aeea
YZ
1230 else
1231 valid = 1;
2817b000 1232 }
2817b000 1233
200fd27c
YZ
1234 if (!valid) {
1235 struct ceph_mds_client *mdsc =
1236 ceph_sb_to_client(dir->i_sb)->mdsc;
1237 struct ceph_mds_request *req;
1238 int op, mask, err;
1239
1240 op = ceph_snap(dir) == CEPH_SNAPDIR ?
1241 CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
1242 req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
1243 if (!IS_ERR(req)) {
1244 req->r_dentry = dget(dentry);
1245 req->r_num_caps = 2;
1246
1247 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
1248 if (ceph_security_xattr_wanted(dir))
1249 mask |= CEPH_CAP_XATTR_SHARED;
1250 req->r_args.getattr.mask = mask;
1251
1252 req->r_locked_dir = dir;
1253 err = ceph_mdsc_do_request(mdsc, NULL, req);
1254 if (err == 0 || err == -ENOENT) {
1255 if (dentry == req->r_dentry) {
1256 valid = !d_unhashed(dentry);
1257 } else {
1258 d_invalidate(req->r_dentry);
1259 err = -EAGAIN;
1260 }
1261 }
1262 ceph_mdsc_put_request(req);
1263 dout("d_revalidate %p lookup result=%d\n",
1264 dentry, err);
1265 }
1266 }
1267
bf1c6aca 1268 dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
9215aeea 1269 if (valid) {
bf1c6aca 1270 ceph_dentry_lru_touch(dentry);
9215aeea
YZ
1271 } else {
1272 ceph_dir_clear_complete(dir);
9215aeea 1273 }
641235d8
YZ
1274
1275 dput(parent);
bf1c6aca 1276 return valid;
2817b000
SW
1277}
1278
1279/*
147851d2 1280 * Release our ceph_dentry_info.
2817b000 1281 */
147851d2 1282static void ceph_d_release(struct dentry *dentry)
2817b000
SW
1283{
1284 struct ceph_dentry_info *di = ceph_dentry(dentry);
2817b000 1285
147851d2 1286 dout("d_release %p\n", dentry);
3d8eb7a9
SW
1287 ceph_dentry_lru_del(dentry);
1288 if (di->lease_session)
1289 ceph_put_mds_session(di->lease_session);
1290 kmem_cache_free(ceph_dentry_cachep, di);
1291 dentry->d_fsdata = NULL;
2817b000
SW
1292}
1293
1294static int ceph_snapdir_d_revalidate(struct dentry *dentry,
0b728e19 1295 unsigned int flags)
2817b000
SW
1296{
1297 /*
1298 * Eventually, we'll want to revalidate snapped metadata
1299 * too... probably...
1300 */
1301 return 1;
1302}
1303
b58dc410
SW
1304/*
1305 * When the VFS prunes a dentry from the cache, we need to clear the
1306 * complete flag on the parent directory.
1307 *
1308 * Called under dentry->d_lock.
1309 */
1310static void ceph_d_prune(struct dentry *dentry)
1311{
774ac21d 1312 dout("ceph_d_prune %p\n", dentry);
b58dc410
SW
1313
1314 /* do we have a valid parent? */
8842b3be 1315 if (IS_ROOT(dentry))
b58dc410
SW
1316 return;
1317
2f276c51 1318 /* if we are not hashed, we don't affect dir's completeness */
b58dc410
SW
1319 if (d_unhashed(dentry))
1320 return;
2817b000 1321
b58dc410
SW
1322 /*
1323 * we hold d_lock, so d_parent is stable, and d_fsdata is never
1324 * cleared until d_release
1325 */
2b0143b5 1326 ceph_dir_clear_complete(d_inode(dentry->d_parent));
b58dc410 1327}
2817b000
SW
1328
1329/*
1330 * read() on a dir. This weird interface hack only works if mounted
1331 * with '-o dirstat'.
1332 */
1333static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
1334 loff_t *ppos)
1335{
1336 struct ceph_file_info *cf = file->private_data;
496ad9aa 1337 struct inode *inode = file_inode(file);
2817b000
SW
1338 struct ceph_inode_info *ci = ceph_inode(inode);
1339 int left;
ae598083 1340 const int bufsize = 1024;
2817b000 1341
3d14c5d2 1342 if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
2817b000
SW
1343 return -EISDIR;
1344
1345 if (!cf->dir_info) {
687265e5 1346 cf->dir_info = kmalloc(bufsize, GFP_KERNEL);
2817b000
SW
1347 if (!cf->dir_info)
1348 return -ENOMEM;
1349 cf->dir_info_len =
ae598083 1350 snprintf(cf->dir_info, bufsize,
2817b000
SW
1351 "entries: %20lld\n"
1352 " files: %20lld\n"
1353 " subdirs: %20lld\n"
1354 "rentries: %20lld\n"
1355 " rfiles: %20lld\n"
1356 " rsubdirs: %20lld\n"
1357 "rbytes: %20lld\n"
1358 "rctime: %10ld.%09ld\n",
1359 ci->i_files + ci->i_subdirs,
1360 ci->i_files,
1361 ci->i_subdirs,
1362 ci->i_rfiles + ci->i_rsubdirs,
1363 ci->i_rfiles,
1364 ci->i_rsubdirs,
1365 ci->i_rbytes,
1366 (long)ci->i_rctime.tv_sec,
1367 (long)ci->i_rctime.tv_nsec);
1368 }
1369
1370 if (*ppos >= cf->dir_info_len)
1371 return 0;
1372 size = min_t(unsigned, size, cf->dir_info_len-*ppos);
1373 left = copy_to_user(buf, cf->dir_info + *ppos, size);
1374 if (left == size)
1375 return -EFAULT;
1376 *ppos += (size - left);
1377 return size - left;
1378}
1379
2817b000
SW
1380/*
1381 * We maintain a private dentry LRU.
1382 *
1383 * FIXME: this needs to be changed to a per-mds lru to be useful.
1384 */
1385void ceph_dentry_lru_add(struct dentry *dn)
1386{
1387 struct ceph_dentry_info *di = ceph_dentry(dn);
1388 struct ceph_mds_client *mdsc;
2817b000 1389
a455589f 1390 dout("dentry_lru_add %p %p '%pd'\n", di, dn, dn);
3d8eb7a9
SW
1391 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1392 spin_lock(&mdsc->dentry_lru_lock);
1393 list_add_tail(&di->lru, &mdsc->dentry_lru);
1394 mdsc->num_dentry++;
1395 spin_unlock(&mdsc->dentry_lru_lock);
2817b000
SW
1396}
1397
1398void ceph_dentry_lru_touch(struct dentry *dn)
1399{
1400 struct ceph_dentry_info *di = ceph_dentry(dn);
1401 struct ceph_mds_client *mdsc;
2817b000 1402
a455589f
AV
1403 dout("dentry_lru_touch %p %p '%pd' (offset %lld)\n", di, dn, dn,
1404 di->offset);
3d8eb7a9
SW
1405 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1406 spin_lock(&mdsc->dentry_lru_lock);
1407 list_move_tail(&di->lru, &mdsc->dentry_lru);
1408 spin_unlock(&mdsc->dentry_lru_lock);
2817b000
SW
1409}
1410
1411void ceph_dentry_lru_del(struct dentry *dn)
1412{
1413 struct ceph_dentry_info *di = ceph_dentry(dn);
1414 struct ceph_mds_client *mdsc;
1415
a455589f 1416 dout("dentry_lru_del %p %p '%pd'\n", di, dn, dn);
3d8eb7a9
SW
1417 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1418 spin_lock(&mdsc->dentry_lru_lock);
1419 list_del_init(&di->lru);
1420 mdsc->num_dentry--;
1421 spin_unlock(&mdsc->dentry_lru_lock);
2817b000
SW
1422}
1423
6c0f3af7
SW
1424/*
1425 * Return name hash for a given dentry. This is dependent on
1426 * the parent directory's hash function.
1427 */
e5f86dc3 1428unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
6c0f3af7 1429{
6c0f3af7
SW
1430 struct ceph_inode_info *dci = ceph_inode(dir);
1431
1432 switch (dci->i_dir_layout.dl_dir_hash) {
1433 case 0: /* for backward compat */
1434 case CEPH_STR_HASH_LINUX:
1435 return dn->d_name.hash;
1436
1437 default:
1438 return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
1439 dn->d_name.name, dn->d_name.len);
1440 }
1441}
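
/*
 * Editor's sketch, not part of the original file: a typical use of the
 * name hash is to pick the directory fragment a dentry falls into.
 * dentry_frag_example() is a hypothetical name; ceph_choose_frag() is
 * the real helper used for this purpose (see ceph_readdir above).
 */
static inline u32 dentry_frag_example(struct inode *dir, struct dentry *dn)
{
	return ceph_choose_frag(ceph_inode(dir), ceph_dentry_hash(dir, dn),
				NULL, NULL);
}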
1442
2817b000
SW
1443const struct file_operations ceph_dir_fops = {
1444 .read = ceph_read_dir,
77acfa29 1445 .iterate = ceph_readdir,
2817b000
SW
1446 .llseek = ceph_dir_llseek,
1447 .open = ceph_open,
1448 .release = ceph_release,
1449 .unlocked_ioctl = ceph_ioctl,
da819c81 1450 .fsync = ceph_fsync,
2817b000
SW
1451};
1452
38c48b5f
YZ
1453const struct file_operations ceph_snapdir_fops = {
1454 .iterate = ceph_readdir,
1455 .llseek = ceph_dir_llseek,
1456 .open = ceph_open,
1457 .release = ceph_release,
1458};
1459
2817b000
SW
1460const struct inode_operations ceph_dir_iops = {
1461 .lookup = ceph_lookup,
1462 .permission = ceph_permission,
1463 .getattr = ceph_getattr,
1464 .setattr = ceph_setattr,
1465 .setxattr = ceph_setxattr,
1466 .getxattr = ceph_getxattr,
1467 .listxattr = ceph_listxattr,
1468 .removexattr = ceph_removexattr,
7221fe4c 1469 .get_acl = ceph_get_acl,
72466d0b 1470 .set_acl = ceph_set_acl,
2817b000
SW
1471 .mknod = ceph_mknod,
1472 .symlink = ceph_symlink,
1473 .mkdir = ceph_mkdir,
1474 .link = ceph_link,
1475 .unlink = ceph_unlink,
1476 .rmdir = ceph_unlink,
1477 .rename = ceph_rename,
1478 .create = ceph_create,
2d83bde9 1479 .atomic_open = ceph_atomic_open,
2817b000
SW
1480};
1481
38c48b5f
YZ
1482const struct inode_operations ceph_snapdir_iops = {
1483 .lookup = ceph_lookup,
1484 .permission = ceph_permission,
1485 .getattr = ceph_getattr,
1486 .mkdir = ceph_mkdir,
1487 .rmdir = ceph_unlink,
0ea611a3 1488 .rename = ceph_rename,
38c48b5f
YZ
1489};
1490
52dfb8ac 1491const struct dentry_operations ceph_dentry_ops = {
2817b000 1492 .d_revalidate = ceph_d_revalidate,
147851d2 1493 .d_release = ceph_d_release,
b58dc410 1494 .d_prune = ceph_d_prune,
2817b000
SW
1495};
1496
52dfb8ac 1497const struct dentry_operations ceph_snapdir_dentry_ops = {
2817b000 1498 .d_revalidate = ceph_snapdir_d_revalidate,
147851d2 1499 .d_release = ceph_d_release,
2817b000
SW
1500};
1501
52dfb8ac 1502const struct dentry_operations ceph_snap_dentry_ops = {
147851d2 1503 .d_release = ceph_d_release,
b58dc410 1504 .d_prune = ceph_d_prune,
2817b000 1505};