1 #include <linux/ceph/ceph_debug.h>
2
3 #include <linux/spinlock.h>
4 #include <linux/fs_struct.h>
5 #include <linux/namei.h>
6 #include <linux/slab.h>
7 #include <linux/sched.h>
8 #include <linux/xattr.h>
9
10 #include "super.h"
11 #include "mds_client.h"
12
13 /*
14 * Directory operations: readdir, lookup, create, link, unlink,
15 * rename, etc.
16 */
17
18 /*
19 * Ceph MDS operations are specified in terms of a base ino and
20 * relative path. Thus, the client can specify an operation on a
21 * specific inode (e.g., a getattr due to fstat(2)), or as a path
22 * relative to, say, the root directory.
23 *
24 * Normally, we limit ourselves to strict inode ops (no path component)
25 * or dentry operations (a single path component relative to an ino). The
26 * exception to this is open_root_dentry(), which will open the mount
27 * point by name.
28 */
29
30 const struct dentry_operations ceph_dentry_ops;
31
32 /*
33 * Initialize ceph dentry state.
34 */
35 static int ceph_d_init(struct dentry *dentry)
36 {
37 struct ceph_dentry_info *di;
38
39 di = kmem_cache_zalloc(ceph_dentry_cachep, GFP_KERNEL);
40 if (!di)
41 return -ENOMEM; /* oh well */
42
43 di->dentry = dentry;
44 di->lease_session = NULL;
45 di->time = jiffies;
46 dentry->d_fsdata = di;
47 ceph_dentry_lru_add(dentry);
48 return 0;
49 }
50
51 /*
52 * for f_pos for readdir:
53 * - hash order:
54 * (0xff << 52) | ((24 bits hash) << 28) |
55  *     (the nth entry sharing that hash value);
56  * - frag+name order:
57 * ((frag value) << 28) | (the nth entry in frag);
58 */
59 #define OFFSET_BITS 28
60 #define OFFSET_MASK ((1 << OFFSET_BITS) - 1)
61 #define HASH_ORDER (0xffull << (OFFSET_BITS + 24))
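/*
 * A worked example (values are illustrative only): offsets 0 and 1 are
 * reserved for "." and "..", so the first real entry in a frag sits at
 * offset 2.  In frag+name order, the entry at offset 5 of frag 0x01800000
 * gets fpos = (0x01800000ULL << OFFSET_BITS) | 5 = 0x0018000000000005;
 * in hash order, the first entry whose name hashes to 0x123456 gets
 * fpos = HASH_ORDER | (0x123456ULL << OFFSET_BITS) | 0 = 0x0ff1234560000000.
 */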
62 loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order)
63 {
64 loff_t fpos = ((loff_t)high << 28) | (loff_t)off;
65 if (hash_order)
66 fpos |= HASH_ORDER;
67 return fpos;
68 }
69
70 static bool is_hash_order(loff_t p)
71 {
72 return (p & HASH_ORDER) == HASH_ORDER;
73 }
74
75 static unsigned fpos_frag(loff_t p)
76 {
77 return p >> OFFSET_BITS;
78 }
79
80 static unsigned fpos_hash(loff_t p)
81 {
82 return ceph_frag_value(fpos_frag(p));
83 }
84
85 static unsigned fpos_off(loff_t p)
86 {
87 return p & OFFSET_MASK;
88 }
89
90 static int fpos_cmp(loff_t l, loff_t r)
91 {
92 int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
93 if (v)
94 return v;
95 return (int)(fpos_off(l) - fpos_off(r));
96 }
97
98 /*
99 * make note of the last dentry we read, so we can
100 * continue at the same lexicographical point,
101 * regardless of what dir changes take place on the
102 * server.
103 */
104 static int note_last_dentry(struct ceph_file_info *fi, const char *name,
105 int len, unsigned next_offset)
106 {
107 char *buf = kmalloc(len+1, GFP_KERNEL);
108 if (!buf)
109 return -ENOMEM;
110 kfree(fi->last_name);
111 fi->last_name = buf;
112 memcpy(fi->last_name, name, len);
113 fi->last_name[len] = 0;
114 fi->next_offset = next_offset;
115 dout("note_last_dentry '%s'\n", fi->last_name);
116 return 0;
117 }
118
119
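/*
 * Fetch the idx-th dentry pointer from the shared readdir cache, which
 * packs dentry pointers into the directory inode's page cache (i_size is
 * used to track how far the cache has been filled).  Returns a referenced
 * dentry on success, NULL when idx is beyond the cached entries, or
 * ERR_PTR(-EAGAIN) when the cache page is missing, the dir is no longer
 * complete+ordered, or the dentry is being freed.
 */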
120 static struct dentry *
121 __dcache_find_get_entry(struct dentry *parent, u64 idx,
122 struct ceph_readdir_cache_control *cache_ctl)
123 {
124 struct inode *dir = d_inode(parent);
125 struct dentry *dentry;
126 unsigned idx_mask = (PAGE_SIZE / sizeof(struct dentry *)) - 1;
127 loff_t ptr_pos = idx * sizeof(struct dentry *);
128 pgoff_t ptr_pgoff = ptr_pos >> PAGE_SHIFT;
129
130 if (ptr_pos >= i_size_read(dir))
131 return NULL;
132
133 if (!cache_ctl->page || ptr_pgoff != page_index(cache_ctl->page)) {
134 ceph_readdir_cache_release(cache_ctl);
135 cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
136 if (!cache_ctl->page) {
137 dout(" page %lu not found\n", ptr_pgoff);
138 return ERR_PTR(-EAGAIN);
139 }
140 /* reading/filling the cache are serialized by
141 i_mutex, no need to use page lock */
142 unlock_page(cache_ctl->page);
143 cache_ctl->dentries = kmap(cache_ctl->page);
144 }
145
146 cache_ctl->index = idx & idx_mask;
147
148 rcu_read_lock();
149 spin_lock(&parent->d_lock);
150         /* check i_size again here, because an empty directory can be
151          * marked as complete while i_mutex is not held. */
152 if (ceph_dir_is_complete_ordered(dir) && ptr_pos < i_size_read(dir))
153 dentry = cache_ctl->dentries[cache_ctl->index];
154 else
155 dentry = NULL;
156 spin_unlock(&parent->d_lock);
157 if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
158 dentry = NULL;
159 rcu_read_unlock();
160 return dentry ? : ERR_PTR(-EAGAIN);
161 }
162
163 /*
164 * When possible, we try to satisfy a readdir by peeking at the
165 * dcache. We make this work by carefully ordering dentries on
166 * d_child when we initially get results back from the MDS, and
167 * falling back to a "normal" sync readdir if any dentries in the dir
168 * are dropped.
169 *
170 * Complete dir indicates that we have all dentries in the dir. It is
171 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
172 * the MDS if/when the directory is modified).
173 */
174 static int __dcache_readdir(struct file *file, struct dir_context *ctx,
175 u32 shared_gen)
176 {
177 struct ceph_file_info *fi = file->private_data;
178 struct dentry *parent = file->f_path.dentry;
179 struct inode *dir = d_inode(parent);
180 struct dentry *dentry, *last = NULL;
181 struct ceph_dentry_info *di;
182 struct ceph_readdir_cache_control cache_ctl = {};
183 u64 idx = 0;
184 int err = 0;
185
186 dout("__dcache_readdir %p v%u at %llx\n", dir, shared_gen, ctx->pos);
187
188 /* search start position */
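        /* binary search the cache for the first entry at or after ctx->pos */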
189 if (ctx->pos > 2) {
190 u64 count = div_u64(i_size_read(dir), sizeof(struct dentry *));
191 while (count > 0) {
192 u64 step = count >> 1;
193 dentry = __dcache_find_get_entry(parent, idx + step,
194 &cache_ctl);
195 if (!dentry) {
196                                 /* use linear search */
197 idx = 0;
198 break;
199 }
200 if (IS_ERR(dentry)) {
201 err = PTR_ERR(dentry);
202 goto out;
203 }
204 di = ceph_dentry(dentry);
205 spin_lock(&dentry->d_lock);
206 if (fpos_cmp(di->offset, ctx->pos) < 0) {
207 idx += step + 1;
208 count -= step + 1;
209 } else {
210 count = step;
211 }
212 spin_unlock(&dentry->d_lock);
213 dput(dentry);
214 }
215
216 dout("__dcache_readdir %p cache idx %llu\n", dir, idx);
217 }
218
219
220 for (;;) {
221 bool emit_dentry = false;
222 dentry = __dcache_find_get_entry(parent, idx++, &cache_ctl);
223 if (!dentry) {
224 fi->flags |= CEPH_F_ATEND;
225 err = 0;
226 break;
227 }
228 if (IS_ERR(dentry)) {
229 err = PTR_ERR(dentry);
230 goto out;
231 }
232
233 di = ceph_dentry(dentry);
234 spin_lock(&dentry->d_lock);
235 if (di->lease_shared_gen == shared_gen &&
236 d_really_is_positive(dentry) &&
237 fpos_cmp(ctx->pos, di->offset) <= 0) {
238 emit_dentry = true;
239 }
240 spin_unlock(&dentry->d_lock);
241
242 if (emit_dentry) {
243 dout(" %llx dentry %p %pd %p\n", di->offset,
244 dentry, dentry, d_inode(dentry));
245 ctx->pos = di->offset;
246 if (!dir_emit(ctx, dentry->d_name.name,
247 dentry->d_name.len,
248 ceph_translate_ino(dentry->d_sb,
249 d_inode(dentry)->i_ino),
250 d_inode(dentry)->i_mode >> 12)) {
251 dput(dentry);
252 err = 0;
253 break;
254 }
255 ctx->pos++;
256
257 if (last)
258 dput(last);
259 last = dentry;
260 } else {
261 dput(dentry);
262 }
263 }
264 out:
265 ceph_readdir_cache_release(&cache_ctl);
266 if (last) {
267 int ret;
268 di = ceph_dentry(last);
269 ret = note_last_dentry(fi, last->d_name.name, last->d_name.len,
270 fpos_off(di->offset) + 1);
271 if (ret < 0)
272 err = ret;
273 dput(last);
274 }
275 return err;
276 }
277
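/*
 * Do we need to ask the MDS for another readdir chunk?  True if we have
 * no buffered reply at all, or if the buffered reply does not cover the
 * frag (or, in hash order, the hash value) that ctx->pos refers to.
 */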
278 static bool need_send_readdir(struct ceph_file_info *fi, loff_t pos)
279 {
280 if (!fi->last_readdir)
281 return true;
282 if (is_hash_order(pos))
283 return !ceph_frag_contains_value(fi->frag, fpos_hash(pos));
284 else
285 return fi->frag != fpos_frag(pos);
286 }
287
288 static int ceph_readdir(struct file *file, struct dir_context *ctx)
289 {
290 struct ceph_file_info *fi = file->private_data;
291 struct inode *inode = file_inode(file);
292 struct ceph_inode_info *ci = ceph_inode(inode);
293 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
294 struct ceph_mds_client *mdsc = fsc->mdsc;
295 int i;
296 int err;
297 unsigned frag = -1;
298 struct ceph_mds_reply_info_parsed *rinfo;
299
300 dout("readdir %p file %p pos %llx\n", inode, file, ctx->pos);
301 if (fi->flags & CEPH_F_ATEND)
302 return 0;
303
304 /* always start with . and .. */
305 if (ctx->pos == 0) {
306 dout("readdir off 0 -> '.'\n");
307 if (!dir_emit(ctx, ".", 1,
308 ceph_translate_ino(inode->i_sb, inode->i_ino),
309 inode->i_mode >> 12))
310 return 0;
311 ctx->pos = 1;
312 }
313 if (ctx->pos == 1) {
314 ino_t ino = parent_ino(file->f_path.dentry);
315 dout("readdir off 1 -> '..'\n");
316 if (!dir_emit(ctx, "..", 2,
317 ceph_translate_ino(inode->i_sb, ino),
318 inode->i_mode >> 12))
319 return 0;
320 ctx->pos = 2;
321 }
322
323 /* can we use the dcache? */
324 spin_lock(&ci->i_ceph_lock);
325 if (ceph_test_mount_opt(fsc, DCACHE) &&
326 !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
327 ceph_snap(inode) != CEPH_SNAPDIR &&
328 __ceph_dir_is_complete_ordered(ci) &&
329 __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
330 u32 shared_gen = ci->i_shared_gen;
331 spin_unlock(&ci->i_ceph_lock);
332 err = __dcache_readdir(file, ctx, shared_gen);
333 if (err != -EAGAIN)
334 return err;
335 } else {
336 spin_unlock(&ci->i_ceph_lock);
337 }
338
339 /* proceed with a normal readdir */
340 more:
341 /* do we have the correct frag content buffered? */
342 if (need_send_readdir(fi, ctx->pos)) {
343 struct ceph_mds_request *req;
344 int op = ceph_snap(inode) == CEPH_SNAPDIR ?
345 CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;
346
347 /* discard old result, if any */
348 if (fi->last_readdir) {
349 ceph_mdsc_put_request(fi->last_readdir);
350 fi->last_readdir = NULL;
351 }
352
353 if (is_hash_order(ctx->pos)) {
354 /* fragtree isn't always accurate. choose frag
355 * based on previous reply when possible. */
356 if (frag == (unsigned)-1)
357 frag = ceph_choose_frag(ci, fpos_hash(ctx->pos),
358 NULL, NULL);
359 } else {
360 frag = fpos_frag(ctx->pos);
361 }
362
363 dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
364 ceph_vinop(inode), frag, fi->last_name);
365 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
366 if (IS_ERR(req))
367 return PTR_ERR(req);
368 err = ceph_alloc_readdir_reply_buffer(req, inode);
369 if (err) {
370 ceph_mdsc_put_request(req);
371 return err;
372 }
373 /* hints to request -> mds selection code */
374 req->r_direct_mode = USE_AUTH_MDS;
375 req->r_direct_hash = ceph_frag_value(frag);
376 __set_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
377 if (fi->last_name) {
378 req->r_path2 = kstrdup(fi->last_name, GFP_KERNEL);
379 if (!req->r_path2) {
380 ceph_mdsc_put_request(req);
381 return -ENOMEM;
382 }
383 } else if (is_hash_order(ctx->pos)) {
384 req->r_args.readdir.offset_hash =
385 cpu_to_le32(fpos_hash(ctx->pos));
386 }
387
388 req->r_dir_release_cnt = fi->dir_release_count;
389 req->r_dir_ordered_cnt = fi->dir_ordered_count;
390 req->r_readdir_cache_idx = fi->readdir_cache_idx;
391 req->r_readdir_offset = fi->next_offset;
392 req->r_args.readdir.frag = cpu_to_le32(frag);
393 req->r_args.readdir.flags =
394 cpu_to_le16(CEPH_READDIR_REPLY_BITFLAGS);
395
396 req->r_inode = inode;
397 ihold(inode);
398 req->r_dentry = dget(file->f_path.dentry);
399 err = ceph_mdsc_do_request(mdsc, NULL, req);
400 if (err < 0) {
401 ceph_mdsc_put_request(req);
402 return err;
403 }
404 dout("readdir got and parsed readdir result=%d on "
405 "frag %x, end=%d, complete=%d, hash_order=%d\n",
406 err, frag,
407 (int)req->r_reply_info.dir_end,
408 (int)req->r_reply_info.dir_complete,
409 (int)req->r_reply_info.hash_order);
410
411 rinfo = &req->r_reply_info;
412 if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
413 frag = le32_to_cpu(rinfo->dir_dir->frag);
414 if (!rinfo->hash_order) {
415 fi->next_offset = req->r_readdir_offset;
416 /* adjust ctx->pos to beginning of frag */
417 ctx->pos = ceph_make_fpos(frag,
418 fi->next_offset,
419 false);
420 }
421 }
422
423 fi->frag = frag;
424 fi->last_readdir = req;
425
426 if (test_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags)) {
427 fi->readdir_cache_idx = req->r_readdir_cache_idx;
428 if (fi->readdir_cache_idx < 0) {
429 /* preclude from marking dir ordered */
430 fi->dir_ordered_count = 0;
431 } else if (ceph_frag_is_leftmost(frag) &&
432 fi->next_offset == 2) {
433 /* note dir version at start of readdir so
434 * we can tell if any dentries get dropped */
435 fi->dir_release_count = req->r_dir_release_cnt;
436 fi->dir_ordered_count = req->r_dir_ordered_cnt;
437 }
438 } else {
439                         dout("readdir !did_prepopulate\n");
440 /* disable readdir cache */
441 fi->readdir_cache_idx = -1;
442 /* preclude from marking dir complete */
443 fi->dir_release_count = 0;
444 }
445
446 /* note next offset and last dentry name */
447 if (rinfo->dir_nr > 0) {
448 struct ceph_mds_reply_dir_entry *rde =
449 rinfo->dir_entries + (rinfo->dir_nr-1);
450 unsigned next_offset = req->r_reply_info.dir_end ?
451 2 : (fpos_off(rde->offset) + 1);
452 err = note_last_dentry(fi, rde->name, rde->name_len,
453 next_offset);
454 if (err)
455 return err;
456 } else if (req->r_reply_info.dir_end) {
457 fi->next_offset = 2;
458 /* keep last name */
459 }
460 }
461
462 rinfo = &fi->last_readdir->r_reply_info;
463 dout("readdir frag %x num %d pos %llx chunk first %llx\n",
464 fi->frag, rinfo->dir_nr, ctx->pos,
465 rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL);
466
467 i = 0;
468 /* search start position */
469 if (rinfo->dir_nr > 0) {
470 int step, nr = rinfo->dir_nr;
471 while (nr > 0) {
472 step = nr >> 1;
473 if (rinfo->dir_entries[i + step].offset < ctx->pos) {
474 i += step + 1;
475 nr -= step + 1;
476 } else {
477 nr = step;
478 }
479 }
480 }
481 for (; i < rinfo->dir_nr; i++) {
482 struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
483 struct ceph_vino vino;
484 ino_t ino;
485 u32 ftype;
486
487 BUG_ON(rde->offset < ctx->pos);
488
489 ctx->pos = rde->offset;
490 dout("readdir (%d/%d) -> %llx '%.*s' %p\n",
491 i, rinfo->dir_nr, ctx->pos,
492 rde->name_len, rde->name, &rde->inode.in);
493
494 BUG_ON(!rde->inode.in);
495 ftype = le32_to_cpu(rde->inode.in->mode) >> 12;
496 vino.ino = le64_to_cpu(rde->inode.in->ino);
497 vino.snap = le64_to_cpu(rde->inode.in->snapid);
498 ino = ceph_vino_to_ino(vino);
499
500 if (!dir_emit(ctx, rde->name, rde->name_len,
501 ceph_translate_ino(inode->i_sb, ino), ftype)) {
502 dout("filldir stopping us...\n");
503 return 0;
504 }
505 ctx->pos++;
506 }
507
508 ceph_mdsc_put_request(fi->last_readdir);
509 fi->last_readdir = NULL;
510
511 if (fi->next_offset > 2) {
512 frag = fi->frag;
513 goto more;
514 }
515
516 /* more frags? */
517 if (!ceph_frag_is_rightmost(fi->frag)) {
518 frag = ceph_frag_next(fi->frag);
519 if (is_hash_order(ctx->pos)) {
520 loff_t new_pos = ceph_make_fpos(ceph_frag_value(frag),
521 fi->next_offset, true);
522 if (new_pos > ctx->pos)
523 ctx->pos = new_pos;
524 /* keep last_name */
525 } else {
526 ctx->pos = ceph_make_fpos(frag, fi->next_offset, false);
527 kfree(fi->last_name);
528 fi->last_name = NULL;
529 }
530 dout("readdir next frag is %x\n", frag);
531 goto more;
532 }
533 fi->flags |= CEPH_F_ATEND;
534
535 /*
536 * if dir_release_count still matches the dir, no dentries
537 * were released during the whole readdir, and we should have
538 * the complete dir contents in our cache.
539 */
540 if (atomic64_read(&ci->i_release_count) == fi->dir_release_count) {
541 spin_lock(&ci->i_ceph_lock);
542 if (fi->dir_ordered_count == atomic64_read(&ci->i_ordered_count)) {
543 dout(" marking %p complete and ordered\n", inode);
544 /* use i_size to track number of entries in
545 * readdir cache */
546 BUG_ON(fi->readdir_cache_idx < 0);
547 i_size_write(inode, fi->readdir_cache_idx *
548 sizeof(struct dentry*));
549 } else {
550 dout(" marking %p complete\n", inode);
551 }
552 __ceph_dir_set_complete(ci, fi->dir_release_count,
553 fi->dir_ordered_count);
554 spin_unlock(&ci->i_ceph_lock);
555 }
556
557 dout("readdir %p file %p done.\n", inode, file);
558 return 0;
559 }
560
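/*
 * Throw away all buffered readdir state: the cached MDS reply, the last
 * entry name, and the readdir cache position.
 */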
561 static void reset_readdir(struct ceph_file_info *fi)
562 {
563 if (fi->last_readdir) {
564 ceph_mdsc_put_request(fi->last_readdir);
565 fi->last_readdir = NULL;
566 }
567 kfree(fi->last_name);
568 fi->last_name = NULL;
569 fi->dir_release_count = 0;
570 fi->readdir_cache_idx = -1;
571 fi->next_offset = 2; /* compensate for . and .. */
572 fi->flags &= ~CEPH_F_ATEND;
573 }
574
575 /*
576 * discard buffered readdir content on seekdir(0), or seek to new frag,
577 * or seek prior to current chunk
578 */
579 static bool need_reset_readdir(struct ceph_file_info *fi, loff_t new_pos)
580 {
581 struct ceph_mds_reply_info_parsed *rinfo;
582 loff_t chunk_offset;
583 if (new_pos == 0)
584 return true;
585 if (is_hash_order(new_pos)) {
586 /* no need to reset last_name for a forward seek when
587          * dentries are sorted in hash order */
588 } else if (fi->frag != fpos_frag(new_pos)) {
589 return true;
590 }
591 rinfo = fi->last_readdir ? &fi->last_readdir->r_reply_info : NULL;
592 if (!rinfo || !rinfo->dir_nr)
593 return true;
594 chunk_offset = rinfo->dir_entries[0].offset;
595 return new_pos < chunk_offset ||
596 is_hash_order(new_pos) != is_hash_order(chunk_offset);
597 }
598
599 static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
600 {
601 struct ceph_file_info *fi = file->private_data;
602 struct inode *inode = file->f_mapping->host;
603 loff_t retval;
604
605 inode_lock(inode);
606 retval = -EINVAL;
607 switch (whence) {
608 case SEEK_CUR:
609 offset += file->f_pos;
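                /* fall through */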
610 case SEEK_SET:
611 break;
612 case SEEK_END:
613 retval = -EOPNOTSUPP;
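                /* fall through */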
614 default:
615 goto out;
616 }
617
618 if (offset >= 0) {
619 if (need_reset_readdir(fi, offset)) {
620 dout("dir_llseek dropping %p content\n", file);
621 reset_readdir(fi);
622 } else if (is_hash_order(offset) && offset > file->f_pos) {
623 /* for hash offset, we don't know if a forward seek
624 * is within same frag */
625 fi->dir_release_count = 0;
626 fi->readdir_cache_idx = -1;
627 }
628
629 if (offset != file->f_pos) {
630 file->f_pos = offset;
631 file->f_version = 0;
632 fi->flags &= ~CEPH_F_ATEND;
633 }
634 retval = offset;
635 }
636 out:
637 inode_unlock(inode);
638 return retval;
639 }
640
641 /*
642 * Handle lookups for the hidden .snap directory.
643 */
644 int ceph_handle_snapdir(struct ceph_mds_request *req,
645 struct dentry *dentry, int err)
646 {
647 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
648 struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */
649
650 /* .snap dir? */
651 if (err == -ENOENT &&
652 ceph_snap(parent) == CEPH_NOSNAP &&
653 strcmp(dentry->d_name.name,
654 fsc->mount_options->snapdir_name) == 0) {
655 struct inode *inode = ceph_get_snapdir(parent);
656 dout("ENOENT on snapdir %p '%pd', linking to snapdir %p\n",
657 dentry, dentry, inode);
658 BUG_ON(!d_unhashed(dentry));
659 d_add(dentry, inode);
660 err = 0;
661 }
662 return err;
663 }
664
665 /*
666 * Figure out final result of a lookup/open request.
667 *
668 * Mainly, make sure we return the final req->r_dentry (if it already
669 * existed) in place of the original VFS-provided dentry when they
670 * differ.
671 *
672 * Gracefully handle the case where the MDS replies with -ENOENT and
673 * no trace (which it may do, at its discretion, e.g., if it doesn't
674 * care to issue a lease on the negative dentry).
675 */
676 struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
677 struct dentry *dentry, int err)
678 {
679 if (err == -ENOENT) {
680 /* no trace? */
681 err = 0;
682 if (!req->r_reply_info.head->is_dentry) {
683 dout("ENOENT and no trace, dentry %p inode %p\n",
684 dentry, d_inode(dentry));
685 if (d_really_is_positive(dentry)) {
686 d_drop(dentry);
687 err = -ENOENT;
688 } else {
689 d_add(dentry, NULL);
690 }
691 }
692 }
693 if (err)
694 dentry = ERR_PTR(err);
695 else if (dentry != req->r_dentry)
696 dentry = dget(req->r_dentry); /* we got spliced */
697 else
698 dentry = NULL;
699 return dentry;
700 }
701
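/*
 * ".ceph" directly under the root is special-cased: ceph_lookup() never
 * satisfies it locally from a complete directory and always asks the MDS.
 */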
702 static bool is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
703 {
704 return ceph_ino(inode) == CEPH_INO_ROOT &&
705 strncmp(dentry->d_name.name, ".ceph", 5) == 0;
706 }
707
708 /*
709 * Look up a single dir entry. If there is a lookup intent, inform
710 * the MDS so that it gets our 'caps wanted' value in a single op.
711 */
712 static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
713 unsigned int flags)
714 {
715 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
716 struct ceph_mds_client *mdsc = fsc->mdsc;
717 struct ceph_mds_request *req;
718 int op;
719 int mask;
720 int err;
721
722 dout("lookup %p dentry %p '%pd'\n",
723 dir, dentry, dentry);
724
725 if (dentry->d_name.len > NAME_MAX)
726 return ERR_PTR(-ENAMETOOLONG);
727
728 /* can we conclude ENOENT locally? */
729 if (d_really_is_negative(dentry)) {
730 struct ceph_inode_info *ci = ceph_inode(dir);
731 struct ceph_dentry_info *di = ceph_dentry(dentry);
732
733 spin_lock(&ci->i_ceph_lock);
734 dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
735 if (strncmp(dentry->d_name.name,
736 fsc->mount_options->snapdir_name,
737 dentry->d_name.len) &&
738 !is_root_ceph_dentry(dir, dentry) &&
739 ceph_test_mount_opt(fsc, DCACHE) &&
740 __ceph_dir_is_complete(ci) &&
741 (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
742 spin_unlock(&ci->i_ceph_lock);
743 dout(" dir %p complete, -ENOENT\n", dir);
744 d_add(dentry, NULL);
745 di->lease_shared_gen = ci->i_shared_gen;
746 return NULL;
747 }
748 spin_unlock(&ci->i_ceph_lock);
749 }
750
751 op = ceph_snap(dir) == CEPH_SNAPDIR ?
752 CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
753 req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
754 if (IS_ERR(req))
755 return ERR_CAST(req);
756 req->r_dentry = dget(dentry);
757 req->r_num_caps = 2;
758
759 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
760 if (ceph_security_xattr_wanted(dir))
761 mask |= CEPH_CAP_XATTR_SHARED;
762 req->r_args.getattr.mask = cpu_to_le32(mask);
763
764 req->r_parent = dir;
765 set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
766 err = ceph_mdsc_do_request(mdsc, NULL, req);
767 err = ceph_handle_snapdir(req, dentry, err);
768 dentry = ceph_finish_lookup(req, dentry, err);
769 ceph_mdsc_put_request(req); /* will dput(dentry) */
770 dout("lookup result=%p\n", dentry);
771 return dentry;
772 }
773
774 /*
775 * If we do a create but get no trace back from the MDS, follow up with
776 * a lookup (the VFS expects us to link up the provided dentry).
777 */
778 int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
779 {
780 struct dentry *result = ceph_lookup(dir, dentry, 0);
781
782 if (result && !IS_ERR(result)) {
783 /*
784 * We created the item, then did a lookup, and found
785 * it was already linked to another inode we already
786                  * had in our cache (and thus got spliced). To avoid
787                  * confusing the VFS (especially when the inode is a
788                  * directory), we don't link our dentry to that inode and
789                  * return an error instead.
790                  *
791                  * This should be rare, and it only happens when we talk to
792                  * an old MDS. A recent MDS does not send a traceless reply
793                  * for a request that creates a new inode.
794 */
795 d_drop(result);
796 return -ESTALE;
797 }
798 return PTR_ERR(result);
799 }
800
801 static int ceph_mknod(struct inode *dir, struct dentry *dentry,
802 umode_t mode, dev_t rdev)
803 {
804 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
805 struct ceph_mds_client *mdsc = fsc->mdsc;
806 struct ceph_mds_request *req;
807 struct ceph_acls_info acls = {};
808 int err;
809
810 if (ceph_snap(dir) != CEPH_NOSNAP)
811 return -EROFS;
812
813 err = ceph_pre_init_acls(dir, &mode, &acls);
814 if (err < 0)
815 return err;
816
817 dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
818 dir, dentry, mode, rdev);
819 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
820 if (IS_ERR(req)) {
821 err = PTR_ERR(req);
822 goto out;
823 }
824 req->r_dentry = dget(dentry);
825 req->r_num_caps = 2;
826 req->r_parent = dir;
827 set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
828 req->r_args.mknod.mode = cpu_to_le32(mode);
829 req->r_args.mknod.rdev = cpu_to_le32(rdev);
830 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
831 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
832 if (acls.pagelist) {
833 req->r_pagelist = acls.pagelist;
834 acls.pagelist = NULL;
835 }
836 err = ceph_mdsc_do_request(mdsc, dir, req);
837 if (!err && !req->r_reply_info.head->is_dentry)
838 err = ceph_handle_notrace_create(dir, dentry);
839 ceph_mdsc_put_request(req);
840 out:
841 if (!err)
842 ceph_init_inode_acls(d_inode(dentry), &acls);
843 else
844 d_drop(dentry);
845 ceph_release_acls_info(&acls);
846 return err;
847 }
848
849 static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
850 bool excl)
851 {
852 return ceph_mknod(dir, dentry, mode, 0);
853 }
854
855 static int ceph_symlink(struct inode *dir, struct dentry *dentry,
856 const char *dest)
857 {
858 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
859 struct ceph_mds_client *mdsc = fsc->mdsc;
860 struct ceph_mds_request *req;
861 int err;
862
863 if (ceph_snap(dir) != CEPH_NOSNAP)
864 return -EROFS;
865
866 dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
867 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
868 if (IS_ERR(req)) {
869 err = PTR_ERR(req);
870 goto out;
871 }
872 req->r_path2 = kstrdup(dest, GFP_KERNEL);
873 if (!req->r_path2) {
874 err = -ENOMEM;
875 ceph_mdsc_put_request(req);
876 goto out;
877 }
878 req->r_parent = dir;
879 set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
880 req->r_dentry = dget(dentry);
881 req->r_num_caps = 2;
882 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
883 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
884 err = ceph_mdsc_do_request(mdsc, dir, req);
885 if (!err && !req->r_reply_info.head->is_dentry)
886 err = ceph_handle_notrace_create(dir, dentry);
887 ceph_mdsc_put_request(req);
888 out:
889 if (err)
890 d_drop(dentry);
891 return err;
892 }
893
894 static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
895 {
896 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
897 struct ceph_mds_client *mdsc = fsc->mdsc;
898 struct ceph_mds_request *req;
899 struct ceph_acls_info acls = {};
900 int err = -EROFS;
901 int op;
902
903 if (ceph_snap(dir) == CEPH_SNAPDIR) {
904 /* mkdir .snap/foo is a MKSNAP */
905 op = CEPH_MDS_OP_MKSNAP;
906 dout("mksnap dir %p snap '%pd' dn %p\n", dir,
907 dentry, dentry);
908 } else if (ceph_snap(dir) == CEPH_NOSNAP) {
909 dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
910 op = CEPH_MDS_OP_MKDIR;
911 } else {
912 goto out;
913 }
914
915 mode |= S_IFDIR;
916 err = ceph_pre_init_acls(dir, &mode, &acls);
917 if (err < 0)
918 goto out;
919
920 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
921 if (IS_ERR(req)) {
922 err = PTR_ERR(req);
923 goto out;
924 }
925
926 req->r_dentry = dget(dentry);
927 req->r_num_caps = 2;
928 req->r_parent = dir;
929 set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
930 req->r_args.mkdir.mode = cpu_to_le32(mode);
931 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
932 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
933 if (acls.pagelist) {
934 req->r_pagelist = acls.pagelist;
935 acls.pagelist = NULL;
936 }
937 err = ceph_mdsc_do_request(mdsc, dir, req);
938 if (!err &&
939 !req->r_reply_info.head->is_target &&
940 !req->r_reply_info.head->is_dentry)
941 err = ceph_handle_notrace_create(dir, dentry);
942 ceph_mdsc_put_request(req);
943 out:
944 if (!err)
945 ceph_init_inode_acls(d_inode(dentry), &acls);
946 else
947 d_drop(dentry);
948 ceph_release_acls_info(&acls);
949 return err;
950 }
951
952 static int ceph_link(struct dentry *old_dentry, struct inode *dir,
953 struct dentry *dentry)
954 {
955 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
956 struct ceph_mds_client *mdsc = fsc->mdsc;
957 struct ceph_mds_request *req;
958 int err;
959
960 if (ceph_snap(dir) != CEPH_NOSNAP)
961 return -EROFS;
962
963 dout("link in dir %p old_dentry %p dentry %p\n", dir,
964 old_dentry, dentry);
965 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
966 if (IS_ERR(req)) {
967 d_drop(dentry);
968 return PTR_ERR(req);
969 }
970 req->r_dentry = dget(dentry);
971 req->r_num_caps = 2;
972 req->r_old_dentry = dget(old_dentry);
973 req->r_parent = dir;
974 set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
975 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
976 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
977 /* release LINK_SHARED on source inode (mds will lock it) */
978 req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
979 err = ceph_mdsc_do_request(mdsc, dir, req);
980 if (err) {
981 d_drop(dentry);
982 } else if (!req->r_reply_info.head->is_dentry) {
983 ihold(d_inode(old_dentry));
984 d_instantiate(dentry, d_inode(old_dentry));
985 }
986 ceph_mdsc_put_request(req);
987 return err;
988 }
989
990 /*
991 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it
992 * looks like the link count will hit 0, drop any other caps (other
993 * than PIN) we don't specifically want (due to the file still being
994 * open).
995 */
996 static int drop_caps_for_unlink(struct inode *inode)
997 {
998 struct ceph_inode_info *ci = ceph_inode(inode);
999 int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
1000
1001 spin_lock(&ci->i_ceph_lock);
1002 if (inode->i_nlink == 1) {
1003 drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
1004 ci->i_ceph_flags |= CEPH_I_NODELAY;
1005 }
1006 spin_unlock(&ci->i_ceph_lock);
1007 return drop;
1008 }
1009
1010 /*
1011  * rmdir and unlink differ only by the metadata op code
1012 */
1013 static int ceph_unlink(struct inode *dir, struct dentry *dentry)
1014 {
1015 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
1016 struct ceph_mds_client *mdsc = fsc->mdsc;
1017 struct inode *inode = d_inode(dentry);
1018 struct ceph_mds_request *req;
1019 int err = -EROFS;
1020 int op;
1021
1022 if (ceph_snap(dir) == CEPH_SNAPDIR) {
1023 /* rmdir .snap/foo is RMSNAP */
1024 dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
1025 op = CEPH_MDS_OP_RMSNAP;
1026 } else if (ceph_snap(dir) == CEPH_NOSNAP) {
1027 dout("unlink/rmdir dir %p dn %p inode %p\n",
1028 dir, dentry, inode);
1029 op = d_is_dir(dentry) ?
1030 CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
1031 } else
1032 goto out;
1033 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
1034 if (IS_ERR(req)) {
1035 err = PTR_ERR(req);
1036 goto out;
1037 }
1038 req->r_dentry = dget(dentry);
1039 req->r_num_caps = 2;
1040 req->r_parent = dir;
1041 set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
1042 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
1043 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
1044 req->r_inode_drop = drop_caps_for_unlink(inode);
1045 err = ceph_mdsc_do_request(mdsc, dir, req);
1046 if (!err && !req->r_reply_info.head->is_dentry)
1047 d_delete(dentry);
1048 ceph_mdsc_put_request(req);
1049 out:
1050 return err;
1051 }
1052
1053 static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
1054 struct inode *new_dir, struct dentry *new_dentry,
1055 unsigned int flags)
1056 {
1057 struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
1058 struct ceph_mds_client *mdsc = fsc->mdsc;
1059 struct ceph_mds_request *req;
1060 int op = CEPH_MDS_OP_RENAME;
1061 int err;
1062
1063 if (flags)
1064 return -EINVAL;
1065
1066 if (ceph_snap(old_dir) != ceph_snap(new_dir))
1067 return -EXDEV;
1068 if (ceph_snap(old_dir) != CEPH_NOSNAP) {
1069 if (old_dir == new_dir && ceph_snap(old_dir) == CEPH_SNAPDIR)
1070 op = CEPH_MDS_OP_RENAMESNAP;
1071 else
1072 return -EROFS;
1073 }
1074 dout("rename dir %p dentry %p to dir %p dentry %p\n",
1075 old_dir, old_dentry, new_dir, new_dentry);
1076 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
1077 if (IS_ERR(req))
1078 return PTR_ERR(req);
1079 ihold(old_dir);
1080 req->r_dentry = dget(new_dentry);
1081 req->r_num_caps = 2;
1082 req->r_old_dentry = dget(old_dentry);
1083 req->r_old_dentry_dir = old_dir;
1084 req->r_parent = new_dir;
1085 set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
1086 req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
1087 req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
1088 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
1089 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
1090 /* release LINK_RDCACHE on source inode (mds will lock it) */
1091 req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
1092 if (d_really_is_positive(new_dentry))
1093 req->r_inode_drop = drop_caps_for_unlink(d_inode(new_dentry));
1094 err = ceph_mdsc_do_request(mdsc, old_dir, req);
1095 if (!err && !req->r_reply_info.head->is_dentry) {
1096 /*
1097 * Normally d_move() is done by fill_trace (called by
1098 * do_request, above). If there is no trace, we need
1099 * to do it here.
1100 */
1101
1102 /* d_move screws up sibling dentries' offsets */
1103 ceph_dir_clear_complete(old_dir);
1104 ceph_dir_clear_complete(new_dir);
1105
1106 d_move(old_dentry, new_dentry);
1107
1108 /* ensure target dentry is invalidated, despite
1109 rehashing bug in vfs_rename_dir */
1110 ceph_invalidate_dentry_lease(new_dentry);
1111 }
1112 ceph_mdsc_put_request(req);
1113 return err;
1114 }
1115
1116 /*
1117 * Ensure a dentry lease will no longer revalidate.
1118 */
1119 void ceph_invalidate_dentry_lease(struct dentry *dentry)
1120 {
1121 spin_lock(&dentry->d_lock);
1122 ceph_dentry(dentry)->time = jiffies;
1123 ceph_dentry(dentry)->lease_shared_gen = 0;
1124 spin_unlock(&dentry->d_lock);
1125 }
1126
1127 /*
1128 * Check if dentry lease is valid. If not, delete the lease. Try to
1129  * renew if the lease is more than half up.
1130 */
1131 static int dentry_lease_is_valid(struct dentry *dentry, unsigned int flags,
1132 struct inode *dir)
1133 {
1134 struct ceph_dentry_info *di;
1135 struct ceph_mds_session *s;
1136 int valid = 0;
1137 u32 gen;
1138 unsigned long ttl;
1139 struct ceph_mds_session *session = NULL;
1140 u32 seq = 0;
1141
1142 spin_lock(&dentry->d_lock);
1143 di = ceph_dentry(dentry);
1144 if (di && di->lease_session) {
1145 s = di->lease_session;
1146 spin_lock(&s->s_gen_ttl_lock);
1147 gen = s->s_cap_gen;
1148 ttl = s->s_cap_ttl;
1149 spin_unlock(&s->s_gen_ttl_lock);
1150
1151 if (di->lease_gen == gen &&
1152 time_before(jiffies, di->time) &&
1153 time_before(jiffies, ttl)) {
1154 valid = 1;
1155 if (di->lease_renew_after &&
1156 time_after(jiffies, di->lease_renew_after)) {
1157 /*
1158 * We should renew. If we're in RCU walk mode
1159 * though, we can't do that so just return
1160 * -ECHILD.
1161 */
1162 if (flags & LOOKUP_RCU) {
1163 valid = -ECHILD;
1164 } else {
1165 session = ceph_get_mds_session(s);
1166 seq = di->lease_seq;
1167 di->lease_renew_after = 0;
1168 di->lease_renew_from = jiffies;
1169 }
1170 }
1171 }
1172 }
1173 spin_unlock(&dentry->d_lock);
1174
1175 if (session) {
1176 ceph_mdsc_lease_send_msg(session, dir, dentry,
1177 CEPH_MDS_LEASE_RENEW, seq);
1178 ceph_put_mds_session(session);
1179 }
1180 dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
1181 return valid;
1182 }
1183
1184 /*
1185 * Check if directory-wide content lease/cap is valid.
1186 */
1187 static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
1188 {
1189 struct ceph_inode_info *ci = ceph_inode(dir);
1190 struct ceph_dentry_info *di = ceph_dentry(dentry);
1191 int valid = 0;
1192
1193 spin_lock(&ci->i_ceph_lock);
1194 if (ci->i_shared_gen == di->lease_shared_gen)
1195 valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
1196 spin_unlock(&ci->i_ceph_lock);
1197 dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
1198 dir, (unsigned)ci->i_shared_gen, dentry,
1199 (unsigned)di->lease_shared_gen, valid);
1200 return valid;
1201 }
1202
1203 /*
1204 * Check if cached dentry can be trusted.
1205 */
1206 static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
1207 {
1208 int valid = 0;
1209 struct dentry *parent;
1210 struct inode *dir;
1211
1212 if (flags & LOOKUP_RCU) {
1213 parent = READ_ONCE(dentry->d_parent);
1214 dir = d_inode_rcu(parent);
1215 if (!dir)
1216 return -ECHILD;
1217 } else {
1218 parent = dget_parent(dentry);
1219 dir = d_inode(parent);
1220 }
1221
1222 dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
1223 dentry, d_inode(dentry), ceph_dentry(dentry)->offset);
1224
1225 /* always trust cached snapped dentries, snapdir dentry */
1226 if (ceph_snap(dir) != CEPH_NOSNAP) {
1227 dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
1228 dentry, d_inode(dentry));
1229 valid = 1;
1230 } else if (d_really_is_positive(dentry) &&
1231 ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) {
1232 valid = 1;
1233 } else {
1234 valid = dentry_lease_is_valid(dentry, flags, dir);
1235 if (valid == -ECHILD)
1236 return valid;
1237 if (valid || dir_lease_is_valid(dir, dentry)) {
1238 if (d_really_is_positive(dentry))
1239 valid = ceph_is_any_caps(d_inode(dentry));
1240 else
1241 valid = 1;
1242 }
1243 }
1244
1245 if (!valid) {
1246 struct ceph_mds_client *mdsc =
1247 ceph_sb_to_client(dir->i_sb)->mdsc;
1248 struct ceph_mds_request *req;
1249 int op, err;
1250 u32 mask;
1251
1252 if (flags & LOOKUP_RCU)
1253 return -ECHILD;
1254
1255 op = ceph_snap(dir) == CEPH_SNAPDIR ?
1256 CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
1257 req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
1258 if (!IS_ERR(req)) {
1259 req->r_dentry = dget(dentry);
1260 req->r_num_caps = 2;
1261 req->r_parent = dir;
1262
1263 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
1264 if (ceph_security_xattr_wanted(dir))
1265 mask |= CEPH_CAP_XATTR_SHARED;
1266 req->r_args.getattr.mask = cpu_to_le32(mask);
1267
1268 err = ceph_mdsc_do_request(mdsc, NULL, req);
1269 switch (err) {
1270 case 0:
1271 if (d_really_is_positive(dentry) &&
1272 d_inode(dentry) == req->r_target_inode)
1273 valid = 1;
1274 break;
1275 case -ENOENT:
1276 if (d_really_is_negative(dentry))
1277 valid = 1;
1278 /* Fallthrough */
1279 default:
1280 break;
1281 }
1282 ceph_mdsc_put_request(req);
1283 dout("d_revalidate %p lookup result=%d\n",
1284 dentry, err);
1285 }
1286 }
1287
1288 dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
1289 if (valid) {
1290 ceph_dentry_lru_touch(dentry);
1291 } else {
1292 ceph_dir_clear_complete(dir);
1293 }
1294
1295 if (!(flags & LOOKUP_RCU))
1296 dput(parent);
1297 return valid;
1298 }
1299
1300 /*
1301 * Release our ceph_dentry_info.
1302 */
1303 static void ceph_d_release(struct dentry *dentry)
1304 {
1305 struct ceph_dentry_info *di = ceph_dentry(dentry);
1306
1307 dout("d_release %p\n", dentry);
1308 ceph_dentry_lru_del(dentry);
1309
1310 spin_lock(&dentry->d_lock);
1311 dentry->d_fsdata = NULL;
1312 spin_unlock(&dentry->d_lock);
1313
1314 if (di->lease_session)
1315 ceph_put_mds_session(di->lease_session);
1316 kmem_cache_free(ceph_dentry_cachep, di);
1317 }
1318
1319 /*
1320 * When the VFS prunes a dentry from the cache, we need to clear the
1321 * complete flag on the parent directory.
1322 *
1323 * Called under dentry->d_lock.
1324 */
1325 static void ceph_d_prune(struct dentry *dentry)
1326 {
1327 dout("ceph_d_prune %p\n", dentry);
1328
1329 /* do we have a valid parent? */
1330 if (IS_ROOT(dentry))
1331 return;
1332
1333 /* if we are not hashed, we don't affect dir's completeness */
1334 if (d_unhashed(dentry))
1335 return;
1336
1337 if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR)
1338 return;
1339
1340 /*
1341 * we hold d_lock, so d_parent is stable, and d_fsdata is never
1342 * cleared until d_release
1343 */
1344 ceph_dir_clear_complete(d_inode(dentry->d_parent));
1345 }
1346
1347 /*
1348 * read() on a dir. This weird interface hack only works if mounted
1349 * with '-o dirstat'.
1350 */
1351 static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
1352 loff_t *ppos)
1353 {
1354 struct ceph_file_info *cf = file->private_data;
1355 struct inode *inode = file_inode(file);
1356 struct ceph_inode_info *ci = ceph_inode(inode);
1357 int left;
1358 const int bufsize = 1024;
1359
1360 if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
1361 return -EISDIR;
1362
1363 if (!cf->dir_info) {
1364 cf->dir_info = kmalloc(bufsize, GFP_KERNEL);
1365 if (!cf->dir_info)
1366 return -ENOMEM;
1367 cf->dir_info_len =
1368 snprintf(cf->dir_info, bufsize,
1369 "entries: %20lld\n"
1370 " files: %20lld\n"
1371 " subdirs: %20lld\n"
1372 "rentries: %20lld\n"
1373 " rfiles: %20lld\n"
1374 " rsubdirs: %20lld\n"
1375 "rbytes: %20lld\n"
1376 "rctime: %10ld.%09ld\n",
1377 ci->i_files + ci->i_subdirs,
1378 ci->i_files,
1379 ci->i_subdirs,
1380 ci->i_rfiles + ci->i_rsubdirs,
1381 ci->i_rfiles,
1382 ci->i_rsubdirs,
1383 ci->i_rbytes,
1384 (long)ci->i_rctime.tv_sec,
1385 (long)ci->i_rctime.tv_nsec);
1386 }
1387
1388 if (*ppos >= cf->dir_info_len)
1389 return 0;
1390 size = min_t(unsigned, size, cf->dir_info_len-*ppos);
1391 left = copy_to_user(buf, cf->dir_info + *ppos, size);
1392 if (left == size)
1393 return -EFAULT;
1394 *ppos += (size - left);
1395 return size - left;
1396 }
1397
1398 /*
1399 * We maintain a private dentry LRU.
1400 *
1401 * FIXME: this needs to be changed to a per-mds lru to be useful.
1402 */
1403 void ceph_dentry_lru_add(struct dentry *dn)
1404 {
1405 struct ceph_dentry_info *di = ceph_dentry(dn);
1406 struct ceph_mds_client *mdsc;
1407
1408 dout("dentry_lru_add %p %p '%pd'\n", di, dn, dn);
1409 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1410 spin_lock(&mdsc->dentry_lru_lock);
1411 list_add_tail(&di->lru, &mdsc->dentry_lru);
1412 mdsc->num_dentry++;
1413 spin_unlock(&mdsc->dentry_lru_lock);
1414 }
1415
1416 void ceph_dentry_lru_touch(struct dentry *dn)
1417 {
1418 struct ceph_dentry_info *di = ceph_dentry(dn);
1419 struct ceph_mds_client *mdsc;
1420
1421 dout("dentry_lru_touch %p %p '%pd' (offset %lld)\n", di, dn, dn,
1422 di->offset);
1423 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1424 spin_lock(&mdsc->dentry_lru_lock);
1425 list_move_tail(&di->lru, &mdsc->dentry_lru);
1426 spin_unlock(&mdsc->dentry_lru_lock);
1427 }
1428
1429 void ceph_dentry_lru_del(struct dentry *dn)
1430 {
1431 struct ceph_dentry_info *di = ceph_dentry(dn);
1432 struct ceph_mds_client *mdsc;
1433
1434 dout("dentry_lru_del %p %p '%pd'\n", di, dn, dn);
1435 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1436 spin_lock(&mdsc->dentry_lru_lock);
1437 list_del_init(&di->lru);
1438 mdsc->num_dentry--;
1439 spin_unlock(&mdsc->dentry_lru_lock);
1440 }
1441
1442 /*
1443 * Return name hash for a given dentry. This is dependent on
1444 * the parent directory's hash function.
1445 */
1446 unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
1447 {
1448 struct ceph_inode_info *dci = ceph_inode(dir);
1449
1450 switch (dci->i_dir_layout.dl_dir_hash) {
1451 case 0: /* for backward compat */
1452 case CEPH_STR_HASH_LINUX:
1453 return dn->d_name.hash;
1454
1455 default:
1456 return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
1457 dn->d_name.name, dn->d_name.len);
1458 }
1459 }
1460
1461 const struct file_operations ceph_dir_fops = {
1462 .read = ceph_read_dir,
1463 .iterate = ceph_readdir,
1464 .llseek = ceph_dir_llseek,
1465 .open = ceph_open,
1466 .release = ceph_release,
1467 .unlocked_ioctl = ceph_ioctl,
1468 .fsync = ceph_fsync,
1469 };
1470
1471 const struct file_operations ceph_snapdir_fops = {
1472 .iterate = ceph_readdir,
1473 .llseek = ceph_dir_llseek,
1474 .open = ceph_open,
1475 .release = ceph_release,
1476 };
1477
1478 const struct inode_operations ceph_dir_iops = {
1479 .lookup = ceph_lookup,
1480 .permission = ceph_permission,
1481 .getattr = ceph_getattr,
1482 .setattr = ceph_setattr,
1483 .listxattr = ceph_listxattr,
1484 .get_acl = ceph_get_acl,
1485 .set_acl = ceph_set_acl,
1486 .mknod = ceph_mknod,
1487 .symlink = ceph_symlink,
1488 .mkdir = ceph_mkdir,
1489 .link = ceph_link,
1490 .unlink = ceph_unlink,
1491 .rmdir = ceph_unlink,
1492 .rename = ceph_rename,
1493 .create = ceph_create,
1494 .atomic_open = ceph_atomic_open,
1495 };
1496
1497 const struct inode_operations ceph_snapdir_iops = {
1498 .lookup = ceph_lookup,
1499 .permission = ceph_permission,
1500 .getattr = ceph_getattr,
1501 .mkdir = ceph_mkdir,
1502 .rmdir = ceph_unlink,
1503 .rename = ceph_rename,
1504 };
1505
1506 const struct dentry_operations ceph_dentry_ops = {
1507 .d_revalidate = ceph_d_revalidate,
1508 .d_release = ceph_d_release,
1509 .d_prune = ceph_d_prune,
1510 .d_init = ceph_d_init,
1511 };