fs/ceph/dir.c
#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/xattr.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */
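/*
 * Illustrative example: looking up "b" under directory /a is expressed
 * to the MDS as the pair (ino of /a, "b") rather than as the full path
 * "/a/b".
 */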

const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
static int ceph_d_init(struct dentry *dentry)
{
        struct ceph_dentry_info *di;

        di = kmem_cache_zalloc(ceph_dentry_cachep, GFP_KERNEL);
        if (!di)
                return -ENOMEM;          /* oh well */

        di->dentry = dentry;
        di->lease_session = NULL;
        di->time = jiffies;
        dentry->d_fsdata = di;
        ceph_dentry_lru_add(dentry);
        return 0;
}

/*
 * f_pos encoding for readdir:
 * - hash order:
 *      (0xff << 52) | ((24 bits hash) << 28) |
 *      (the nth entry with this hash collision);
 * - frag+name order:
 *      ((frag value) << 28) | (the nth entry in frag);
 */
#define OFFSET_BITS 28
#define OFFSET_MASK ((1 << OFFSET_BITS) - 1)
#define HASH_ORDER  (0xffull << (OFFSET_BITS + 24))
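/*
 * Worked example (illustrative): in frag+name order, the 5th entry of
 * frag 0x2 encodes as ceph_make_fpos(0x2, 5, false)
 * == ((loff_t)0x2 << 28) | 5.  A hash-order position additionally has
 * the HASH_ORDER bits set, which is how is_hash_order() below tells
 * the two encodings apart.
 */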
loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order)
{
        loff_t fpos = ((loff_t)high << 28) | (loff_t)off;
        if (hash_order)
                fpos |= HASH_ORDER;
        return fpos;
}

static bool is_hash_order(loff_t p)
{
        return (p & HASH_ORDER) == HASH_ORDER;
}

static unsigned fpos_frag(loff_t p)
{
        return p >> OFFSET_BITS;
}

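/*
 * For a hash-order position, the bits above OFFSET_BITS hold the name
 * hash rather than a fragment (see the f_pos encoding comment above),
 * so extracting the frag "value" here yields the 24-bit hash.
 */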
static unsigned fpos_hash(loff_t p)
{
        return ceph_frag_value(fpos_frag(p));
}

static unsigned fpos_off(loff_t p)
{
        return p & OFFSET_MASK;
}

static int fpos_cmp(loff_t l, loff_t r)
{
        int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
        if (v)
                return v;
        return (int)(fpos_off(l) - fpos_off(r));
}

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
                            int len, unsigned next_offset)
{
        char *buf = kmalloc(len+1, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        kfree(fi->last_name);
        fi->last_name = buf;
        memcpy(fi->last_name, name, len);
        fi->last_name[len] = 0;
        fi->next_offset = next_offset;
        dout("note_last_dentry '%s'\n", fi->last_name);
        return 0;
}


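/*
 * The readdir cache is an array of dentry pointers kept in the dir
 * inode's page cache; i_size tracks the array's size in bytes (see the
 * "complete and ordered" path at the end of ceph_readdir()).  Look up
 * the idx'th cached dentry, returning NULL past the end of the array
 * and ERR_PTR(-EAGAIN) when the cache cannot be used.
 */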
static struct dentry *
__dcache_find_get_entry(struct dentry *parent, u64 idx,
                        struct ceph_readdir_cache_control *cache_ctl)
{
        struct inode *dir = d_inode(parent);
        struct dentry *dentry;
        unsigned idx_mask = (PAGE_SIZE / sizeof(struct dentry *)) - 1;
        loff_t ptr_pos = idx * sizeof(struct dentry *);
        pgoff_t ptr_pgoff = ptr_pos >> PAGE_SHIFT;

        if (ptr_pos >= i_size_read(dir))
                return NULL;

        if (!cache_ctl->page || ptr_pgoff != page_index(cache_ctl->page)) {
                ceph_readdir_cache_release(cache_ctl);
                cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
                if (!cache_ctl->page) {
                        dout(" page %lu not found\n", ptr_pgoff);
                        return ERR_PTR(-EAGAIN);
                }
                /* reading/filling the cache are serialized by
                   i_mutex, no need to use page lock */
                unlock_page(cache_ctl->page);
                cache_ctl->dentries = kmap(cache_ctl->page);
        }

        cache_ctl->index = idx & idx_mask;

        rcu_read_lock();
        spin_lock(&parent->d_lock);
        /* check i_size again here, because an empty directory can be
         * marked as complete while not holding the i_mutex. */
        if (ceph_dir_is_complete_ordered(dir) && ptr_pos < i_size_read(dir))
                dentry = cache_ctl->dentries[cache_ctl->index];
        else
                dentry = NULL;
        spin_unlock(&parent->d_lock);
        if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
                dentry = NULL;
        rcu_read_unlock();
        return dentry ? : ERR_PTR(-EAGAIN);
}

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *file, struct dir_context *ctx,
                            u32 shared_gen)
{
        struct ceph_file_info *fi = file->private_data;
        struct dentry *parent = file->f_path.dentry;
        struct inode *dir = d_inode(parent);
        struct dentry *dentry, *last = NULL;
        struct ceph_dentry_info *di;
        struct ceph_readdir_cache_control cache_ctl = {};
        u64 idx = 0;
        int err = 0;

        dout("__dcache_readdir %p v%u at %llx\n", dir, shared_gen, ctx->pos);

        /* search start position (binary search by dentry offset) */
        if (ctx->pos > 2) {
                u64 count = div_u64(i_size_read(dir), sizeof(struct dentry *));
                while (count > 0) {
                        u64 step = count >> 1;
                        dentry = __dcache_find_get_entry(parent, idx + step,
                                                         &cache_ctl);
                        if (!dentry) {
                                /* use linear search */
                                idx = 0;
                                break;
                        }
                        if (IS_ERR(dentry)) {
                                err = PTR_ERR(dentry);
                                goto out;
                        }
                        di = ceph_dentry(dentry);
                        spin_lock(&dentry->d_lock);
                        if (fpos_cmp(di->offset, ctx->pos) < 0) {
                                idx += step + 1;
                                count -= step + 1;
                        } else {
                                count = step;
                        }
                        spin_unlock(&dentry->d_lock);
                        dput(dentry);
                }

                dout("__dcache_readdir %p cache idx %llu\n", dir, idx);
        }


        for (;;) {
                bool emit_dentry = false;
                dentry = __dcache_find_get_entry(parent, idx++, &cache_ctl);
                if (!dentry) {
                        fi->flags |= CEPH_F_ATEND;
                        err = 0;
                        break;
                }
                if (IS_ERR(dentry)) {
                        err = PTR_ERR(dentry);
                        goto out;
                }

                di = ceph_dentry(dentry);
                spin_lock(&dentry->d_lock);
                if (di->lease_shared_gen == shared_gen &&
                    d_really_is_positive(dentry) &&
                    fpos_cmp(ctx->pos, di->offset) <= 0) {
                        emit_dentry = true;
                }
                spin_unlock(&dentry->d_lock);

                if (emit_dentry) {
                        dout(" %llx dentry %p %pd %p\n", di->offset,
                             dentry, dentry, d_inode(dentry));
                        ctx->pos = di->offset;
                        if (!dir_emit(ctx, dentry->d_name.name,
                                      dentry->d_name.len,
                                      ceph_translate_ino(dentry->d_sb,
                                                d_inode(dentry)->i_ino),
                                      d_inode(dentry)->i_mode >> 12)) {
                                dput(dentry);
                                err = 0;
                                break;
                        }
                        ctx->pos++;

                        if (last)
                                dput(last);
                        last = dentry;
                } else {
                        dput(dentry);
                }
        }
out:
        ceph_readdir_cache_release(&cache_ctl);
        if (last) {
                int ret;
                di = ceph_dentry(last);
                ret = note_last_dentry(fi, last->d_name.name, last->d_name.len,
                                       fpos_off(di->offset) + 1);
                if (ret < 0)
                        err = ret;
                dput(last);
                /* last_name no longer matches cache index */
                if (fi->readdir_cache_idx >= 0) {
                        fi->readdir_cache_idx = -1;
                        fi->dir_release_count = 0;
                }
        }
        return err;
}

static bool need_send_readdir(struct ceph_file_info *fi, loff_t pos)
{
        if (!fi->last_readdir)
                return true;
        if (is_hash_order(pos))
                return !ceph_frag_contains_value(fi->frag, fpos_hash(pos));
        else
                return fi->frag != fpos_frag(pos);
}

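/*
 * Main readdir path: emit "." and "..", try the dcache shortcut above,
 * then page through MDS READDIR (or LSSNAP for the snap dir) replies
 * one frag chunk at a time, encoding the position via ceph_make_fpos().
 */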
static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        int i;
        int err;
        unsigned frag = -1;
        struct ceph_mds_reply_info_parsed *rinfo;

        dout("readdir %p file %p pos %llx\n", inode, file, ctx->pos);
        if (fi->flags & CEPH_F_ATEND)
                return 0;

        /* always start with . and .. */
        if (ctx->pos == 0) {
                dout("readdir off 0 -> '.'\n");
                if (!dir_emit(ctx, ".", 1,
                              ceph_translate_ino(inode->i_sb, inode->i_ino),
                              inode->i_mode >> 12))
                        return 0;
                ctx->pos = 1;
        }
        if (ctx->pos == 1) {
                ino_t ino = parent_ino(file->f_path.dentry);
                dout("readdir off 1 -> '..'\n");
                if (!dir_emit(ctx, "..", 2,
                              ceph_translate_ino(inode->i_sb, ino),
                              inode->i_mode >> 12))
                        return 0;
                ctx->pos = 2;
        }

        /* can we use the dcache? */
        spin_lock(&ci->i_ceph_lock);
        if (ceph_test_mount_opt(fsc, DCACHE) &&
            !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
            ceph_snap(inode) != CEPH_SNAPDIR &&
            __ceph_dir_is_complete_ordered(ci) &&
            __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
                u32 shared_gen = ci->i_shared_gen;
                spin_unlock(&ci->i_ceph_lock);
                err = __dcache_readdir(file, ctx, shared_gen);
                if (err != -EAGAIN)
                        return err;
        } else {
                spin_unlock(&ci->i_ceph_lock);
        }

        /* proceed with a normal readdir */
more:
        /* do we have the correct frag content buffered? */
        if (need_send_readdir(fi, ctx->pos)) {
                struct ceph_mds_request *req;
                int op = ceph_snap(inode) == CEPH_SNAPDIR ?
                        CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

                /* discard old result, if any */
                if (fi->last_readdir) {
                        ceph_mdsc_put_request(fi->last_readdir);
                        fi->last_readdir = NULL;
                }

                if (is_hash_order(ctx->pos)) {
                        /* fragtree isn't always accurate. choose frag
                         * based on previous reply when possible. */
                        if (frag == (unsigned)-1)
                                frag = ceph_choose_frag(ci, fpos_hash(ctx->pos),
                                                        NULL, NULL);
                } else {
                        frag = fpos_frag(ctx->pos);
                }

                dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
                     ceph_vinop(inode), frag, fi->last_name);
                req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
                if (IS_ERR(req))
                        return PTR_ERR(req);
                err = ceph_alloc_readdir_reply_buffer(req, inode);
                if (err) {
                        ceph_mdsc_put_request(req);
                        return err;
                }
                /* hints to request -> mds selection code */
                req->r_direct_mode = USE_AUTH_MDS;
                req->r_direct_hash = ceph_frag_value(frag);
                __set_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
                if (fi->last_name) {
                        req->r_path2 = kstrdup(fi->last_name, GFP_KERNEL);
                        if (!req->r_path2) {
                                ceph_mdsc_put_request(req);
                                return -ENOMEM;
                        }
                } else if (is_hash_order(ctx->pos)) {
                        req->r_args.readdir.offset_hash =
                                cpu_to_le32(fpos_hash(ctx->pos));
                }

                req->r_dir_release_cnt = fi->dir_release_count;
                req->r_dir_ordered_cnt = fi->dir_ordered_count;
                req->r_readdir_cache_idx = fi->readdir_cache_idx;
                req->r_readdir_offset = fi->next_offset;
                req->r_args.readdir.frag = cpu_to_le32(frag);
                req->r_args.readdir.flags =
                                cpu_to_le16(CEPH_READDIR_REPLY_BITFLAGS);

                req->r_inode = inode;
                ihold(inode);
                req->r_dentry = dget(file->f_path.dentry);
                err = ceph_mdsc_do_request(mdsc, NULL, req);
                if (err < 0) {
                        ceph_mdsc_put_request(req);
                        return err;
                }
                dout("readdir got and parsed readdir result=%d on "
                     "frag %x, end=%d, complete=%d, hash_order=%d\n",
                     err, frag,
                     (int)req->r_reply_info.dir_end,
                     (int)req->r_reply_info.dir_complete,
                     (int)req->r_reply_info.hash_order);

                rinfo = &req->r_reply_info;
                if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
                        frag = le32_to_cpu(rinfo->dir_dir->frag);
                        if (!rinfo->hash_order) {
                                fi->next_offset = req->r_readdir_offset;
                                /* adjust ctx->pos to beginning of frag */
                                ctx->pos = ceph_make_fpos(frag,
                                                          fi->next_offset,
                                                          false);
                        }
                }

                fi->frag = frag;
                fi->last_readdir = req;

                if (test_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags)) {
                        fi->readdir_cache_idx = req->r_readdir_cache_idx;
                        if (fi->readdir_cache_idx < 0) {
                                /* preclude from marking dir ordered */
                                fi->dir_ordered_count = 0;
                        } else if (ceph_frag_is_leftmost(frag) &&
                                   fi->next_offset == 2) {
                                /* note dir version at start of readdir so
                                 * we can tell if any dentries get dropped */
                                fi->dir_release_count = req->r_dir_release_cnt;
                                fi->dir_ordered_count = req->r_dir_ordered_cnt;
                        }
                } else {
                        dout("readdir !did_prepopulate\n");
                        /* disable readdir cache */
                        fi->readdir_cache_idx = -1;
                        /* preclude from marking dir complete */
                        fi->dir_release_count = 0;
                }

                /* note next offset and last dentry name */
                if (rinfo->dir_nr > 0) {
                        struct ceph_mds_reply_dir_entry *rde =
                                rinfo->dir_entries + (rinfo->dir_nr-1);
                        unsigned next_offset = req->r_reply_info.dir_end ?
                                2 : (fpos_off(rde->offset) + 1);
                        err = note_last_dentry(fi, rde->name, rde->name_len,
                                               next_offset);
                        if (err)
                                return err;
                } else if (req->r_reply_info.dir_end) {
                        fi->next_offset = 2;
                        /* keep last name */
                }
        }

        rinfo = &fi->last_readdir->r_reply_info;
        dout("readdir frag %x num %d pos %llx chunk first %llx\n",
             fi->frag, rinfo->dir_nr, ctx->pos,
             rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL);

        i = 0;
        /* search start position */
        if (rinfo->dir_nr > 0) {
                int step, nr = rinfo->dir_nr;
                while (nr > 0) {
                        step = nr >> 1;
                        if (rinfo->dir_entries[i + step].offset < ctx->pos) {
                                i += step + 1;
                                nr -= step + 1;
                        } else {
                                nr = step;
                        }
                }
        }
        for (; i < rinfo->dir_nr; i++) {
                struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
                struct ceph_vino vino;
                ino_t ino;
                u32 ftype;

                BUG_ON(rde->offset < ctx->pos);

                ctx->pos = rde->offset;
                dout("readdir (%d/%d) -> %llx '%.*s' %p\n",
                     i, rinfo->dir_nr, ctx->pos,
                     rde->name_len, rde->name, &rde->inode.in);

                BUG_ON(!rde->inode.in);
                ftype = le32_to_cpu(rde->inode.in->mode) >> 12;
                vino.ino = le64_to_cpu(rde->inode.in->ino);
                vino.snap = le64_to_cpu(rde->inode.in->snapid);
                ino = ceph_vino_to_ino(vino);

                if (!dir_emit(ctx, rde->name, rde->name_len,
                              ceph_translate_ino(inode->i_sb, ino), ftype)) {
                        dout("filldir stopping us...\n");
                        return 0;
                }
                ctx->pos++;
        }

        ceph_mdsc_put_request(fi->last_readdir);
        fi->last_readdir = NULL;

        if (fi->next_offset > 2) {
                frag = fi->frag;
                goto more;
        }

        /* more frags? */
        if (!ceph_frag_is_rightmost(fi->frag)) {
                frag = ceph_frag_next(fi->frag);
                if (is_hash_order(ctx->pos)) {
                        loff_t new_pos = ceph_make_fpos(ceph_frag_value(frag),
                                                        fi->next_offset, true);
                        if (new_pos > ctx->pos)
                                ctx->pos = new_pos;
                        /* keep last_name */
                } else {
                        ctx->pos = ceph_make_fpos(frag, fi->next_offset, false);
                        kfree(fi->last_name);
                        fi->last_name = NULL;
                }
                dout("readdir next frag is %x\n", frag);
                goto more;
        }
        fi->flags |= CEPH_F_ATEND;

        /*
         * if dir_release_count still matches the dir, no dentries
         * were released during the whole readdir, and we should have
         * the complete dir contents in our cache.
         */
        if (atomic64_read(&ci->i_release_count) == fi->dir_release_count) {
                spin_lock(&ci->i_ceph_lock);
                if (fi->dir_ordered_count == atomic64_read(&ci->i_ordered_count)) {
                        dout(" marking %p complete and ordered\n", inode);
                        /* use i_size to track number of entries in
                         * readdir cache */
                        BUG_ON(fi->readdir_cache_idx < 0);
                        i_size_write(inode, fi->readdir_cache_idx *
                                     sizeof(struct dentry*));
                } else {
                        dout(" marking %p complete\n", inode);
                }
                __ceph_dir_set_complete(ci, fi->dir_release_count,
                                        fi->dir_ordered_count);
                spin_unlock(&ci->i_ceph_lock);
        }

        dout("readdir %p file %p done.\n", inode, file);
        return 0;
}

static void reset_readdir(struct ceph_file_info *fi)
{
        if (fi->last_readdir) {
                ceph_mdsc_put_request(fi->last_readdir);
                fi->last_readdir = NULL;
        }
        kfree(fi->last_name);
        fi->last_name = NULL;
        fi->dir_release_count = 0;
        fi->readdir_cache_idx = -1;
        fi->next_offset = 2;  /* compensate for . and .. */
        fi->flags &= ~CEPH_F_ATEND;
}

/*
 * discard buffered readdir content on seekdir(0), or seek to new frag,
 * or seek prior to current chunk
 */
static bool need_reset_readdir(struct ceph_file_info *fi, loff_t new_pos)
{
        struct ceph_mds_reply_info_parsed *rinfo;
        loff_t chunk_offset;
        if (new_pos == 0)
                return true;
        if (is_hash_order(new_pos)) {
                /* no need to reset last_name for a forward seek when
                 * dentries are sorted in hash order */
        } else if (fi->frag != fpos_frag(new_pos)) {
                return true;
        }
        rinfo = fi->last_readdir ? &fi->last_readdir->r_reply_info : NULL;
        if (!rinfo || !rinfo->dir_nr)
                return true;
        chunk_offset = rinfo->dir_entries[0].offset;
        return new_pos < chunk_offset ||
               is_hash_order(new_pos) != is_hash_order(chunk_offset);
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file->f_mapping->host;
        loff_t retval;

        inode_lock(inode);
        retval = -EINVAL;
        switch (whence) {
        case SEEK_CUR:
                offset += file->f_pos;
        case SEEK_SET:
                break;
        case SEEK_END:
                retval = -EOPNOTSUPP;
        default:
                goto out;
        }

        if (offset >= 0) {
                if (need_reset_readdir(fi, offset)) {
                        dout("dir_llseek dropping %p content\n", file);
                        reset_readdir(fi);
                } else if (is_hash_order(offset) && offset > file->f_pos) {
                        /* for hash offset, we don't know if a forward seek
                         * is within same frag */
                        fi->dir_release_count = 0;
                        fi->readdir_cache_idx = -1;
                }

                if (offset != file->f_pos) {
                        file->f_pos = offset;
                        file->f_version = 0;
                        fi->flags &= ~CEPH_F_ATEND;
                }
                retval = offset;
        }
out:
        inode_unlock(inode);
        return retval;
}

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
                        struct dentry *dentry, int err)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
        struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */

        /* .snap dir? */
        if (err == -ENOENT &&
            ceph_snap(parent) == CEPH_NOSNAP &&
            strcmp(dentry->d_name.name,
                   fsc->mount_options->snapdir_name) == 0) {
                struct inode *inode = ceph_get_snapdir(parent);
                dout("ENOENT on snapdir %p '%pd', linking to snapdir %p\n",
                     dentry, dentry, inode);
                BUG_ON(!d_unhashed(dentry));
                d_add(dentry, inode);
                err = 0;
        }
        return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
                                  struct dentry *dentry, int err)
{
        if (err == -ENOENT) {
                /* no trace? */
                err = 0;
                if (!req->r_reply_info.head->is_dentry) {
                        dout("ENOENT and no trace, dentry %p inode %p\n",
                             dentry, d_inode(dentry));
                        if (d_really_is_positive(dentry)) {
                                d_drop(dentry);
                                err = -ENOENT;
                        } else {
                                d_add(dentry, NULL);
                        }
                }
        }
        if (err)
                dentry = ERR_PTR(err);
        else if (dentry != req->r_dentry)
                dentry = dget(req->r_dentry);   /* we got spliced */
        else
                dentry = NULL;
        return dentry;
}

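/*
 * ".ceph" directly under the root is special-cased: ceph_lookup()
 * below never concludes ENOENT for it locally, even when the parent
 * dir is known to be complete.
 */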
static bool is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
        return ceph_ino(inode) == CEPH_INO_ROOT &&
               strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                                  unsigned int flags)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int op;
        int mask;
        int err;

        dout("lookup %p dentry %p '%pd'\n",
             dir, dentry, dentry);

        if (dentry->d_name.len > NAME_MAX)
                return ERR_PTR(-ENAMETOOLONG);

        /* can we conclude ENOENT locally? */
        if (d_really_is_negative(dentry)) {
                struct ceph_inode_info *ci = ceph_inode(dir);
                struct ceph_dentry_info *di = ceph_dentry(dentry);

                spin_lock(&ci->i_ceph_lock);
                dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
                if (strncmp(dentry->d_name.name,
                            fsc->mount_options->snapdir_name,
                            dentry->d_name.len) &&
                    !is_root_ceph_dentry(dir, dentry) &&
                    ceph_test_mount_opt(fsc, DCACHE) &&
                    __ceph_dir_is_complete(ci) &&
                    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
                        spin_unlock(&ci->i_ceph_lock);
                        dout(" dir %p complete, -ENOENT\n", dir);
                        d_add(dentry, NULL);
                        di->lease_shared_gen = ci->i_shared_gen;
                        return NULL;
                }
                spin_unlock(&ci->i_ceph_lock);
        }

        op = ceph_snap(dir) == CEPH_SNAPDIR ?
                CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
        req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
        if (IS_ERR(req))
                return ERR_CAST(req);
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;

        mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
        if (ceph_security_xattr_wanted(dir))
                mask |= CEPH_CAP_XATTR_SHARED;
        req->r_args.getattr.mask = cpu_to_le32(mask);

        req->r_parent = dir;
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        err = ceph_handle_snapdir(req, dentry, err);
        dentry = ceph_finish_lookup(req, dentry, err);
        ceph_mdsc_put_request(req);  /* will dput(dentry) */
        dout("lookup result=%p\n", dentry);
        return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
        struct dentry *result = ceph_lookup(dir, dentry, 0);

        if (result && !IS_ERR(result)) {
                /*
                 * We created the item, then did a lookup, and found
                 * it was already linked to another inode we already
                 * had in our cache (and thus got spliced).  To not
                 * confuse VFS (especially when inode is a directory),
                 * we don't link our dentry to that inode; we return
                 * an error instead.
                 *
                 * This event should be rare and happens only when we
                 * talk to an old MDS.  A recent MDS does not send a
                 * traceless reply for a request that creates a new
                 * inode.
                 */
                d_drop(result);
                return -ESTALE;
        }
        return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
                      umode_t mode, dev_t rdev)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct ceph_acls_info acls = {};
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        err = ceph_pre_init_acls(dir, &mode, &acls);
        if (err < 0)
                return err;

        dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
             dir, dentry, mode, rdev);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_parent = dir;
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
        req->r_args.mknod.mode = cpu_to_le32(mode);
        req->r_args.mknod.rdev = cpu_to_le32(rdev);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        if (acls.pagelist) {
                req->r_pagelist = acls.pagelist;
                acls.pagelist = NULL;
        }
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
out:
        if (!err)
                ceph_init_inode_acls(d_inode(dentry), &acls);
        else
                d_drop(dentry);
        ceph_release_acls_info(&acls);
        return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
                       bool excl)
{
        return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
                        const char *dest)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_path2 = kstrdup(dest, GFP_KERNEL);
        if (!req->r_path2) {
                err = -ENOMEM;
                ceph_mdsc_put_request(req);
                goto out;
        }
        req->r_parent = dir;
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
out:
        if (err)
                d_drop(dentry);
        return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct ceph_acls_info acls = {};
        int err = -EROFS;
        int op;

        if (ceph_snap(dir) == CEPH_SNAPDIR) {
                /* mkdir .snap/foo is a MKSNAP */
                op = CEPH_MDS_OP_MKSNAP;
                dout("mksnap dir %p snap '%pd' dn %p\n", dir,
                     dentry, dentry);
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
                dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
                op = CEPH_MDS_OP_MKDIR;
        } else {
                goto out;
        }

        mode |= S_IFDIR;
        err = ceph_pre_init_acls(dir, &mode, &acls);
        if (err < 0)
                goto out;

        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_parent = dir;
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
        req->r_args.mkdir.mode = cpu_to_le32(mode);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        if (acls.pagelist) {
                req->r_pagelist = acls.pagelist;
                acls.pagelist = NULL;
        }
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err &&
            !req->r_reply_info.head->is_target &&
            !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
out:
        if (!err)
                ceph_init_inode_acls(d_inode(dentry), &acls);
        else
                d_drop(dentry);
        ceph_release_acls_info(&acls);
        return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
                     struct dentry *dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("link in dir %p old_dentry %p dentry %p\n", dir,
             old_dentry, dentry);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_old_dentry = dget(old_dentry);
        req->r_parent = dir;
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        /* release LINK_SHARED on source inode (mds will lock it) */
        req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (err) {
                d_drop(dentry);
        } else if (!req->r_reply_info.head->is_dentry) {
                ihold(d_inode(old_dentry));
                d_instantiate(dentry, d_inode(old_dentry));
        }
        ceph_mdsc_put_request(req);
        return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

        spin_lock(&ci->i_ceph_lock);
        if (inode->i_nlink == 1) {
                drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
                ci->i_ceph_flags |= CEPH_I_NODELAY;
        }
        spin_unlock(&ci->i_ceph_lock);
        return drop;
}

/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct inode *inode = d_inode(dentry);
        struct ceph_mds_request *req;
        int err = -EROFS;
        int op;

        if (ceph_snap(dir) == CEPH_SNAPDIR) {
                /* rmdir .snap/foo is RMSNAP */
                dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
                op = CEPH_MDS_OP_RMSNAP;
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
                dout("unlink/rmdir dir %p dn %p inode %p\n",
                     dir, dentry, inode);
                op = d_is_dir(dentry) ?
                        CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
        } else
                goto out;
        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_parent = dir;
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        req->r_inode_drop = drop_caps_for_unlink(inode);
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                d_delete(dentry);
        ceph_mdsc_put_request(req);
out:
        return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
                       struct inode *new_dir, struct dentry *new_dentry,
                       unsigned int flags)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int op = CEPH_MDS_OP_RENAME;
        int err;

        if (flags)
                return -EINVAL;

        if (ceph_snap(old_dir) != ceph_snap(new_dir))
                return -EXDEV;
        if (ceph_snap(old_dir) != CEPH_NOSNAP) {
                if (old_dir == new_dir && ceph_snap(old_dir) == CEPH_SNAPDIR)
                        op = CEPH_MDS_OP_RENAMESNAP;
                else
                        return -EROFS;
        }
        dout("rename dir %p dentry %p to dir %p dentry %p\n",
             old_dir, old_dentry, new_dir, new_dentry);
        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
        ihold(old_dir);
        req->r_dentry = dget(new_dentry);
        req->r_num_caps = 2;
        req->r_old_dentry = dget(old_dentry);
        req->r_old_dentry_dir = old_dir;
        req->r_parent = new_dir;
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
        req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        /* release LINK_RDCACHE on source inode (mds will lock it) */
        req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
        if (d_really_is_positive(new_dentry))
                req->r_inode_drop = drop_caps_for_unlink(d_inode(new_dentry));
        err = ceph_mdsc_do_request(mdsc, old_dir, req);
        if (!err && !req->r_reply_info.head->is_dentry) {
                /*
                 * Normally d_move() is done by fill_trace (called by
                 * do_request, above).  If there is no trace, we need
                 * to do it here.
                 */

                /* d_move screws up sibling dentries' offsets */
                ceph_dir_clear_complete(old_dir);
                ceph_dir_clear_complete(new_dir);

                d_move(old_dentry, new_dentry);

                /* ensure target dentry is invalidated, despite
                   rehashing bug in vfs_rename_dir */
                ceph_invalidate_dentry_lease(new_dentry);
        }
        ceph_mdsc_put_request(req);
        return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
        spin_lock(&dentry->d_lock);
        ceph_dentry(dentry)->time = jiffies;
        ceph_dentry(dentry)->lease_shared_gen = 0;
        spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew it if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry, unsigned int flags,
                                 struct inode *dir)
{
        struct ceph_dentry_info *di;
        struct ceph_mds_session *s;
        int valid = 0;
        u32 gen;
        unsigned long ttl;
        struct ceph_mds_session *session = NULL;
        u32 seq = 0;

        spin_lock(&dentry->d_lock);
        di = ceph_dentry(dentry);
        if (di && di->lease_session) {
                s = di->lease_session;
                spin_lock(&s->s_gen_ttl_lock);
                gen = s->s_cap_gen;
                ttl = s->s_cap_ttl;
                spin_unlock(&s->s_gen_ttl_lock);

                if (di->lease_gen == gen &&
                    time_before(jiffies, di->time) &&
                    time_before(jiffies, ttl)) {
                        valid = 1;
                        if (di->lease_renew_after &&
                            time_after(jiffies, di->lease_renew_after)) {
                                /*
                                 * We should renew. If we're in RCU walk mode
                                 * though, we can't do that so just return
                                 * -ECHILD.
                                 */
                                if (flags & LOOKUP_RCU) {
                                        valid = -ECHILD;
                                } else {
                                        session = ceph_get_mds_session(s);
                                        seq = di->lease_seq;
                                        di->lease_renew_after = 0;
                                        di->lease_renew_from = jiffies;
                                }
                        }
                }
        }
        spin_unlock(&dentry->d_lock);

        if (session) {
                ceph_mdsc_lease_send_msg(session, dir, dentry,
                                         CEPH_MDS_LEASE_RENEW, seq);
                ceph_put_mds_session(session);
        }
        dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
        return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        int valid = 0;

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_shared_gen == di->lease_shared_gen)
                valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
        spin_unlock(&ci->i_ceph_lock);
        dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
             dir, (unsigned)ci->i_shared_gen, dentry,
             (unsigned)di->lease_shared_gen, valid);
        return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
        int valid = 0;
        struct dentry *parent;
        struct inode *dir;

        if (flags & LOOKUP_RCU) {
                parent = READ_ONCE(dentry->d_parent);
                dir = d_inode_rcu(parent);
                if (!dir)
                        return -ECHILD;
        } else {
                parent = dget_parent(dentry);
                dir = d_inode(parent);
        }

        dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
             dentry, d_inode(dentry), ceph_dentry(dentry)->offset);

        /* always trust cached snapped dentries, snapdir dentry */
        if (ceph_snap(dir) != CEPH_NOSNAP) {
                dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
                     dentry, d_inode(dentry));
                valid = 1;
        } else if (d_really_is_positive(dentry) &&
                   ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) {
                valid = 1;
        } else {
                valid = dentry_lease_is_valid(dentry, flags, dir);
                if (valid == -ECHILD)
                        return valid;
                if (valid || dir_lease_is_valid(dir, dentry)) {
                        if (d_really_is_positive(dentry))
                                valid = ceph_is_any_caps(d_inode(dentry));
                        else
                                valid = 1;
                }
        }

        if (!valid) {
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(dir->i_sb)->mdsc;
                struct ceph_mds_request *req;
                int op, err;
                u32 mask;

                if (flags & LOOKUP_RCU)
                        return -ECHILD;

                op = ceph_snap(dir) == CEPH_SNAPDIR ?
                        CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
                req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
                if (!IS_ERR(req)) {
                        req->r_dentry = dget(dentry);
                        req->r_num_caps = 2;
                        req->r_parent = dir;

                        mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
                        if (ceph_security_xattr_wanted(dir))
                                mask |= CEPH_CAP_XATTR_SHARED;
                        req->r_args.getattr.mask = cpu_to_le32(mask);

                        err = ceph_mdsc_do_request(mdsc, NULL, req);
                        switch (err) {
                        case 0:
                                if (d_really_is_positive(dentry) &&
                                    d_inode(dentry) == req->r_target_inode)
                                        valid = 1;
                                break;
                        case -ENOENT:
                                if (d_really_is_negative(dentry))
                                        valid = 1;
                                /* Fallthrough */
                        default:
                                break;
                        }
                        ceph_mdsc_put_request(req);
                        dout("d_revalidate %p lookup result=%d\n",
                             dentry, err);
                }
        }

        dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
        if (valid) {
                ceph_dentry_lru_touch(dentry);
        } else {
                ceph_dir_clear_complete(dir);
        }

        if (!(flags & LOOKUP_RCU))
                dput(parent);
        return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
        struct ceph_dentry_info *di = ceph_dentry(dentry);

        dout("d_release %p\n", dentry);
        ceph_dentry_lru_del(dentry);

        spin_lock(&dentry->d_lock);
        dentry->d_fsdata = NULL;
        spin_unlock(&dentry->d_lock);

        if (di->lease_session)
                ceph_put_mds_session(di->lease_session);
        kmem_cache_free(ceph_dentry_cachep, di);
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
        dout("ceph_d_prune %p\n", dentry);

        /* do we have a valid parent? */
        if (IS_ROOT(dentry))
                return;

        /* if we are not hashed, we don't affect dir's completeness */
        if (d_unhashed(dentry))
                return;

        if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR)
                return;

        /*
         * we hold d_lock, so d_parent is stable, and d_fsdata is never
         * cleared until d_release
         */
        ceph_dir_clear_complete(d_inode(dentry->d_parent));
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
                             loff_t *ppos)
{
        struct ceph_file_info *cf = file->private_data;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int left;
        const int bufsize = 1024;

        if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
                return -EISDIR;

        if (!cf->dir_info) {
                cf->dir_info = kmalloc(bufsize, GFP_KERNEL);
                if (!cf->dir_info)
                        return -ENOMEM;
                cf->dir_info_len =
                        snprintf(cf->dir_info, bufsize,
                                 "entries:   %20lld\n"
                                 " files:    %20lld\n"
                                 " subdirs:  %20lld\n"
                                 "rentries:  %20lld\n"
                                 " rfiles:   %20lld\n"
                                 " rsubdirs: %20lld\n"
                                 "rbytes:    %20lld\n"
                                 "rctime:    %10ld.%09ld\n",
                                 ci->i_files + ci->i_subdirs,
                                 ci->i_files,
                                 ci->i_subdirs,
                                 ci->i_rfiles + ci->i_rsubdirs,
                                 ci->i_rfiles,
                                 ci->i_rsubdirs,
                                 ci->i_rbytes,
                                 (long)ci->i_rctime.tv_sec,
                                 (long)ci->i_rctime.tv_nsec);
        }

        if (*ppos >= cf->dir_info_len)
                return 0;
        size = min_t(unsigned, size, cf->dir_info_len-*ppos);
        left = copy_to_user(buf, cf->dir_info + *ppos, size);
        if (left == size)
                return -EFAULT;
        *ppos += (size - left);
        return size - left;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;

        dout("dentry_lru_add %p %p '%pd'\n", di, dn, dn);
        mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
        spin_lock(&mdsc->dentry_lru_lock);
        list_add_tail(&di->lru, &mdsc->dentry_lru);
        mdsc->num_dentry++;
        spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;

        dout("dentry_lru_touch %p %p '%pd' (offset %lld)\n", di, dn, dn,
             di->offset);
        mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
        spin_lock(&mdsc->dentry_lru_lock);
        list_move_tail(&di->lru, &mdsc->dentry_lru);
        spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;

        dout("dentry_lru_del %p %p '%pd'\n", di, dn, dn);
        mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
        spin_lock(&mdsc->dentry_lru_lock);
        list_del_init(&di->lru);
        mdsc->num_dentry--;
        spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
        struct ceph_inode_info *dci = ceph_inode(dir);

        switch (dci->i_dir_layout.dl_dir_hash) {
        case 0: /* for backward compat */
        case CEPH_STR_HASH_LINUX:
                return dn->d_name.hash;

        default:
                return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
                                     dn->d_name.name, dn->d_name.len);
        }
}

const struct file_operations ceph_dir_fops = {
        .read = ceph_read_dir,
        .iterate = ceph_readdir,
        .llseek = ceph_dir_llseek,
        .open = ceph_open,
        .release = ceph_release,
        .unlocked_ioctl = ceph_ioctl,
        .fsync = ceph_fsync,
};

const struct file_operations ceph_snapdir_fops = {
        .iterate = ceph_readdir,
        .llseek = ceph_dir_llseek,
        .open = ceph_open,
        .release = ceph_release,
};

const struct inode_operations ceph_dir_iops = {
        .lookup = ceph_lookup,
        .permission = ceph_permission,
        .getattr = ceph_getattr,
        .setattr = ceph_setattr,
        .listxattr = ceph_listxattr,
        .get_acl = ceph_get_acl,
        .set_acl = ceph_set_acl,
        .mknod = ceph_mknod,
        .symlink = ceph_symlink,
        .mkdir = ceph_mkdir,
        .link = ceph_link,
        .unlink = ceph_unlink,
        .rmdir = ceph_unlink,
        .rename = ceph_rename,
        .create = ceph_create,
        .atomic_open = ceph_atomic_open,
};

const struct inode_operations ceph_snapdir_iops = {
        .lookup = ceph_lookup,
        .permission = ceph_permission,
        .getattr = ceph_getattr,
        .mkdir = ceph_mkdir,
        .rmdir = ceph_unlink,
        .rename = ceph_rename,
};

const struct dentry_operations ceph_dentry_ops = {
        .d_revalidate = ceph_d_revalidate,
        .d_release = ceph_d_release,
        .d_prune = ceph_d_prune,
        .d_init = ceph_d_init,
};