fs/ceph/dir.c [mirror_ubuntu-bionic-kernel.git]
ceph: don't forbid marking directory complete after forward seek
1 #include <linux/ceph/ceph_debug.h>
2
3 #include <linux/spinlock.h>
4 #include <linux/fs_struct.h>
5 #include <linux/namei.h>
6 #include <linux/slab.h>
7 #include <linux/sched.h>
8
9 #include "super.h"
10 #include "mds_client.h"
11
12 /*
13 * Directory operations: readdir, lookup, create, link, unlink,
14 * rename, etc.
15 */
16
17 /*
18 * Ceph MDS operations are specified in terms of a base ino and
19 * relative path. Thus, the client can specify an operation on a
20 * specific inode (e.g., a getattr due to fstat(2)), or as a path
21 * relative to, say, the root directory.
22 *
23 * Normally, we limit ourselves to strict inode ops (no path component)
24 * or dentry operations (a single path component relative to an ino). The
25 * exception to this is open_root_dentry(), which will open the mount
26 * point by name.
27 */
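/*
 * For example, a stat of an already-known file can be sent as
 * (base ino = that file's ino, path = ""), while looking up "foo"
 * inside a directory is sent as (base ino = the directory's ino,
 * path = "foo").  The exact inode numbers are irrelevant here; only
 * the base-ino-plus-relative-path addressing matters.
 */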
28
29 const struct dentry_operations ceph_dentry_ops;
30
31 /*
32 * Initialize ceph dentry state.
33 */
34 int ceph_init_dentry(struct dentry *dentry)
35 {
36 struct ceph_dentry_info *di;
37
38 if (dentry->d_fsdata)
39 return 0;
40
41 di = kmem_cache_zalloc(ceph_dentry_cachep, GFP_KERNEL);
42 if (!di)
43 return -ENOMEM; /* oh well */
44
45 spin_lock(&dentry->d_lock);
46 if (dentry->d_fsdata) {
47 /* lost a race */
48 kmem_cache_free(ceph_dentry_cachep, di);
49 goto out_unlock;
50 }
51
52 if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP)
53 d_set_d_op(dentry, &ceph_dentry_ops);
54 else if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR)
55 d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
56 else
57 d_set_d_op(dentry, &ceph_snap_dentry_ops);
58
59 di->dentry = dentry;
60 di->lease_session = NULL;
61 dentry->d_time = jiffies;
62 /* avoid reordering d_fsdata setup so that the check above is safe */
63 smp_mb();
64 dentry->d_fsdata = di;
65 ceph_dentry_lru_add(dentry);
66 out_unlock:
67 spin_unlock(&dentry->d_lock);
68 return 0;
69 }
70
71 /*
72 * for readdir, we encode the directory frag and offset within that
73 * frag into f_pos.
74 */
75 static unsigned fpos_frag(loff_t p)
76 {
77 return p >> 32;
78 }
79 static unsigned fpos_off(loff_t p)
80 {
81 return p & 0xffffffff;
82 }
83
84 static int fpos_cmp(loff_t l, loff_t r)
85 {
86 int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
87 if (v)
88 return v;
89 return (int)(fpos_off(l) - fpos_off(r));
90 }
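/*
 * Worked example with made-up values: a position in frag 0x2a000000
 * at offset 7 within that frag is encoded as
 * ((loff_t)0x2a000000 << 32) | 7, so fpos_frag() recovers 0x2a000000
 * and fpos_off() recovers 7.  fpos_cmp() orders positions by frag
 * first (via ceph_frag_compare()), then by offset within the frag.
 */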
91
92 /*
93 * make note of the last dentry we read, so we can
94 * continue at the same lexicographical point,
95 * regardless of what dir changes take place on the
96 * server.
97 */
98 static int note_last_dentry(struct ceph_file_info *fi, const char *name,
99 int len, unsigned next_offset)
100 {
101 char *buf = kmalloc(len+1, GFP_KERNEL);
102 if (!buf)
103 return -ENOMEM;
104 kfree(fi->last_name);
105 fi->last_name = buf;
106 memcpy(fi->last_name, name, len);
107 fi->last_name[len] = 0;
108 fi->next_offset = next_offset;
109 dout("note_last_dentry '%s'\n", fi->last_name);
110 return 0;
111 }
112
113
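/*
 * Fetch the idx'th dentry pointer from the directory's readdir cache,
 * which lives in the dir inode's page cache as a flat array of
 * struct dentry pointers.  Returns NULL when idx lies beyond the
 * cached region (i_size tracks its length), ERR_PTR(-EAGAIN) when the
 * cache page or the dentry itself cannot be used, and a referenced
 * dentry otherwise.
 */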
114 static struct dentry *
115 __dcache_find_get_entry(struct dentry *parent, u64 idx,
116 struct ceph_readdir_cache_control *cache_ctl)
117 {
118 struct inode *dir = d_inode(parent);
119 struct dentry *dentry;
120 unsigned idx_mask = (PAGE_SIZE / sizeof(struct dentry *)) - 1;
121 loff_t ptr_pos = idx * sizeof(struct dentry *);
122 pgoff_t ptr_pgoff = ptr_pos >> PAGE_SHIFT;
123
124 if (ptr_pos >= i_size_read(dir))
125 return NULL;
126
127 if (!cache_ctl->page || ptr_pgoff != page_index(cache_ctl->page)) {
128 ceph_readdir_cache_release(cache_ctl);
129 cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
130 if (!cache_ctl->page) {
131 dout(" page %lu not found\n", ptr_pgoff);
132 return ERR_PTR(-EAGAIN);
133 }
134 		/* reading/filling the cache is serialized by
135 		   i_mutex, no need to take the page lock */
136 unlock_page(cache_ctl->page);
137 cache_ctl->dentries = kmap(cache_ctl->page);
138 }
139
140 cache_ctl->index = idx & idx_mask;
141
142 rcu_read_lock();
143 spin_lock(&parent->d_lock);
144 	/* check i_size again here, because an empty directory can be
145 	 * marked as complete while i_mutex is not held. */
146 if (ceph_dir_is_complete_ordered(dir) && ptr_pos < i_size_read(dir))
147 dentry = cache_ctl->dentries[cache_ctl->index];
148 else
149 dentry = NULL;
150 spin_unlock(&parent->d_lock);
151 if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
152 dentry = NULL;
153 rcu_read_unlock();
154 return dentry ? : ERR_PTR(-EAGAIN);
155 }
156
157 /*
158 * When possible, we try to satisfy a readdir by peeking at the
159 * dcache. We make this work by carefully ordering dentries on
160 * d_child when we initially get results back from the MDS, and
161 * falling back to a "normal" sync readdir if any dentries in the dir
162 * are dropped.
163 *
164 * Complete dir indicates that we have all dentries in the dir. It is
165 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
166 * the MDS if/when the directory is modified).
167 */
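/*
 * __dcache_readdir() below works in two passes: a binary search over
 * the cached dentry pointers to find the first entry at or after
 * ctx->pos, then a sequential walk that emits entries whose
 * lease_shared_gen still matches; a missing cache page or dead dentry
 * makes it return -EAGAIN so the caller falls back to a normal MDS
 * readdir.
 */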
168 static int __dcache_readdir(struct file *file, struct dir_context *ctx,
169 u32 shared_gen)
170 {
171 struct ceph_file_info *fi = file->private_data;
172 struct dentry *parent = file->f_path.dentry;
173 struct inode *dir = d_inode(parent);
174 struct dentry *dentry, *last = NULL;
175 struct ceph_dentry_info *di;
176 struct ceph_readdir_cache_control cache_ctl = {};
177 u64 idx = 0;
178 int err = 0;
179
180 dout("__dcache_readdir %p v%u at %llu\n", dir, shared_gen, ctx->pos);
181
182 /* search start position */
183 if (ctx->pos > 2) {
184 u64 count = div_u64(i_size_read(dir), sizeof(struct dentry *));
185 while (count > 0) {
186 u64 step = count >> 1;
187 dentry = __dcache_find_get_entry(parent, idx + step,
188 &cache_ctl);
189 if (!dentry) {
190 				/* use linear search */
191 idx = 0;
192 break;
193 }
194 if (IS_ERR(dentry)) {
195 err = PTR_ERR(dentry);
196 goto out;
197 }
198 di = ceph_dentry(dentry);
199 spin_lock(&dentry->d_lock);
200 if (fpos_cmp(di->offset, ctx->pos) < 0) {
201 idx += step + 1;
202 count -= step + 1;
203 } else {
204 count = step;
205 }
206 spin_unlock(&dentry->d_lock);
207 dput(dentry);
208 }
209
210 dout("__dcache_readdir %p cache idx %llu\n", dir, idx);
211 }
212
213
214 for (;;) {
215 bool emit_dentry = false;
216 dentry = __dcache_find_get_entry(parent, idx++, &cache_ctl);
217 if (!dentry) {
218 fi->flags |= CEPH_F_ATEND;
219 err = 0;
220 break;
221 }
222 if (IS_ERR(dentry)) {
223 err = PTR_ERR(dentry);
224 goto out;
225 }
226
227 di = ceph_dentry(dentry);
228 spin_lock(&dentry->d_lock);
229 if (di->lease_shared_gen == shared_gen &&
230 d_really_is_positive(dentry) &&
231 fpos_cmp(ctx->pos, di->offset) <= 0) {
232 emit_dentry = true;
233 }
234 spin_unlock(&dentry->d_lock);
235
236 if (emit_dentry) {
237 dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
238 dentry, dentry, d_inode(dentry));
239 ctx->pos = di->offset;
240 if (!dir_emit(ctx, dentry->d_name.name,
241 dentry->d_name.len,
242 ceph_translate_ino(dentry->d_sb,
243 d_inode(dentry)->i_ino),
244 d_inode(dentry)->i_mode >> 12)) {
245 dput(dentry);
246 err = 0;
247 break;
248 }
249 ctx->pos++;
250
251 if (last)
252 dput(last);
253 last = dentry;
254 } else {
255 dput(dentry);
256 }
257 }
258 out:
259 ceph_readdir_cache_release(&cache_ctl);
260 if (last) {
261 int ret;
262 di = ceph_dentry(last);
263 ret = note_last_dentry(fi, last->d_name.name, last->d_name.len,
264 fpos_off(di->offset) + 1);
265 if (ret < 0)
266 err = ret;
267 dput(last);
268 }
269 return err;
270 }
271
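/*
 * Main readdir entry point: emit "." and "..", try the dcache fast
 * path above when the directory is known complete and ordered, and
 * otherwise page through READDIR (or LSSNAP for the snapdir) replies
 * from the MDS one frag at a time, remembering the last name seen so
 * the next chunk can resume after it.
 */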
272 static int ceph_readdir(struct file *file, struct dir_context *ctx)
273 {
274 struct ceph_file_info *fi = file->private_data;
275 struct inode *inode = file_inode(file);
276 struct ceph_inode_info *ci = ceph_inode(inode);
277 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
278 struct ceph_mds_client *mdsc = fsc->mdsc;
279 unsigned frag = fpos_frag(ctx->pos);
280 int i;
281 int err;
282 u32 ftype;
283 struct ceph_mds_reply_info_parsed *rinfo;
284
285 dout("readdir %p file %p pos %llx\n", inode, file, ctx->pos);
286 if (fi->flags & CEPH_F_ATEND)
287 return 0;
288
289 /* always start with . and .. */
290 if (ctx->pos == 0) {
291 dout("readdir off 0 -> '.'\n");
292 if (!dir_emit(ctx, ".", 1,
293 ceph_translate_ino(inode->i_sb, inode->i_ino),
294 inode->i_mode >> 12))
295 return 0;
296 ctx->pos = 1;
297 }
298 if (ctx->pos == 1) {
299 ino_t ino = parent_ino(file->f_path.dentry);
300 dout("readdir off 1 -> '..'\n");
301 if (!dir_emit(ctx, "..", 2,
302 ceph_translate_ino(inode->i_sb, ino),
303 inode->i_mode >> 12))
304 return 0;
305 ctx->pos = 2;
306 }
307
308 /* can we use the dcache? */
309 spin_lock(&ci->i_ceph_lock);
310 if (ceph_test_mount_opt(fsc, DCACHE) &&
311 !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
312 ceph_snap(inode) != CEPH_SNAPDIR &&
313 __ceph_dir_is_complete_ordered(ci) &&
314 __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
315 u32 shared_gen = ci->i_shared_gen;
316 spin_unlock(&ci->i_ceph_lock);
317 err = __dcache_readdir(file, ctx, shared_gen);
318 if (err != -EAGAIN)
319 return err;
320 frag = fpos_frag(ctx->pos);
321 } else {
322 spin_unlock(&ci->i_ceph_lock);
323 }
324
325 /* proceed with a normal readdir */
326 more:
327 /* do we have the correct frag content buffered? */
328 if (fi->frag != frag || fi->last_readdir == NULL) {
329 struct ceph_mds_request *req;
330 int op = ceph_snap(inode) == CEPH_SNAPDIR ?
331 CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;
332
333 /* discard old result, if any */
334 if (fi->last_readdir) {
335 ceph_mdsc_put_request(fi->last_readdir);
336 fi->last_readdir = NULL;
337 }
338
339 dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
340 ceph_vinop(inode), frag, fi->last_name);
341 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
342 if (IS_ERR(req))
343 return PTR_ERR(req);
344 err = ceph_alloc_readdir_reply_buffer(req, inode);
345 if (err) {
346 ceph_mdsc_put_request(req);
347 return err;
348 }
349 /* hints to request -> mds selection code */
350 req->r_direct_mode = USE_AUTH_MDS;
351 req->r_direct_hash = ceph_frag_value(frag);
352 req->r_direct_is_hash = true;
353 if (fi->last_name) {
354 req->r_path2 = kstrdup(fi->last_name, GFP_KERNEL);
355 if (!req->r_path2) {
356 ceph_mdsc_put_request(req);
357 return -ENOMEM;
358 }
359 }
360 req->r_dir_release_cnt = fi->dir_release_count;
361 req->r_dir_ordered_cnt = fi->dir_ordered_count;
362 req->r_readdir_cache_idx = fi->readdir_cache_idx;
363 req->r_readdir_offset = fi->next_offset;
364 req->r_args.readdir.frag = cpu_to_le32(frag);
365 req->r_args.readdir.flags =
366 cpu_to_le16(CEPH_READDIR_REPLY_BITFLAGS);
367
368 req->r_inode = inode;
369 ihold(inode);
370 req->r_dentry = dget(file->f_path.dentry);
371 err = ceph_mdsc_do_request(mdsc, NULL, req);
372 if (err < 0) {
373 ceph_mdsc_put_request(req);
374 return err;
375 }
376 dout("readdir got and parsed readdir result=%d"
377 " on frag %x, end=%d, complete=%d\n", err, frag,
378 (int)req->r_reply_info.dir_end,
379 (int)req->r_reply_info.dir_complete);
380
381
382 /* note next offset and last dentry name */
383 rinfo = &req->r_reply_info;
384 if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
385 frag = le32_to_cpu(rinfo->dir_dir->frag);
386 fi->next_offset = req->r_readdir_offset;
387 /* adjust ctx->pos to beginning of frag */
388 ctx->pos = ceph_make_fpos(frag, fi->next_offset);
389 }
390
391 fi->frag = frag;
392 fi->last_readdir = req;
393
394 if (req->r_did_prepopulate) {
395 fi->readdir_cache_idx = req->r_readdir_cache_idx;
396 if (fi->readdir_cache_idx < 0) {
397 /* preclude from marking dir ordered */
398 fi->dir_ordered_count = 0;
399 } else if (ceph_frag_is_leftmost(frag) &&
400 fi->next_offset == 2) {
401 /* note dir version at start of readdir so
402 * we can tell if any dentries get dropped */
403 fi->dir_release_count = req->r_dir_release_cnt;
404 fi->dir_ordered_count = req->r_dir_ordered_cnt;
405 }
406 } else {
407 dout("readdir !did_prepopulate");
408 /* disable readdir cache */
409 fi->readdir_cache_idx = -1;
410 /* preclude from marking dir complete */
411 fi->dir_release_count = 0;
412 }
413
414 if (req->r_reply_info.dir_end) {
415 kfree(fi->last_name);
416 fi->last_name = NULL;
417 fi->next_offset = 2;
418 } else {
419 struct ceph_mds_reply_dir_entry *rde =
420 rinfo->dir_entries + (rinfo->dir_nr-1);
421 err = note_last_dentry(fi, rde->name, rde->name_len,
422 fpos_off(rde->offset) + 1);
423 if (err)
424 return err;
425 }
426 }
427
428 rinfo = &fi->last_readdir->r_reply_info;
429 dout("readdir frag %x num %d pos %llx chunk first %llx\n",
430 frag, rinfo->dir_nr, ctx->pos,
431 rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL);
432
433 i = 0;
434 /* search start position */
435 if (rinfo->dir_nr > 0) {
436 int step, nr = rinfo->dir_nr;
437 while (nr > 0) {
438 step = nr >> 1;
439 if (rinfo->dir_entries[i + step].offset < ctx->pos) {
440 i += step + 1;
441 nr -= step + 1;
442 } else {
443 nr = step;
444 }
445 }
446 }
447 for (; i < rinfo->dir_nr; i++) {
448 struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
449 struct ceph_vino vino;
450 ino_t ino;
451
452 BUG_ON(rde->offset < ctx->pos);
453
454 ctx->pos = rde->offset;
455 dout("readdir (%d/%d) -> %llx '%.*s' %p\n",
456 i, rinfo->dir_nr, ctx->pos,
457 rde->name_len, rde->name, &rde->inode.in);
458
459 BUG_ON(!rde->inode.in);
460 ftype = le32_to_cpu(rde->inode.in->mode) >> 12;
461 vino.ino = le64_to_cpu(rde->inode.in->ino);
462 vino.snap = le64_to_cpu(rde->inode.in->snapid);
463 ino = ceph_vino_to_ino(vino);
464
465 if (!dir_emit(ctx, rde->name, rde->name_len,
466 ceph_translate_ino(inode->i_sb, ino), ftype)) {
467 dout("filldir stopping us...\n");
468 return 0;
469 }
470 ctx->pos++;
471 }
472
473 if (fi->last_name) {
474 ceph_mdsc_put_request(fi->last_readdir);
475 fi->last_readdir = NULL;
476 goto more;
477 }
478
479 /* more frags? */
480 if (!ceph_frag_is_rightmost(frag)) {
481 frag = ceph_frag_next(frag);
482 ctx->pos = ceph_make_fpos(frag, 2);
483 dout("readdir next frag is %x\n", frag);
484 goto more;
485 }
486 fi->flags |= CEPH_F_ATEND;
487
488 /*
489 * if dir_release_count still matches the dir, no dentries
490 * were released during the whole readdir, and we should have
491 * the complete dir contents in our cache.
492 */
493 if (atomic64_read(&ci->i_release_count) == fi->dir_release_count) {
494 spin_lock(&ci->i_ceph_lock);
495 if (fi->dir_ordered_count == atomic64_read(&ci->i_ordered_count)) {
496 dout(" marking %p complete and ordered\n", inode);
497 /* use i_size to track number of entries in
498 * readdir cache */
499 BUG_ON(fi->readdir_cache_idx < 0);
500 i_size_write(inode, fi->readdir_cache_idx *
501 sizeof(struct dentry*));
502 } else {
503 dout(" marking %p complete\n", inode);
504 }
505 __ceph_dir_set_complete(ci, fi->dir_release_count,
506 fi->dir_ordered_count);
507 spin_unlock(&ci->i_ceph_lock);
508 }
509
510 dout("readdir %p file %p done.\n", inode, file);
511 return 0;
512 }
513
514 static void reset_readdir(struct ceph_file_info *fi)
515 {
516 if (fi->last_readdir) {
517 ceph_mdsc_put_request(fi->last_readdir);
518 fi->last_readdir = NULL;
519 }
520 kfree(fi->last_name);
521 fi->last_name = NULL;
522 fi->dir_release_count = 0;
523 fi->readdir_cache_idx = -1;
524 fi->next_offset = 2; /* compensate for . and .. */
525 fi->flags &= ~CEPH_F_ATEND;
526 }
527
528 /*
529 * discard buffered readdir content on seekdir(0), or seek to new frag,
530 * or seek prior to current chunk
531 */
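/*
 * For example, after buffering a chunk of entries from the current
 * frag, seeking back to offset 0, into a different frag, or to a
 * position before the first buffered entry forces a reset; a forward
 * seek within the buffered chunk does not, so it also does not throw
 * away the state needed to later mark the directory complete.
 */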
532 static bool need_reset_readdir(struct ceph_file_info *fi, loff_t new_pos)
533 {
534 struct ceph_mds_reply_info_parsed *rinfo;
535 if (new_pos == 0)
536 return true;
537 if (fpos_frag(new_pos) != fi->frag)
538 return true;
539 rinfo = fi->last_readdir ? &fi->last_readdir->r_reply_info : NULL;
540 if (!rinfo || !rinfo->dir_nr)
541 return true;
542 	return new_pos < rinfo->dir_entries[0].offset;
543 }
544
545 static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
546 {
547 struct ceph_file_info *fi = file->private_data;
548 struct inode *inode = file->f_mapping->host;
549 loff_t retval;
550
551 inode_lock(inode);
552 retval = -EINVAL;
553 switch (whence) {
554 case SEEK_CUR:
555 offset += file->f_pos;
556 case SEEK_SET:
557 break;
558 case SEEK_END:
559 retval = -EOPNOTSUPP;
560 default:
561 goto out;
562 }
563
564 if (offset >= 0) {
565 if (offset != file->f_pos) {
566 file->f_pos = offset;
567 file->f_version = 0;
568 fi->flags &= ~CEPH_F_ATEND;
569 }
570 retval = offset;
571
572 if (need_reset_readdir(fi, offset)) {
573 dout("dir_llseek dropping %p content\n", file);
574 reset_readdir(fi);
575 }
576 }
577 out:
578 inode_unlock(inode);
579 return retval;
580 }
581
582 /*
583 * Handle lookups for the hidden .snap directory.
584 */
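/*
 * Example with the default mount options: a lookup of "<dir>/.snap"
 * that the MDS answered with -ENOENT (the snapdir does not exist
 * server-side as a normal dentry) is turned into a reference to the
 * client's synthetic snapdir inode and reported as success.
 */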
585 int ceph_handle_snapdir(struct ceph_mds_request *req,
586 struct dentry *dentry, int err)
587 {
588 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
589 struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */
590
591 /* .snap dir? */
592 if (err == -ENOENT &&
593 ceph_snap(parent) == CEPH_NOSNAP &&
594 strcmp(dentry->d_name.name,
595 fsc->mount_options->snapdir_name) == 0) {
596 struct inode *inode = ceph_get_snapdir(parent);
597 dout("ENOENT on snapdir %p '%pd', linking to snapdir %p\n",
598 dentry, dentry, inode);
599 BUG_ON(!d_unhashed(dentry));
600 d_add(dentry, inode);
601 err = 0;
602 }
603 return err;
604 }
605
606 /*
607 * Figure out final result of a lookup/open request.
608 *
609 * Mainly, make sure we return the final req->r_dentry (if it already
610 * existed) in place of the original VFS-provided dentry when they
611 * differ.
612 *
613 * Gracefully handle the case where the MDS replies with -ENOENT and
614 * no trace (which it may do, at its discretion, e.g., if it doesn't
615 * care to issue a lease on the negative dentry).
616 */
617 struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
618 struct dentry *dentry, int err)
619 {
620 if (err == -ENOENT) {
621 /* no trace? */
622 err = 0;
623 if (!req->r_reply_info.head->is_dentry) {
624 dout("ENOENT and no trace, dentry %p inode %p\n",
625 dentry, d_inode(dentry));
626 if (d_really_is_positive(dentry)) {
627 d_drop(dentry);
628 err = -ENOENT;
629 } else {
630 d_add(dentry, NULL);
631 }
632 }
633 }
634 if (err)
635 dentry = ERR_PTR(err);
636 else if (dentry != req->r_dentry)
637 dentry = dget(req->r_dentry); /* we got spliced */
638 else
639 dentry = NULL;
640 return dentry;
641 }
642
643 static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
644 {
645 return ceph_ino(inode) == CEPH_INO_ROOT &&
646 strncmp(dentry->d_name.name, ".ceph", 5) == 0;
647 }
648
649 /*
650 * Look up a single dir entry. If there is a lookup intent, inform
651 * the MDS so that it gets our 'caps wanted' value in a single op.
652 */
653 static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
654 unsigned int flags)
655 {
656 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
657 struct ceph_mds_client *mdsc = fsc->mdsc;
658 struct ceph_mds_request *req;
659 int op;
660 int mask;
661 int err;
662
663 dout("lookup %p dentry %p '%pd'\n",
664 dir, dentry, dentry);
665
666 if (dentry->d_name.len > NAME_MAX)
667 return ERR_PTR(-ENAMETOOLONG);
668
669 err = ceph_init_dentry(dentry);
670 if (err < 0)
671 return ERR_PTR(err);
672
673 /* can we conclude ENOENT locally? */
674 if (d_really_is_negative(dentry)) {
675 struct ceph_inode_info *ci = ceph_inode(dir);
676 struct ceph_dentry_info *di = ceph_dentry(dentry);
677
678 spin_lock(&ci->i_ceph_lock);
679 dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
680 if (strncmp(dentry->d_name.name,
681 fsc->mount_options->snapdir_name,
682 dentry->d_name.len) &&
683 !is_root_ceph_dentry(dir, dentry) &&
684 ceph_test_mount_opt(fsc, DCACHE) &&
685 __ceph_dir_is_complete(ci) &&
686 (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
687 spin_unlock(&ci->i_ceph_lock);
688 dout(" dir %p complete, -ENOENT\n", dir);
689 d_add(dentry, NULL);
690 di->lease_shared_gen = ci->i_shared_gen;
691 return NULL;
692 }
693 spin_unlock(&ci->i_ceph_lock);
694 }
695
696 op = ceph_snap(dir) == CEPH_SNAPDIR ?
697 CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
698 req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
699 if (IS_ERR(req))
700 return ERR_CAST(req);
701 req->r_dentry = dget(dentry);
702 req->r_num_caps = 2;
703
704 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
705 if (ceph_security_xattr_wanted(dir))
706 mask |= CEPH_CAP_XATTR_SHARED;
707 req->r_args.getattr.mask = cpu_to_le32(mask);
708
709 req->r_locked_dir = dir;
710 err = ceph_mdsc_do_request(mdsc, NULL, req);
711 err = ceph_handle_snapdir(req, dentry, err);
712 dentry = ceph_finish_lookup(req, dentry, err);
713 ceph_mdsc_put_request(req); /* will dput(dentry) */
714 dout("lookup result=%p\n", dentry);
715 return dentry;
716 }
717
718 /*
719 * If we do a create but get no trace back from the MDS, follow up with
720 * a lookup (the VFS expects us to link up the provided dentry).
721 */
722 int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
723 {
724 struct dentry *result = ceph_lookup(dir, dentry, 0);
725
726 if (result && !IS_ERR(result)) {
727 /*
728 * We created the item, then did a lookup, and found
729 * it was already linked to another inode we already
730 * had in our cache (and thus got spliced). To not
731 * confuse VFS (especially when inode is a directory),
732 * we don't link our dentry to that inode, return an
733 * error instead.
734 *
735 * This event should be rare and it happens only when
736 * we talk to old MDS. Recent MDS does not send traceless
737 * reply for request that creates new inode.
738 */
739 d_drop(result);
740 return -ESTALE;
741 }
742 return PTR_ERR(result);
743 }
744
745 static int ceph_mknod(struct inode *dir, struct dentry *dentry,
746 umode_t mode, dev_t rdev)
747 {
748 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
749 struct ceph_mds_client *mdsc = fsc->mdsc;
750 struct ceph_mds_request *req;
751 struct ceph_acls_info acls = {};
752 int err;
753
754 if (ceph_snap(dir) != CEPH_NOSNAP)
755 return -EROFS;
756
757 err = ceph_pre_init_acls(dir, &mode, &acls);
758 if (err < 0)
759 return err;
760
761 dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
762 dir, dentry, mode, rdev);
763 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
764 if (IS_ERR(req)) {
765 err = PTR_ERR(req);
766 goto out;
767 }
768 req->r_dentry = dget(dentry);
769 req->r_num_caps = 2;
770 req->r_locked_dir = dir;
771 req->r_args.mknod.mode = cpu_to_le32(mode);
772 req->r_args.mknod.rdev = cpu_to_le32(rdev);
773 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
774 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
775 if (acls.pagelist) {
776 req->r_pagelist = acls.pagelist;
777 acls.pagelist = NULL;
778 }
779 err = ceph_mdsc_do_request(mdsc, dir, req);
780 if (!err && !req->r_reply_info.head->is_dentry)
781 err = ceph_handle_notrace_create(dir, dentry);
782 ceph_mdsc_put_request(req);
783 out:
784 if (!err)
785 ceph_init_inode_acls(d_inode(dentry), &acls);
786 else
787 d_drop(dentry);
788 ceph_release_acls_info(&acls);
789 return err;
790 }
791
792 static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
793 bool excl)
794 {
795 return ceph_mknod(dir, dentry, mode, 0);
796 }
797
798 static int ceph_symlink(struct inode *dir, struct dentry *dentry,
799 const char *dest)
800 {
801 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
802 struct ceph_mds_client *mdsc = fsc->mdsc;
803 struct ceph_mds_request *req;
804 int err;
805
806 if (ceph_snap(dir) != CEPH_NOSNAP)
807 return -EROFS;
808
809 dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
810 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
811 if (IS_ERR(req)) {
812 err = PTR_ERR(req);
813 goto out;
814 }
815 req->r_path2 = kstrdup(dest, GFP_KERNEL);
816 if (!req->r_path2) {
817 err = -ENOMEM;
818 ceph_mdsc_put_request(req);
819 goto out;
820 }
821 req->r_locked_dir = dir;
822 req->r_dentry = dget(dentry);
823 req->r_num_caps = 2;
824 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
825 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
826 err = ceph_mdsc_do_request(mdsc, dir, req);
827 if (!err && !req->r_reply_info.head->is_dentry)
828 err = ceph_handle_notrace_create(dir, dentry);
829 ceph_mdsc_put_request(req);
830 out:
831 if (err)
832 d_drop(dentry);
833 return err;
834 }
835
836 static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
837 {
838 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
839 struct ceph_mds_client *mdsc = fsc->mdsc;
840 struct ceph_mds_request *req;
841 struct ceph_acls_info acls = {};
842 int err = -EROFS;
843 int op;
844
845 if (ceph_snap(dir) == CEPH_SNAPDIR) {
846 /* mkdir .snap/foo is a MKSNAP */
847 op = CEPH_MDS_OP_MKSNAP;
848 dout("mksnap dir %p snap '%pd' dn %p\n", dir,
849 dentry, dentry);
850 } else if (ceph_snap(dir) == CEPH_NOSNAP) {
851 dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
852 op = CEPH_MDS_OP_MKDIR;
853 } else {
854 goto out;
855 }
856
857 mode |= S_IFDIR;
858 err = ceph_pre_init_acls(dir, &mode, &acls);
859 if (err < 0)
860 goto out;
861
862 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
863 if (IS_ERR(req)) {
864 err = PTR_ERR(req);
865 goto out;
866 }
867
868 req->r_dentry = dget(dentry);
869 req->r_num_caps = 2;
870 req->r_locked_dir = dir;
871 req->r_args.mkdir.mode = cpu_to_le32(mode);
872 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
873 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
874 if (acls.pagelist) {
875 req->r_pagelist = acls.pagelist;
876 acls.pagelist = NULL;
877 }
878 err = ceph_mdsc_do_request(mdsc, dir, req);
879 if (!err &&
880 !req->r_reply_info.head->is_target &&
881 !req->r_reply_info.head->is_dentry)
882 err = ceph_handle_notrace_create(dir, dentry);
883 ceph_mdsc_put_request(req);
884 out:
885 if (!err)
886 ceph_init_inode_acls(d_inode(dentry), &acls);
887 else
888 d_drop(dentry);
889 ceph_release_acls_info(&acls);
890 return err;
891 }
892
893 static int ceph_link(struct dentry *old_dentry, struct inode *dir,
894 struct dentry *dentry)
895 {
896 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
897 struct ceph_mds_client *mdsc = fsc->mdsc;
898 struct ceph_mds_request *req;
899 int err;
900
901 if (ceph_snap(dir) != CEPH_NOSNAP)
902 return -EROFS;
903
904 dout("link in dir %p old_dentry %p dentry %p\n", dir,
905 old_dentry, dentry);
906 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
907 if (IS_ERR(req)) {
908 d_drop(dentry);
909 return PTR_ERR(req);
910 }
911 req->r_dentry = dget(dentry);
912 req->r_num_caps = 2;
913 req->r_old_dentry = dget(old_dentry);
914 req->r_locked_dir = dir;
915 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
916 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
917 /* release LINK_SHARED on source inode (mds will lock it) */
918 req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
919 err = ceph_mdsc_do_request(mdsc, dir, req);
920 if (err) {
921 d_drop(dentry);
922 } else if (!req->r_reply_info.head->is_dentry) {
923 ihold(d_inode(old_dentry));
924 d_instantiate(dentry, d_inode(old_dentry));
925 }
926 ceph_mdsc_put_request(req);
927 return err;
928 }
929
930 /*
931 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it
932 * looks like the link count will hit 0, drop any other caps (other
933 * than PIN) we don't specifically want (due to the file still being
934 * open).
935 */
936 static int drop_caps_for_unlink(struct inode *inode)
937 {
938 struct ceph_inode_info *ci = ceph_inode(inode);
939 int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
940
941 spin_lock(&ci->i_ceph_lock);
942 if (inode->i_nlink == 1) {
943 drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
944 ci->i_ceph_flags |= CEPH_I_NODELAY;
945 }
946 spin_unlock(&ci->i_ceph_lock);
947 return drop;
948 }
949
950 /*
951  * rmdir and unlink differ only in the metadata op code
952 */
953 static int ceph_unlink(struct inode *dir, struct dentry *dentry)
954 {
955 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
956 struct ceph_mds_client *mdsc = fsc->mdsc;
957 struct inode *inode = d_inode(dentry);
958 struct ceph_mds_request *req;
959 int err = -EROFS;
960 int op;
961
962 if (ceph_snap(dir) == CEPH_SNAPDIR) {
963 /* rmdir .snap/foo is RMSNAP */
964 dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
965 op = CEPH_MDS_OP_RMSNAP;
966 } else if (ceph_snap(dir) == CEPH_NOSNAP) {
967 dout("unlink/rmdir dir %p dn %p inode %p\n",
968 dir, dentry, inode);
969 op = d_is_dir(dentry) ?
970 CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
971 } else
972 goto out;
973 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
974 if (IS_ERR(req)) {
975 err = PTR_ERR(req);
976 goto out;
977 }
978 req->r_dentry = dget(dentry);
979 req->r_num_caps = 2;
980 req->r_locked_dir = dir;
981 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
982 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
983 req->r_inode_drop = drop_caps_for_unlink(inode);
984 err = ceph_mdsc_do_request(mdsc, dir, req);
985 if (!err && !req->r_reply_info.head->is_dentry)
986 d_delete(dentry);
987 ceph_mdsc_put_request(req);
988 out:
989 return err;
990 }
991
992 static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
993 struct inode *new_dir, struct dentry *new_dentry)
994 {
995 struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
996 struct ceph_mds_client *mdsc = fsc->mdsc;
997 struct ceph_mds_request *req;
998 int op = CEPH_MDS_OP_RENAME;
999 int err;
1000
1001 if (ceph_snap(old_dir) != ceph_snap(new_dir))
1002 return -EXDEV;
1003 if (ceph_snap(old_dir) != CEPH_NOSNAP) {
1004 if (old_dir == new_dir && ceph_snap(old_dir) == CEPH_SNAPDIR)
1005 op = CEPH_MDS_OP_RENAMESNAP;
1006 else
1007 return -EROFS;
1008 }
1009 dout("rename dir %p dentry %p to dir %p dentry %p\n",
1010 old_dir, old_dentry, new_dir, new_dentry);
1011 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
1012 if (IS_ERR(req))
1013 return PTR_ERR(req);
1014 ihold(old_dir);
1015 req->r_dentry = dget(new_dentry);
1016 req->r_num_caps = 2;
1017 req->r_old_dentry = dget(old_dentry);
1018 req->r_old_dentry_dir = old_dir;
1019 req->r_locked_dir = new_dir;
1020 req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
1021 req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
1022 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
1023 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
1024 /* release LINK_RDCACHE on source inode (mds will lock it) */
1025 req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
1026 if (d_really_is_positive(new_dentry))
1027 req->r_inode_drop = drop_caps_for_unlink(d_inode(new_dentry));
1028 err = ceph_mdsc_do_request(mdsc, old_dir, req);
1029 if (!err && !req->r_reply_info.head->is_dentry) {
1030 /*
1031 * Normally d_move() is done by fill_trace (called by
1032 * do_request, above). If there is no trace, we need
1033 * to do it here.
1034 */
1035
1036 /* d_move screws up sibling dentries' offsets */
1037 ceph_dir_clear_complete(old_dir);
1038 ceph_dir_clear_complete(new_dir);
1039
1040 d_move(old_dentry, new_dentry);
1041
1042 /* ensure target dentry is invalidated, despite
1043 rehashing bug in vfs_rename_dir */
1044 ceph_invalidate_dentry_lease(new_dentry);
1045 }
1046 ceph_mdsc_put_request(req);
1047 return err;
1048 }
1049
1050 /*
1051 * Ensure a dentry lease will no longer revalidate.
1052 */
1053 void ceph_invalidate_dentry_lease(struct dentry *dentry)
1054 {
1055 spin_lock(&dentry->d_lock);
1056 dentry->d_time = jiffies;
1057 ceph_dentry(dentry)->lease_shared_gen = 0;
1058 spin_unlock(&dentry->d_lock);
1059 }
1060
1061 /*
1062 * Check if dentry lease is valid. If not, delete the lease. Try to
1063  * renew if the lease is more than half up.
1064 */
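/*
 * Note: lease_renew_after is set when the lease is granted; here we
 * only check it and, once jiffies passes it, send a single
 * CEPH_MDS_LEASE_RENEW for the still-valid lease while clearing
 * lease_renew_after so the renewal is not requested repeatedly.
 */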
1065 static int dentry_lease_is_valid(struct dentry *dentry)
1066 {
1067 struct ceph_dentry_info *di;
1068 struct ceph_mds_session *s;
1069 int valid = 0;
1070 u32 gen;
1071 unsigned long ttl;
1072 struct ceph_mds_session *session = NULL;
1073 struct inode *dir = NULL;
1074 u32 seq = 0;
1075
1076 spin_lock(&dentry->d_lock);
1077 di = ceph_dentry(dentry);
1078 if (di->lease_session) {
1079 s = di->lease_session;
1080 spin_lock(&s->s_gen_ttl_lock);
1081 gen = s->s_cap_gen;
1082 ttl = s->s_cap_ttl;
1083 spin_unlock(&s->s_gen_ttl_lock);
1084
1085 if (di->lease_gen == gen &&
1086 time_before(jiffies, dentry->d_time) &&
1087 time_before(jiffies, ttl)) {
1088 valid = 1;
1089 if (di->lease_renew_after &&
1090 time_after(jiffies, di->lease_renew_after)) {
1091 /* we should renew */
1092 dir = d_inode(dentry->d_parent);
1093 session = ceph_get_mds_session(s);
1094 seq = di->lease_seq;
1095 di->lease_renew_after = 0;
1096 di->lease_renew_from = jiffies;
1097 }
1098 }
1099 }
1100 spin_unlock(&dentry->d_lock);
1101
1102 if (session) {
1103 ceph_mdsc_lease_send_msg(session, dir, dentry,
1104 CEPH_MDS_LEASE_RENEW, seq);
1105 ceph_put_mds_session(session);
1106 }
1107 dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
1108 return valid;
1109 }
1110
1111 /*
1112 * Check if directory-wide content lease/cap is valid.
1113 */
1114 static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
1115 {
1116 struct ceph_inode_info *ci = ceph_inode(dir);
1117 struct ceph_dentry_info *di = ceph_dentry(dentry);
1118 int valid = 0;
1119
1120 spin_lock(&ci->i_ceph_lock);
1121 if (ci->i_shared_gen == di->lease_shared_gen)
1122 valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
1123 spin_unlock(&ci->i_ceph_lock);
1124 dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
1125 dir, (unsigned)ci->i_shared_gen, dentry,
1126 (unsigned)di->lease_shared_gen, valid);
1127 return valid;
1128 }
1129
1130 /*
1131 * Check if cached dentry can be trusted.
1132 */
1133 static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
1134 {
1135 int valid = 0;
1136 struct dentry *parent;
1137 struct inode *dir;
1138
1139 if (flags & LOOKUP_RCU)
1140 return -ECHILD;
1141
1142 dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
1143 dentry, d_inode(dentry), ceph_dentry(dentry)->offset);
1144
1145 parent = dget_parent(dentry);
1146 dir = d_inode(parent);
1147
1148 /* always trust cached snapped dentries, snapdir dentry */
1149 if (ceph_snap(dir) != CEPH_NOSNAP) {
1150 dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
1151 dentry, d_inode(dentry));
1152 valid = 1;
1153 } else if (d_really_is_positive(dentry) &&
1154 ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) {
1155 valid = 1;
1156 } else if (dentry_lease_is_valid(dentry) ||
1157 dir_lease_is_valid(dir, dentry)) {
1158 if (d_really_is_positive(dentry))
1159 valid = ceph_is_any_caps(d_inode(dentry));
1160 else
1161 valid = 1;
1162 }
1163
1164 if (!valid) {
1165 struct ceph_mds_client *mdsc =
1166 ceph_sb_to_client(dir->i_sb)->mdsc;
1167 struct ceph_mds_request *req;
1168 int op, mask, err;
1169
1170 op = ceph_snap(dir) == CEPH_SNAPDIR ?
1171 CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
1172 req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
1173 if (!IS_ERR(req)) {
1174 req->r_dentry = dget(dentry);
1175 req->r_num_caps = 2;
1176
1177 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
1178 if (ceph_security_xattr_wanted(dir))
1179 mask |= CEPH_CAP_XATTR_SHARED;
1180 req->r_args.getattr.mask = mask;
1181
1182 req->r_locked_dir = dir;
1183 err = ceph_mdsc_do_request(mdsc, NULL, req);
1184 if (err == 0 || err == -ENOENT) {
1185 if (dentry == req->r_dentry) {
1186 valid = !d_unhashed(dentry);
1187 } else {
1188 d_invalidate(req->r_dentry);
1189 err = -EAGAIN;
1190 }
1191 }
1192 ceph_mdsc_put_request(req);
1193 dout("d_revalidate %p lookup result=%d\n",
1194 dentry, err);
1195 }
1196 }
1197
1198 dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
1199 if (valid) {
1200 ceph_dentry_lru_touch(dentry);
1201 } else {
1202 ceph_dir_clear_complete(dir);
1203 }
1204
1205 dput(parent);
1206 return valid;
1207 }
1208
1209 /*
1210 * Release our ceph_dentry_info.
1211 */
1212 static void ceph_d_release(struct dentry *dentry)
1213 {
1214 struct ceph_dentry_info *di = ceph_dentry(dentry);
1215
1216 dout("d_release %p\n", dentry);
1217 ceph_dentry_lru_del(dentry);
1218 if (di->lease_session)
1219 ceph_put_mds_session(di->lease_session);
1220 kmem_cache_free(ceph_dentry_cachep, di);
1221 dentry->d_fsdata = NULL;
1222 }
1223
1224 static int ceph_snapdir_d_revalidate(struct dentry *dentry,
1225 unsigned int flags)
1226 {
1227 /*
1228 * Eventually, we'll want to revalidate snapped metadata
1229 * too... probably...
1230 */
1231 return 1;
1232 }
1233
1234 /*
1235 * When the VFS prunes a dentry from the cache, we need to clear the
1236 * complete flag on the parent directory.
1237 *
1238 * Called under dentry->d_lock.
1239 */
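/*
 * For example, once the shrinker evicts a hashed dentry of a complete
 * directory, a later lookup of that name can no longer be answered
 * with -ENOENT from the cache (see ceph_lookup()), so the parent's
 * complete flag must be dropped here.
 */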
1240 static void ceph_d_prune(struct dentry *dentry)
1241 {
1242 dout("ceph_d_prune %p\n", dentry);
1243
1244 /* do we have a valid parent? */
1245 if (IS_ROOT(dentry))
1246 return;
1247
1248 /* if we are not hashed, we don't affect dir's completeness */
1249 if (d_unhashed(dentry))
1250 return;
1251
1252 /*
1253 * we hold d_lock, so d_parent is stable, and d_fsdata is never
1254 * cleared until d_release
1255 */
1256 ceph_dir_clear_complete(d_inode(dentry->d_parent));
1257 }
1258
1259 /*
1260 * read() on a dir. This weird interface hack only works if mounted
1261 * with '-o dirstat'.
1262 */
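/*
 * Example: on a filesystem mounted with -o dirstat, reading a
 * directory as if it were a regular file returns the text built
 * below, i.e. the entries/files/subdirs counts and the recursive
 * rentries/rfiles/rsubdirs/rbytes/rctime statistics.
 */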
1263 static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
1264 loff_t *ppos)
1265 {
1266 struct ceph_file_info *cf = file->private_data;
1267 struct inode *inode = file_inode(file);
1268 struct ceph_inode_info *ci = ceph_inode(inode);
1269 int left;
1270 const int bufsize = 1024;
1271
1272 if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
1273 return -EISDIR;
1274
1275 if (!cf->dir_info) {
1276 cf->dir_info = kmalloc(bufsize, GFP_KERNEL);
1277 if (!cf->dir_info)
1278 return -ENOMEM;
1279 cf->dir_info_len =
1280 snprintf(cf->dir_info, bufsize,
1281 "entries: %20lld\n"
1282 " files: %20lld\n"
1283 " subdirs: %20lld\n"
1284 "rentries: %20lld\n"
1285 " rfiles: %20lld\n"
1286 " rsubdirs: %20lld\n"
1287 "rbytes: %20lld\n"
1288 "rctime: %10ld.%09ld\n",
1289 ci->i_files + ci->i_subdirs,
1290 ci->i_files,
1291 ci->i_subdirs,
1292 ci->i_rfiles + ci->i_rsubdirs,
1293 ci->i_rfiles,
1294 ci->i_rsubdirs,
1295 ci->i_rbytes,
1296 (long)ci->i_rctime.tv_sec,
1297 (long)ci->i_rctime.tv_nsec);
1298 }
1299
1300 if (*ppos >= cf->dir_info_len)
1301 return 0;
1302 size = min_t(unsigned, size, cf->dir_info_len-*ppos);
1303 left = copy_to_user(buf, cf->dir_info + *ppos, size);
1304 if (left == size)
1305 return -EFAULT;
1306 *ppos += (size - left);
1307 return size - left;
1308 }
1309
1310 /*
1311 * We maintain a private dentry LRU.
1312 *
1313 * FIXME: this needs to be changed to a per-mds lru to be useful.
1314 */
1315 void ceph_dentry_lru_add(struct dentry *dn)
1316 {
1317 struct ceph_dentry_info *di = ceph_dentry(dn);
1318 struct ceph_mds_client *mdsc;
1319
1320 dout("dentry_lru_add %p %p '%pd'\n", di, dn, dn);
1321 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1322 spin_lock(&mdsc->dentry_lru_lock);
1323 list_add_tail(&di->lru, &mdsc->dentry_lru);
1324 mdsc->num_dentry++;
1325 spin_unlock(&mdsc->dentry_lru_lock);
1326 }
1327
1328 void ceph_dentry_lru_touch(struct dentry *dn)
1329 {
1330 struct ceph_dentry_info *di = ceph_dentry(dn);
1331 struct ceph_mds_client *mdsc;
1332
1333 dout("dentry_lru_touch %p %p '%pd' (offset %lld)\n", di, dn, dn,
1334 di->offset);
1335 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1336 spin_lock(&mdsc->dentry_lru_lock);
1337 list_move_tail(&di->lru, &mdsc->dentry_lru);
1338 spin_unlock(&mdsc->dentry_lru_lock);
1339 }
1340
1341 void ceph_dentry_lru_del(struct dentry *dn)
1342 {
1343 struct ceph_dentry_info *di = ceph_dentry(dn);
1344 struct ceph_mds_client *mdsc;
1345
1346 dout("dentry_lru_del %p %p '%pd'\n", di, dn, dn);
1347 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1348 spin_lock(&mdsc->dentry_lru_lock);
1349 list_del_init(&di->lru);
1350 mdsc->num_dentry--;
1351 spin_unlock(&mdsc->dentry_lru_lock);
1352 }
1353
1354 /*
1355 * Return name hash for a given dentry. This is dependent on
1356 * the parent directory's hash function.
1357 */
1358 unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
1359 {
1360 struct ceph_inode_info *dci = ceph_inode(dir);
1361
1362 switch (dci->i_dir_layout.dl_dir_hash) {
1363 case 0: /* for backward compat */
1364 case CEPH_STR_HASH_LINUX:
1365 return dn->d_name.hash;
1366
1367 default:
1368 return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
1369 dn->d_name.name, dn->d_name.len);
1370 }
1371 }
1372
1373 const struct file_operations ceph_dir_fops = {
1374 .read = ceph_read_dir,
1375 .iterate = ceph_readdir,
1376 .llseek = ceph_dir_llseek,
1377 .open = ceph_open,
1378 .release = ceph_release,
1379 .unlocked_ioctl = ceph_ioctl,
1380 .fsync = ceph_fsync,
1381 };
1382
1383 const struct file_operations ceph_snapdir_fops = {
1384 .iterate = ceph_readdir,
1385 .llseek = ceph_dir_llseek,
1386 .open = ceph_open,
1387 .release = ceph_release,
1388 };
1389
1390 const struct inode_operations ceph_dir_iops = {
1391 .lookup = ceph_lookup,
1392 .permission = ceph_permission,
1393 .getattr = ceph_getattr,
1394 .setattr = ceph_setattr,
1395 .setxattr = ceph_setxattr,
1396 .getxattr = ceph_getxattr,
1397 .listxattr = ceph_listxattr,
1398 .removexattr = ceph_removexattr,
1399 .get_acl = ceph_get_acl,
1400 .set_acl = ceph_set_acl,
1401 .mknod = ceph_mknod,
1402 .symlink = ceph_symlink,
1403 .mkdir = ceph_mkdir,
1404 .link = ceph_link,
1405 .unlink = ceph_unlink,
1406 .rmdir = ceph_unlink,
1407 .rename = ceph_rename,
1408 .create = ceph_create,
1409 .atomic_open = ceph_atomic_open,
1410 };
1411
1412 const struct inode_operations ceph_snapdir_iops = {
1413 .lookup = ceph_lookup,
1414 .permission = ceph_permission,
1415 .getattr = ceph_getattr,
1416 .mkdir = ceph_mkdir,
1417 .rmdir = ceph_unlink,
1418 .rename = ceph_rename,
1419 };
1420
1421 const struct dentry_operations ceph_dentry_ops = {
1422 .d_revalidate = ceph_d_revalidate,
1423 .d_release = ceph_d_release,
1424 .d_prune = ceph_d_prune,
1425 };
1426
1427 const struct dentry_operations ceph_snapdir_dentry_ops = {
1428 .d_revalidate = ceph_snapdir_d_revalidate,
1429 .d_release = ceph_d_release,
1430 };
1431
1432 const struct dentry_operations ceph_snap_dentry_ops = {
1433 .d_release = ceph_d_release,
1434 .d_prune = ceph_d_prune,
1435 };