fs/ceph/dir.c
1 #include <linux/ceph/ceph_debug.h>
2
3 #include <linux/spinlock.h>
4 #include <linux/fs_struct.h>
5 #include <linux/namei.h>
6 #include <linux/slab.h>
7 #include <linux/sched.h>
8
9 #include "super.h"
10 #include "mds_client.h"
11
12 /*
13 * Directory operations: readdir, lookup, create, link, unlink,
14 * rename, etc.
15 */
16
17 /*
18 * Ceph MDS operations are specified in terms of a base ino and
19 * relative path. Thus, the client can specify an operation on a
20 * specific inode (e.g., a getattr due to fstat(2)), or as a path
21 * relative to, say, the root directory.
22 *
23 * Normally, we limit ourselves to strict inode ops (no path component)
24 * or dentry operations (a single path component relative to an ino). The
25 * exception to this is open_root_dentry(), which will open the mount
26 * point by name.
27 */
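/*
 * For illustration (a sketch, not original code in this file): a pure
 * inode op fills only req->r_inode, while a dentry op supplies a single
 * path component relative to its parent, e.g. as ceph_lookup() below does:
 *
 *	req->r_dentry = dget(dentry);	  base ino comes from the parent dir
 *	req->r_locked_dir = dir;	  path is the one dentry name
 */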
28
29 const struct inode_operations ceph_dir_iops;
30 const struct file_operations ceph_dir_fops;
31 const struct dentry_operations ceph_dentry_ops;
32
33 /*
34 * Initialize ceph dentry state.
35 */
36 int ceph_init_dentry(struct dentry *dentry)
37 {
38 struct ceph_dentry_info *di;
39
40 if (dentry->d_fsdata)
41 return 0;
42
43 di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
44 if (!di)
45 return -ENOMEM; /* oh well */
46
47 spin_lock(&dentry->d_lock);
48 if (dentry->d_fsdata) {
49 /* lost a race */
50 kmem_cache_free(ceph_dentry_cachep, di);
51 goto out_unlock;
52 }
53
54 if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
55 d_set_d_op(dentry, &ceph_dentry_ops);
56 else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
57 d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
58 else
59 d_set_d_op(dentry, &ceph_snap_dentry_ops);
60
61 di->dentry = dentry;
62 di->lease_session = NULL;
63 dentry->d_time = jiffies;
64 /* avoid reordering d_fsdata setup so that the check above is safe */
65 smp_mb();
66 dentry->d_fsdata = di;
67 ceph_dentry_lru_add(dentry);
68 out_unlock:
69 spin_unlock(&dentry->d_lock);
70 return 0;
71 }
72
73 struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
74 {
75 struct inode *inode = NULL;
76
77 if (!dentry)
78 return NULL;
79
80 spin_lock(&dentry->d_lock);
81 if (!IS_ROOT(dentry)) {
82 inode = dentry->d_parent->d_inode;
83 ihold(inode);
84 }
85 spin_unlock(&dentry->d_lock);
86 return inode;
87 }
88
89
90 /*
91 * for readdir, we encode the directory frag and offset within that
92 * frag into f_pos.
93 */
94 static unsigned fpos_frag(loff_t p)
95 {
96 return p >> 32;
97 }
98 static unsigned fpos_off(loff_t p)
99 {
100 return p & 0xffffffff;
101 }
102
103 static int fpos_cmp(loff_t l, loff_t r)
104 {
105 int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
106 if (v)
107 return v;
108 return (int)(fpos_off(l) - fpos_off(r));
109 }
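
/*
 * Worked example (a sketch): ceph_make_fpos(), used by ceph_readdir()
 * below, composes the two halves with the frag in the high 32 bits and
 * the offset in the low 32 bits, roughly
 *
 *	loff_t pos = ((loff_t)frag << 32) | (loff_t)off;
 *
 * so frag 0x2 at offset 5 gives pos 0x0000000200000005, from which
 * fpos_frag() recovers 0x2 and fpos_off() recovers 5.
 */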
110
111 /*
112 * When possible, we try to satisfy a readdir by peeking at the
113 * dcache. We make this work by carefully ordering dentries on
114 * d_child when we initially get results back from the MDS, and
115 * falling back to a "normal" sync readdir if any dentries in the dir
116 * are dropped.
117 *
118 * Complete dir indicates that we have all dentries in the dir. It is
119 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
120 * the MDS if/when the directory is modified).
121 */
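/*
 * For reference, the caller's gate (see ceph_readdir() below) is roughly:
 *
 *	if (__ceph_dir_is_complete(ci) &&
 *	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
 *		u32 shared_gen = ci->i_shared_gen;
 *		...
 *		err = __dcache_readdir(file, ctx, shared_gen);
 *	}
 *
 * i.e. this path is only attempted while FILE_SHARED is held and the
 * directory is marked complete.
 */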
122 static int __dcache_readdir(struct file *file, struct dir_context *ctx,
123 u32 shared_gen)
124 {
125 struct ceph_file_info *fi = file->private_data;
126 struct dentry *parent = file->f_path.dentry;
127 struct inode *dir = parent->d_inode;
128 struct list_head *p;
129 struct dentry *dentry, *last;
130 struct ceph_dentry_info *di;
131 int err = 0;
132
133 /* claim ref on last dentry we returned */
134 last = fi->dentry;
135 fi->dentry = NULL;
136
137 dout("__dcache_readdir %p v%u at %llu (last %p)\n",
138 dir, shared_gen, ctx->pos, last);
139
140 spin_lock(&parent->d_lock);
141
142 /* start at beginning? */
143 if (ctx->pos == 2 || last == NULL ||
144 fpos_cmp(ctx->pos, ceph_dentry(last)->offset) < 0) {
145 if (list_empty(&parent->d_subdirs))
146 goto out_unlock;
147 p = parent->d_subdirs.prev;
148 dout(" initial p %p/%p\n", p->prev, p->next);
149 } else {
150 p = last->d_child.prev;
151 }
152
153 more:
154 dentry = list_entry(p, struct dentry, d_child);
155 di = ceph_dentry(dentry);
156 while (1) {
157 dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
158 d_unhashed(dentry) ? "!hashed" : "hashed",
159 parent->d_subdirs.prev, parent->d_subdirs.next);
160 if (p == &parent->d_subdirs) {
161 fi->flags |= CEPH_F_ATEND;
162 goto out_unlock;
163 }
164 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
165 if (di->lease_shared_gen == shared_gen &&
166 !d_unhashed(dentry) && dentry->d_inode &&
167 ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
168 ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
169 fpos_cmp(ctx->pos, di->offset) <= 0)
170 break;
171 dout(" skipping %p %pd at %llu (%llu)%s%s\n", dentry,
172 dentry, di->offset,
173 ctx->pos, d_unhashed(dentry) ? " unhashed" : "",
174 !dentry->d_inode ? " null" : "");
175 spin_unlock(&dentry->d_lock);
176 p = p->prev;
177 dentry = list_entry(p, struct dentry, d_child);
178 di = ceph_dentry(dentry);
179 }
180
181 dget_dlock(dentry);
182 spin_unlock(&dentry->d_lock);
183 spin_unlock(&parent->d_lock);
184
185 /* make sure a dentry wasn't dropped while we didn't have parent lock */
186 if (!ceph_dir_is_complete(dir)) {
187 dout(" lost dir complete on %p; falling back to mds\n", dir);
188 dput(dentry);
189 err = -EAGAIN;
190 goto out;
191 }
192
193 dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
194 dentry, dentry, dentry->d_inode);
195 if (!dir_emit(ctx, dentry->d_name.name,
196 dentry->d_name.len,
197 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
198 dentry->d_inode->i_mode >> 12)) {
199 if (last) {
200 /* remember our position */
201 fi->dentry = last;
202 fi->next_offset = fpos_off(di->offset);
203 }
204 dput(dentry);
205 return 0;
206 }
207
208 ctx->pos = di->offset + 1;
209
210 if (last)
211 dput(last);
212 last = dentry;
213
214 spin_lock(&parent->d_lock);
215 p = p->prev; /* advance to next dentry */
216 goto more;
217
218 out_unlock:
219 spin_unlock(&parent->d_lock);
220 out:
221 if (last)
222 dput(last);
223 return err;
224 }
225
226 /*
227 * make note of the last dentry we read, so we can
228 * continue at the same lexicographical point,
229 * regardless of what dir changes take place on the
230 * server.
231 */
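/*
 * Concretely, fi->last_name is handed to the MDS as req->r_path2 in
 * ceph_readdir() below; that is how the next READDIR request resumes
 * after the last entry we already returned.
 */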
232 static int note_last_dentry(struct ceph_file_info *fi, const char *name,
233 int len)
234 {
235 kfree(fi->last_name);
236 fi->last_name = kmalloc(len+1, GFP_NOFS);
237 if (!fi->last_name)
238 return -ENOMEM;
239 memcpy(fi->last_name, name, len);
240 fi->last_name[len] = 0;
241 dout("note_last_dentry '%s'\n", fi->last_name);
242 return 0;
243 }
244
245 static int ceph_readdir(struct file *file, struct dir_context *ctx)
246 {
247 struct ceph_file_info *fi = file->private_data;
248 struct inode *inode = file_inode(file);
249 struct ceph_inode_info *ci = ceph_inode(inode);
250 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
251 struct ceph_mds_client *mdsc = fsc->mdsc;
252 unsigned frag = fpos_frag(ctx->pos);
253 int off = fpos_off(ctx->pos);
254 int err;
255 u32 ftype;
256 struct ceph_mds_reply_info_parsed *rinfo;
257
258 dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
259 if (fi->flags & CEPH_F_ATEND)
260 return 0;
261
262 /* always start with . and .. */
263 if (ctx->pos == 0) {
264 /* note dir version at start of readdir so we can tell
265 * if any dentries get dropped */
266 fi->dir_release_count = atomic_read(&ci->i_release_count);
267
268 dout("readdir off 0 -> '.'\n");
269 if (!dir_emit(ctx, ".", 1,
270 ceph_translate_ino(inode->i_sb, inode->i_ino),
271 inode->i_mode >> 12))
272 return 0;
273 ctx->pos = 1;
274 off = 1;
275 }
276 if (ctx->pos == 1) {
277 ino_t ino = parent_ino(file->f_path.dentry);
278 dout("readdir off 1 -> '..'\n");
279 if (!dir_emit(ctx, "..", 2,
280 ceph_translate_ino(inode->i_sb, ino),
281 inode->i_mode >> 12))
282 return 0;
283 ctx->pos = 2;
284 off = 2;
285 }
286
287 /* can we use the dcache? */
288 spin_lock(&ci->i_ceph_lock);
289 if ((ctx->pos == 2 || fi->dentry) &&
290 !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
291 ceph_snap(inode) != CEPH_SNAPDIR &&
292 __ceph_dir_is_complete(ci) &&
293 __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
294 u32 shared_gen = ci->i_shared_gen;
295 spin_unlock(&ci->i_ceph_lock);
296 err = __dcache_readdir(file, ctx, shared_gen);
297 if (err != -EAGAIN)
298 return err;
299 frag = fpos_frag(ctx->pos);
300 off = fpos_off(ctx->pos);
301 } else {
302 spin_unlock(&ci->i_ceph_lock);
303 }
304 if (fi->dentry) {
305 err = note_last_dentry(fi, fi->dentry->d_name.name,
306 fi->dentry->d_name.len);
307 if (err)
308 return err;
309 dput(fi->dentry);
310 fi->dentry = NULL;
311 }
312
313 /* proceed with a normal readdir */
314
315 more:
316 /* do we have the correct frag content buffered? */
317 if (fi->frag != frag || fi->last_readdir == NULL) {
318 struct ceph_mds_request *req;
319 int op = ceph_snap(inode) == CEPH_SNAPDIR ?
320 CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;
321
322 /* discard old result, if any */
323 if (fi->last_readdir) {
324 ceph_mdsc_put_request(fi->last_readdir);
325 fi->last_readdir = NULL;
326 }
327
328 dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
329 ceph_vinop(inode), frag, fi->last_name);
330 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
331 if (IS_ERR(req))
332 return PTR_ERR(req);
333 err = ceph_alloc_readdir_reply_buffer(req, inode);
334 if (err) {
335 ceph_mdsc_put_request(req);
336 return err;
337 }
338 req->r_inode = inode;
339 ihold(inode);
340 req->r_dentry = dget(file->f_path.dentry);
341 /* hints to request -> mds selection code */
342 req->r_direct_mode = USE_AUTH_MDS;
343 req->r_direct_hash = ceph_frag_value(frag);
344 req->r_direct_is_hash = true;
345 req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
346 req->r_readdir_offset = fi->next_offset;
347 req->r_args.readdir.frag = cpu_to_le32(frag);
348 err = ceph_mdsc_do_request(mdsc, NULL, req);
349 if (err < 0) {
350 ceph_mdsc_put_request(req);
351 return err;
352 }
353 dout("readdir got and parsed readdir result=%d"
354 " on frag %x, end=%d, complete=%d\n", err, frag,
355 (int)req->r_reply_info.dir_end,
356 (int)req->r_reply_info.dir_complete);
357
358 if (!req->r_did_prepopulate) {
359 dout("readdir !did_prepopulate");
360 			/* preclude marking the dir complete */
361 fi->dir_release_count--;
362 }
363
364 /* note next offset and last dentry name */
365 rinfo = &req->r_reply_info;
366 if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
367 frag = le32_to_cpu(rinfo->dir_dir->frag);
368 if (ceph_frag_is_leftmost(frag))
369 fi->next_offset = 2;
370 else
371 fi->next_offset = 0;
372 off = fi->next_offset;
373 }
374 fi->frag = frag;
375 fi->offset = fi->next_offset;
376 fi->last_readdir = req;
377
378 if (req->r_reply_info.dir_end) {
379 kfree(fi->last_name);
380 fi->last_name = NULL;
381 if (ceph_frag_is_rightmost(frag))
382 fi->next_offset = 2;
383 else
384 fi->next_offset = 0;
385 } else {
386 err = note_last_dentry(fi,
387 rinfo->dir_dname[rinfo->dir_nr-1],
388 rinfo->dir_dname_len[rinfo->dir_nr-1]);
389 if (err)
390 return err;
391 fi->next_offset += rinfo->dir_nr;
392 }
393 }
394
395 rinfo = &fi->last_readdir->r_reply_info;
396 dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
397 rinfo->dir_nr, off, fi->offset);
398
399 ctx->pos = ceph_make_fpos(frag, off);
400 while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
401 struct ceph_mds_reply_inode *in =
402 rinfo->dir_in[off - fi->offset].in;
403 struct ceph_vino vino;
404 ino_t ino;
405
406 dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
407 off, off - fi->offset, rinfo->dir_nr, ctx->pos,
408 rinfo->dir_dname_len[off - fi->offset],
409 rinfo->dir_dname[off - fi->offset], in);
410 BUG_ON(!in);
411 ftype = le32_to_cpu(in->mode) >> 12;
412 vino.ino = le64_to_cpu(in->ino);
413 vino.snap = le64_to_cpu(in->snapid);
414 ino = ceph_vino_to_ino(vino);
415 if (!dir_emit(ctx,
416 rinfo->dir_dname[off - fi->offset],
417 rinfo->dir_dname_len[off - fi->offset],
418 ceph_translate_ino(inode->i_sb, ino), ftype)) {
419 dout("filldir stopping us...\n");
420 return 0;
421 }
422 off++;
423 ctx->pos++;
424 }
425
426 if (fi->last_name) {
427 ceph_mdsc_put_request(fi->last_readdir);
428 fi->last_readdir = NULL;
429 goto more;
430 }
431
432 /* more frags? */
433 if (!ceph_frag_is_rightmost(frag)) {
434 frag = ceph_frag_next(frag);
435 off = 0;
436 ctx->pos = ceph_make_fpos(frag, off);
437 dout("readdir next frag is %x\n", frag);
438 goto more;
439 }
440 fi->flags |= CEPH_F_ATEND;
441
442 /*
443 * if dir_release_count still matches the dir, no dentries
444 * were released during the whole readdir, and we should have
445 * the complete dir contents in our cache.
446 */
447 spin_lock(&ci->i_ceph_lock);
448 if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
449 dout(" marking %p complete\n", inode);
450 __ceph_dir_set_complete(ci, fi->dir_release_count);
451 }
452 spin_unlock(&ci->i_ceph_lock);
453
454 dout("readdir %p file %p done.\n", inode, file);
455 return 0;
456 }
457
458 static void reset_readdir(struct ceph_file_info *fi, unsigned frag)
459 {
460 if (fi->last_readdir) {
461 ceph_mdsc_put_request(fi->last_readdir);
462 fi->last_readdir = NULL;
463 }
464 kfree(fi->last_name);
465 fi->last_name = NULL;
466 if (ceph_frag_is_leftmost(frag))
467 fi->next_offset = 2; /* compensate for . and .. */
468 else
469 fi->next_offset = 0;
470 if (fi->dentry) {
471 dput(fi->dentry);
472 fi->dentry = NULL;
473 }
474 fi->flags &= ~CEPH_F_ATEND;
475 }
476
477 static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
478 {
479 struct ceph_file_info *fi = file->private_data;
480 struct inode *inode = file->f_mapping->host;
481 loff_t old_offset = ceph_make_fpos(fi->frag, fi->next_offset);
482 loff_t retval;
483
484 mutex_lock(&inode->i_mutex);
485 retval = -EINVAL;
486 switch (whence) {
487 case SEEK_END:
488 offset += inode->i_size + 2; /* FIXME */
489 break;
490 case SEEK_CUR:
491 offset += file->f_pos;
492 case SEEK_SET:
493 break;
494 default:
495 goto out;
496 }
497
498 if (offset >= 0) {
499 if (offset != file->f_pos) {
500 file->f_pos = offset;
501 file->f_version = 0;
502 fi->flags &= ~CEPH_F_ATEND;
503 }
504 retval = offset;
505
506 /*
507 * discard buffered readdir content on seekdir(0), or
508 * seek to new frag, or seek prior to current chunk.
509 */
510 if (offset == 0 ||
511 fpos_frag(offset) != fi->frag ||
512 fpos_off(offset) < fi->offset) {
513 dout("dir_llseek dropping %p content\n", file);
514 reset_readdir(fi, fpos_frag(offset));
515 }
516
517 		/* a forward seek may skip entries; make sure we don't mark the dir complete later */
518 if (fpos_cmp(offset, old_offset) > 0)
519 fi->dir_release_count--;
520 }
521 out:
522 mutex_unlock(&inode->i_mutex);
523 return retval;
524 }
525
526 /*
527 * Handle lookups for the hidden .snap directory.
528 */
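/*
 * For context: the snapshot directory name comes from the mount options
 * (fsc->mount_options->snapdir_name, ".snap" by default), so a lookup of
 * e.g. "somedir/.snap" reaches this helper with err == -ENOENT from the
 * MDS and is linked to the synthetic snapdir inode instead.
 */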
529 int ceph_handle_snapdir(struct ceph_mds_request *req,
530 struct dentry *dentry, int err)
531 {
532 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
533 struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */
534
535 /* .snap dir? */
536 if (err == -ENOENT &&
537 ceph_snap(parent) == CEPH_NOSNAP &&
538 strcmp(dentry->d_name.name,
539 fsc->mount_options->snapdir_name) == 0) {
540 struct inode *inode = ceph_get_snapdir(parent);
541 dout("ENOENT on snapdir %p '%pd', linking to snapdir %p\n",
542 dentry, dentry, inode);
543 BUG_ON(!d_unhashed(dentry));
544 d_add(dentry, inode);
545 err = 0;
546 }
547 return err;
548 }
549
550 /*
551 * Figure out final result of a lookup/open request.
552 *
553 * Mainly, make sure we return the final req->r_dentry (if it already
554 * existed) in place of the original VFS-provided dentry when they
555 * differ.
556 *
557 * Gracefully handle the case where the MDS replies with -ENOENT and
558 * no trace (which it may do, at its discretion, e.g., if it doesn't
559 * care to issue a lease on the negative dentry).
560 */
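/*
 * Put differently, this implements the usual ->lookup() contract: return
 * NULL when the VFS-provided dentry was used as-is, return the spliced
 * dentry when the result already existed elsewhere in the dcache, or
 * ERR_PTR(err) on failure.
 */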
561 struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
562 struct dentry *dentry, int err)
563 {
564 if (err == -ENOENT) {
565 /* no trace? */
566 err = 0;
567 if (!req->r_reply_info.head->is_dentry) {
568 dout("ENOENT and no trace, dentry %p inode %p\n",
569 dentry, dentry->d_inode);
570 if (dentry->d_inode) {
571 d_drop(dentry);
572 err = -ENOENT;
573 } else {
574 d_add(dentry, NULL);
575 }
576 }
577 }
578 if (err)
579 dentry = ERR_PTR(err);
580 else if (dentry != req->r_dentry)
581 dentry = dget(req->r_dentry); /* we got spliced */
582 else
583 dentry = NULL;
584 return dentry;
585 }
586
587 static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
588 {
589 return ceph_ino(inode) == CEPH_INO_ROOT &&
590 strncmp(dentry->d_name.name, ".ceph", 5) == 0;
591 }
592
593 /*
594 * Look up a single dir entry. If there is a lookup intent, inform
595 * the MDS so that it gets our 'caps wanted' value in a single op.
596 */
597 static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
598 unsigned int flags)
599 {
600 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
601 struct ceph_mds_client *mdsc = fsc->mdsc;
602 struct ceph_mds_request *req;
603 int op;
604 int err;
605
606 dout("lookup %p dentry %p '%pd'\n",
607 dir, dentry, dentry);
608
609 if (dentry->d_name.len > NAME_MAX)
610 return ERR_PTR(-ENAMETOOLONG);
611
612 err = ceph_init_dentry(dentry);
613 if (err < 0)
614 return ERR_PTR(err);
615
616 /* can we conclude ENOENT locally? */
617 if (dentry->d_inode == NULL) {
618 struct ceph_inode_info *ci = ceph_inode(dir);
619 struct ceph_dentry_info *di = ceph_dentry(dentry);
620
621 spin_lock(&ci->i_ceph_lock);
622 dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
623 if (strncmp(dentry->d_name.name,
624 fsc->mount_options->snapdir_name,
625 dentry->d_name.len) &&
626 !is_root_ceph_dentry(dir, dentry) &&
627 __ceph_dir_is_complete(ci) &&
628 (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
629 spin_unlock(&ci->i_ceph_lock);
630 dout(" dir %p complete, -ENOENT\n", dir);
631 d_add(dentry, NULL);
632 di->lease_shared_gen = ci->i_shared_gen;
633 return NULL;
634 }
635 spin_unlock(&ci->i_ceph_lock);
636 }
637
638 op = ceph_snap(dir) == CEPH_SNAPDIR ?
639 CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
640 req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
641 if (IS_ERR(req))
642 return ERR_CAST(req);
643 req->r_dentry = dget(dentry);
644 req->r_num_caps = 2;
645 /* we only need inode linkage */
646 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
647 req->r_locked_dir = dir;
648 err = ceph_mdsc_do_request(mdsc, NULL, req);
649 err = ceph_handle_snapdir(req, dentry, err);
650 dentry = ceph_finish_lookup(req, dentry, err);
651 ceph_mdsc_put_request(req); /* will dput(dentry) */
652 dout("lookup result=%p\n", dentry);
653 return dentry;
654 }
655
656 /*
657 * If we do a create but get no trace back from the MDS, follow up with
658 * a lookup (the VFS expects us to link up the provided dentry).
659 */
660 int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
661 {
662 struct dentry *result = ceph_lookup(dir, dentry, 0);
663
664 if (result && !IS_ERR(result)) {
665 /*
666 * We created the item, then did a lookup, and found
667 * it was already linked to another inode we already
668 * had in our cache (and thus got spliced). Link our
669 * dentry to that inode, but don't hash it, just in
670 * case the VFS wants to dereference it.
671 */
672 BUG_ON(!result->d_inode);
673 d_instantiate(dentry, result->d_inode);
674 return 0;
675 }
676 return PTR_ERR(result);
677 }
678
679 static int ceph_mknod(struct inode *dir, struct dentry *dentry,
680 umode_t mode, dev_t rdev)
681 {
682 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
683 struct ceph_mds_client *mdsc = fsc->mdsc;
684 struct ceph_mds_request *req;
685 struct ceph_acls_info acls = {};
686 int err;
687
688 if (ceph_snap(dir) != CEPH_NOSNAP)
689 return -EROFS;
690
691 err = ceph_pre_init_acls(dir, &mode, &acls);
692 if (err < 0)
693 return err;
694
695 dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
696 dir, dentry, mode, rdev);
697 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
698 if (IS_ERR(req)) {
699 err = PTR_ERR(req);
700 goto out;
701 }
702 req->r_dentry = dget(dentry);
703 req->r_num_caps = 2;
704 req->r_locked_dir = dir;
705 req->r_args.mknod.mode = cpu_to_le32(mode);
706 req->r_args.mknod.rdev = cpu_to_le32(rdev);
707 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
708 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
709 if (acls.pagelist) {
710 req->r_pagelist = acls.pagelist;
711 acls.pagelist = NULL;
712 }
713 err = ceph_mdsc_do_request(mdsc, dir, req);
714 if (!err && !req->r_reply_info.head->is_dentry)
715 err = ceph_handle_notrace_create(dir, dentry);
716 ceph_mdsc_put_request(req);
717 out:
718 if (!err)
719 ceph_init_inode_acls(dentry->d_inode, &acls);
720 else
721 d_drop(dentry);
722 ceph_release_acls_info(&acls);
723 return err;
724 }
725
726 static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
727 bool excl)
728 {
729 return ceph_mknod(dir, dentry, mode, 0);
730 }
731
732 static int ceph_symlink(struct inode *dir, struct dentry *dentry,
733 const char *dest)
734 {
735 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
736 struct ceph_mds_client *mdsc = fsc->mdsc;
737 struct ceph_mds_request *req;
738 int err;
739
740 if (ceph_snap(dir) != CEPH_NOSNAP)
741 return -EROFS;
742
743 dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
744 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
745 if (IS_ERR(req)) {
746 err = PTR_ERR(req);
747 goto out;
748 }
749 req->r_dentry = dget(dentry);
750 req->r_num_caps = 2;
751 req->r_path2 = kstrdup(dest, GFP_NOFS);
752 req->r_locked_dir = dir;
753 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
754 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
755 err = ceph_mdsc_do_request(mdsc, dir, req);
756 if (!err && !req->r_reply_info.head->is_dentry)
757 err = ceph_handle_notrace_create(dir, dentry);
758 ceph_mdsc_put_request(req);
759 out:
760 if (err)
761 d_drop(dentry);
762 return err;
763 }
764
765 static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
766 {
767 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
768 struct ceph_mds_client *mdsc = fsc->mdsc;
769 struct ceph_mds_request *req;
770 struct ceph_acls_info acls = {};
771 int err = -EROFS;
772 int op;
773
774 if (ceph_snap(dir) == CEPH_SNAPDIR) {
775 /* mkdir .snap/foo is a MKSNAP */
776 op = CEPH_MDS_OP_MKSNAP;
777 dout("mksnap dir %p snap '%pd' dn %p\n", dir,
778 dentry, dentry);
779 } else if (ceph_snap(dir) == CEPH_NOSNAP) {
780 dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
781 op = CEPH_MDS_OP_MKDIR;
782 } else {
783 goto out;
784 }
785
786 mode |= S_IFDIR;
787 err = ceph_pre_init_acls(dir, &mode, &acls);
788 if (err < 0)
789 goto out;
790
791 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
792 if (IS_ERR(req)) {
793 err = PTR_ERR(req);
794 goto out;
795 }
796
797 req->r_dentry = dget(dentry);
798 req->r_num_caps = 2;
799 req->r_locked_dir = dir;
800 req->r_args.mkdir.mode = cpu_to_le32(mode);
801 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
802 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
803 if (acls.pagelist) {
804 req->r_pagelist = acls.pagelist;
805 acls.pagelist = NULL;
806 }
807 err = ceph_mdsc_do_request(mdsc, dir, req);
808 if (!err && !req->r_reply_info.head->is_dentry)
809 err = ceph_handle_notrace_create(dir, dentry);
810 ceph_mdsc_put_request(req);
811 out:
812 if (!err)
813 ceph_init_inode_acls(dentry->d_inode, &acls);
814 else
815 d_drop(dentry);
816 ceph_release_acls_info(&acls);
817 return err;
818 }
819
820 static int ceph_link(struct dentry *old_dentry, struct inode *dir,
821 struct dentry *dentry)
822 {
823 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
824 struct ceph_mds_client *mdsc = fsc->mdsc;
825 struct ceph_mds_request *req;
826 int err;
827
828 if (ceph_snap(dir) != CEPH_NOSNAP)
829 return -EROFS;
830
831 dout("link in dir %p old_dentry %p dentry %p\n", dir,
832 old_dentry, dentry);
833 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
834 if (IS_ERR(req)) {
835 d_drop(dentry);
836 return PTR_ERR(req);
837 }
838 req->r_dentry = dget(dentry);
839 req->r_num_caps = 2;
840 req->r_old_dentry = dget(old_dentry);
841 req->r_locked_dir = dir;
842 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
843 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
844 /* release LINK_SHARED on source inode (mds will lock it) */
845 req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
846 err = ceph_mdsc_do_request(mdsc, dir, req);
847 if (err) {
848 d_drop(dentry);
849 } else if (!req->r_reply_info.head->is_dentry) {
850 ihold(old_dentry->d_inode);
851 d_instantiate(dentry, old_dentry->d_inode);
852 }
853 ceph_mdsc_put_request(req);
854 return err;
855 }
856
857 /*
858  * For a soon-to-be unlinked file, drop the LINK caps.  If it
859 * looks like the link count will hit 0, drop any other caps (other
860 * than PIN) we don't specifically want (due to the file still being
861 * open).
862 */
863 static int drop_caps_for_unlink(struct inode *inode)
864 {
865 struct ceph_inode_info *ci = ceph_inode(inode);
866 int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
867
868 spin_lock(&ci->i_ceph_lock);
869 if (inode->i_nlink == 1) {
870 drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
871 ci->i_ceph_flags |= CEPH_I_NODELAY;
872 }
873 spin_unlock(&ci->i_ceph_lock);
874 return drop;
875 }
876
877 /*
878  * rmdir and unlink differ only in the metadata op code
879 */
880 static int ceph_unlink(struct inode *dir, struct dentry *dentry)
881 {
882 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
883 struct ceph_mds_client *mdsc = fsc->mdsc;
884 struct inode *inode = dentry->d_inode;
885 struct ceph_mds_request *req;
886 int err = -EROFS;
887 int op;
888
889 if (ceph_snap(dir) == CEPH_SNAPDIR) {
890 /* rmdir .snap/foo is RMSNAP */
891 dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
892 op = CEPH_MDS_OP_RMSNAP;
893 } else if (ceph_snap(dir) == CEPH_NOSNAP) {
894 dout("unlink/rmdir dir %p dn %p inode %p\n",
895 dir, dentry, inode);
896 op = S_ISDIR(dentry->d_inode->i_mode) ?
897 CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
898 } else
899 goto out;
900 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
901 if (IS_ERR(req)) {
902 err = PTR_ERR(req);
903 goto out;
904 }
905 req->r_dentry = dget(dentry);
906 req->r_num_caps = 2;
907 req->r_locked_dir = dir;
908 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
909 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
910 req->r_inode_drop = drop_caps_for_unlink(inode);
911 err = ceph_mdsc_do_request(mdsc, dir, req);
912 if (!err && !req->r_reply_info.head->is_dentry)
913 d_delete(dentry);
914 ceph_mdsc_put_request(req);
915 out:
916 return err;
917 }
918
919 static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
920 struct inode *new_dir, struct dentry *new_dentry)
921 {
922 struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
923 struct ceph_mds_client *mdsc = fsc->mdsc;
924 struct ceph_mds_request *req;
925 int err;
926
927 if (ceph_snap(old_dir) != ceph_snap(new_dir))
928 return -EXDEV;
929 if (ceph_snap(old_dir) != CEPH_NOSNAP ||
930 ceph_snap(new_dir) != CEPH_NOSNAP)
931 return -EROFS;
932 dout("rename dir %p dentry %p to dir %p dentry %p\n",
933 old_dir, old_dentry, new_dir, new_dentry);
934 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
935 if (IS_ERR(req))
936 return PTR_ERR(req);
937 ihold(old_dir);
938 req->r_dentry = dget(new_dentry);
939 req->r_num_caps = 2;
940 req->r_old_dentry = dget(old_dentry);
941 req->r_old_dentry_dir = old_dir;
942 req->r_locked_dir = new_dir;
943 req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
944 req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
945 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
946 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
947 	/* release LINK_SHARED on source inode (mds will lock it) */
948 req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
949 if (new_dentry->d_inode)
950 req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
951 err = ceph_mdsc_do_request(mdsc, old_dir, req);
952 if (!err && !req->r_reply_info.head->is_dentry) {
953 /*
954 * Normally d_move() is done by fill_trace (called by
955 * do_request, above). If there is no trace, we need
956 * to do it here.
957 */
958
959 d_move(old_dentry, new_dentry);
960
961 /* ensure target dentry is invalidated, despite
962 rehashing bug in vfs_rename_dir */
963 ceph_invalidate_dentry_lease(new_dentry);
964
965 /* d_move screws up sibling dentries' offsets */
966 ceph_dir_clear_complete(old_dir);
967 ceph_dir_clear_complete(new_dir);
968
969 }
970 ceph_mdsc_put_request(req);
971 return err;
972 }
973
974 /*
975 * Ensure a dentry lease will no longer revalidate.
976 */
977 void ceph_invalidate_dentry_lease(struct dentry *dentry)
978 {
979 spin_lock(&dentry->d_lock);
980 dentry->d_time = jiffies;
981 ceph_dentry(dentry)->lease_shared_gen = 0;
982 spin_unlock(&dentry->d_lock);
983 }
984
985 /*
986 * Check if dentry lease is valid. If not, delete the lease. Try to
987  * renew if the lease is more than half up.
988 */
989 static int dentry_lease_is_valid(struct dentry *dentry)
990 {
991 struct ceph_dentry_info *di;
992 struct ceph_mds_session *s;
993 int valid = 0;
994 u32 gen;
995 unsigned long ttl;
996 struct ceph_mds_session *session = NULL;
997 struct inode *dir = NULL;
998 u32 seq = 0;
999
1000 spin_lock(&dentry->d_lock);
1001 di = ceph_dentry(dentry);
1002 if (di->lease_session) {
1003 s = di->lease_session;
1004 spin_lock(&s->s_gen_ttl_lock);
1005 gen = s->s_cap_gen;
1006 ttl = s->s_cap_ttl;
1007 spin_unlock(&s->s_gen_ttl_lock);
1008
1009 if (di->lease_gen == gen &&
1010 time_before(jiffies, dentry->d_time) &&
1011 time_before(jiffies, ttl)) {
1012 valid = 1;
1013 if (di->lease_renew_after &&
1014 time_after(jiffies, di->lease_renew_after)) {
1015 /* we should renew */
1016 dir = dentry->d_parent->d_inode;
1017 session = ceph_get_mds_session(s);
1018 seq = di->lease_seq;
1019 di->lease_renew_after = 0;
1020 di->lease_renew_from = jiffies;
1021 }
1022 }
1023 }
1024 spin_unlock(&dentry->d_lock);
1025
1026 if (session) {
1027 ceph_mdsc_lease_send_msg(session, dir, dentry,
1028 CEPH_MDS_LEASE_RENEW, seq);
1029 ceph_put_mds_session(session);
1030 }
1031 dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
1032 return valid;
1033 }
1034
1035 /*
1036 * Check if directory-wide content lease/cap is valid.
1037 */
1038 static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
1039 {
1040 struct ceph_inode_info *ci = ceph_inode(dir);
1041 struct ceph_dentry_info *di = ceph_dentry(dentry);
1042 int valid = 0;
1043
1044 spin_lock(&ci->i_ceph_lock);
1045 if (ci->i_shared_gen == di->lease_shared_gen)
1046 valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
1047 spin_unlock(&ci->i_ceph_lock);
1048 dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
1049 dir, (unsigned)ci->i_shared_gen, dentry,
1050 (unsigned)di->lease_shared_gen, valid);
1051 return valid;
1052 }
1053
1054 /*
1055 * Check if cached dentry can be trusted.
1056 */
1057 static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
1058 {
1059 int valid = 0;
1060 struct inode *dir;
1061
1062 if (flags & LOOKUP_RCU)
1063 return -ECHILD;
1064
1065 dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
1066 dentry, dentry->d_inode, ceph_dentry(dentry)->offset);
1067
1068 dir = ceph_get_dentry_parent_inode(dentry);
1069
1070 	/* always trust cached snapped dentries and the snapdir dentry */
1071 if (ceph_snap(dir) != CEPH_NOSNAP) {
1072 dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
1073 dentry, dentry->d_inode);
1074 valid = 1;
1075 } else if (dentry->d_inode &&
1076 ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
1077 valid = 1;
1078 } else if (dentry_lease_is_valid(dentry) ||
1079 dir_lease_is_valid(dir, dentry)) {
1080 if (dentry->d_inode)
1081 valid = ceph_is_any_caps(dentry->d_inode);
1082 else
1083 valid = 1;
1084 }
1085
1086 dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
1087 if (valid) {
1088 ceph_dentry_lru_touch(dentry);
1089 } else {
1090 ceph_dir_clear_complete(dir);
1091 }
1092 iput(dir);
1093 return valid;
1094 }
1095
1096 /*
1097 * Release our ceph_dentry_info.
1098 */
1099 static void ceph_d_release(struct dentry *dentry)
1100 {
1101 struct ceph_dentry_info *di = ceph_dentry(dentry);
1102
1103 dout("d_release %p\n", dentry);
1104 ceph_dentry_lru_del(dentry);
1105 if (di->lease_session)
1106 ceph_put_mds_session(di->lease_session);
1107 kmem_cache_free(ceph_dentry_cachep, di);
1108 dentry->d_fsdata = NULL;
1109 }
1110
1111 static int ceph_snapdir_d_revalidate(struct dentry *dentry,
1112 unsigned int flags)
1113 {
1114 /*
1115 * Eventually, we'll want to revalidate snapped metadata
1116 * too... probably...
1117 */
1118 return 1;
1119 }
1120
1121 /*
1122 * When the VFS prunes a dentry from the cache, we need to clear the
1123 * complete flag on the parent directory.
1124 *
1125 * Called under dentry->d_lock.
1126 */
1127 static void ceph_d_prune(struct dentry *dentry)
1128 {
1129 dout("ceph_d_prune %p\n", dentry);
1130
1131 /* do we have a valid parent? */
1132 if (IS_ROOT(dentry))
1133 return;
1134
1135 /* if we are not hashed, we don't affect dir's completeness */
1136 if (d_unhashed(dentry))
1137 return;
1138
1139 /*
1140 * we hold d_lock, so d_parent is stable, and d_fsdata is never
1141 * cleared until d_release
1142 */
1143 ceph_dir_clear_complete(dentry->d_parent->d_inode);
1144 }
1145
1146 /*
1147 * read() on a dir. This weird interface hack only works if mounted
1148 * with '-o dirstat'.
1149 */
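/*
 * Example (illustrative values): with -o dirstat, reading the directory
 * itself returns the report built by the snprintf() below, along the
 * lines of
 *
 *	entries:	3
 *	 files:		2
 *	 subdirs:	1
 *	rentries:	17
 *	 rfiles:	12
 *	 rsubdirs:	5
 *	rbytes:		4096
 *	rctime:		1400000000.000000000
 */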
1150 static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
1151 loff_t *ppos)
1152 {
1153 struct ceph_file_info *cf = file->private_data;
1154 struct inode *inode = file_inode(file);
1155 struct ceph_inode_info *ci = ceph_inode(inode);
1156 int left;
1157 const int bufsize = 1024;
1158
1159 if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
1160 return -EISDIR;
1161
1162 if (!cf->dir_info) {
1163 cf->dir_info = kmalloc(bufsize, GFP_NOFS);
1164 if (!cf->dir_info)
1165 return -ENOMEM;
1166 cf->dir_info_len =
1167 snprintf(cf->dir_info, bufsize,
1168 "entries: %20lld\n"
1169 " files: %20lld\n"
1170 " subdirs: %20lld\n"
1171 "rentries: %20lld\n"
1172 " rfiles: %20lld\n"
1173 " rsubdirs: %20lld\n"
1174 "rbytes: %20lld\n"
1175 "rctime: %10ld.%09ld\n",
1176 ci->i_files + ci->i_subdirs,
1177 ci->i_files,
1178 ci->i_subdirs,
1179 ci->i_rfiles + ci->i_rsubdirs,
1180 ci->i_rfiles,
1181 ci->i_rsubdirs,
1182 ci->i_rbytes,
1183 (long)ci->i_rctime.tv_sec,
1184 (long)ci->i_rctime.tv_nsec);
1185 }
1186
1187 if (*ppos >= cf->dir_info_len)
1188 return 0;
1189 size = min_t(unsigned, size, cf->dir_info_len-*ppos);
1190 left = copy_to_user(buf, cf->dir_info + *ppos, size);
1191 if (left == size)
1192 return -EFAULT;
1193 *ppos += (size - left);
1194 return size - left;
1195 }
1196
1197 /*
1198 * an fsync() on a dir will wait for any uncommitted directory
1199 * operations to commit.
1200 */
1201 static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
1202 int datasync)
1203 {
1204 struct inode *inode = file_inode(file);
1205 struct ceph_inode_info *ci = ceph_inode(inode);
1206 struct list_head *head = &ci->i_unsafe_dirops;
1207 struct ceph_mds_request *req;
1208 u64 last_tid;
1209 int ret = 0;
1210
1211 dout("dir_fsync %p\n", inode);
1212 ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
1213 if (ret)
1214 return ret;
1215 mutex_lock(&inode->i_mutex);
1216
1217 spin_lock(&ci->i_unsafe_lock);
1218 if (list_empty(head))
1219 goto out;
1220
1221 req = list_entry(head->prev,
1222 struct ceph_mds_request, r_unsafe_dir_item);
1223 last_tid = req->r_tid;
1224
1225 do {
1226 ceph_mdsc_get_request(req);
1227 spin_unlock(&ci->i_unsafe_lock);
1228
1229 dout("dir_fsync %p wait on tid %llu (until %llu)\n",
1230 inode, req->r_tid, last_tid);
1231 if (req->r_timeout) {
1232 ret = wait_for_completion_timeout(
1233 &req->r_safe_completion, req->r_timeout);
1234 if (ret > 0)
1235 ret = 0;
1236 else if (ret == 0)
1237 ret = -EIO; /* timed out */
1238 } else {
1239 wait_for_completion(&req->r_safe_completion);
1240 }
1241 ceph_mdsc_put_request(req);
1242
1243 spin_lock(&ci->i_unsafe_lock);
1244 if (ret || list_empty(head))
1245 break;
1246 req = list_entry(head->next,
1247 struct ceph_mds_request, r_unsafe_dir_item);
1248 } while (req->r_tid < last_tid);
1249 out:
1250 spin_unlock(&ci->i_unsafe_lock);
1251 mutex_unlock(&inode->i_mutex);
1252
1253 return ret;
1254 }
1255
1256 /*
1257 * We maintain a private dentry LRU.
1258 *
1259 * FIXME: this needs to be changed to a per-mds lru to be useful.
1260 */
1261 void ceph_dentry_lru_add(struct dentry *dn)
1262 {
1263 struct ceph_dentry_info *di = ceph_dentry(dn);
1264 struct ceph_mds_client *mdsc;
1265
1266 dout("dentry_lru_add %p %p '%pd'\n", di, dn, dn);
1267 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1268 spin_lock(&mdsc->dentry_lru_lock);
1269 list_add_tail(&di->lru, &mdsc->dentry_lru);
1270 mdsc->num_dentry++;
1271 spin_unlock(&mdsc->dentry_lru_lock);
1272 }
1273
1274 void ceph_dentry_lru_touch(struct dentry *dn)
1275 {
1276 struct ceph_dentry_info *di = ceph_dentry(dn);
1277 struct ceph_mds_client *mdsc;
1278
1279 dout("dentry_lru_touch %p %p '%pd' (offset %lld)\n", di, dn, dn,
1280 di->offset);
1281 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1282 spin_lock(&mdsc->dentry_lru_lock);
1283 list_move_tail(&di->lru, &mdsc->dentry_lru);
1284 spin_unlock(&mdsc->dentry_lru_lock);
1285 }
1286
1287 void ceph_dentry_lru_del(struct dentry *dn)
1288 {
1289 struct ceph_dentry_info *di = ceph_dentry(dn);
1290 struct ceph_mds_client *mdsc;
1291
1292 dout("dentry_lru_del %p %p '%pd'\n", di, dn, dn);
1293 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1294 spin_lock(&mdsc->dentry_lru_lock);
1295 list_del_init(&di->lru);
1296 mdsc->num_dentry--;
1297 spin_unlock(&mdsc->dentry_lru_lock);
1298 }
1299
1300 /*
1301 * Return name hash for a given dentry. This is dependent on
1302 * the parent directory's hash function.
1303 */
1304 unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
1305 {
1306 struct ceph_inode_info *dci = ceph_inode(dir);
1307
1308 switch (dci->i_dir_layout.dl_dir_hash) {
1309 case 0: /* for backward compat */
1310 case CEPH_STR_HASH_LINUX:
1311 return dn->d_name.hash;
1312
1313 default:
1314 return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
1315 dn->d_name.name, dn->d_name.len);
1316 }
1317 }
1318
1319 const struct file_operations ceph_dir_fops = {
1320 .read = ceph_read_dir,
1321 .iterate = ceph_readdir,
1322 .llseek = ceph_dir_llseek,
1323 .open = ceph_open,
1324 .release = ceph_release,
1325 .unlocked_ioctl = ceph_ioctl,
1326 .fsync = ceph_dir_fsync,
1327 };
1328
1329 const struct inode_operations ceph_dir_iops = {
1330 .lookup = ceph_lookup,
1331 .permission = ceph_permission,
1332 .getattr = ceph_getattr,
1333 .setattr = ceph_setattr,
1334 .setxattr = ceph_setxattr,
1335 .getxattr = ceph_getxattr,
1336 .listxattr = ceph_listxattr,
1337 .removexattr = ceph_removexattr,
1338 .get_acl = ceph_get_acl,
1339 .set_acl = ceph_set_acl,
1340 .mknod = ceph_mknod,
1341 .symlink = ceph_symlink,
1342 .mkdir = ceph_mkdir,
1343 .link = ceph_link,
1344 .unlink = ceph_unlink,
1345 .rmdir = ceph_unlink,
1346 .rename = ceph_rename,
1347 .create = ceph_create,
1348 .atomic_open = ceph_atomic_open,
1349 };
1350
1351 const struct dentry_operations ceph_dentry_ops = {
1352 .d_revalidate = ceph_d_revalidate,
1353 .d_release = ceph_d_release,
1354 .d_prune = ceph_d_prune,
1355 };
1356
1357 const struct dentry_operations ceph_snapdir_dentry_ops = {
1358 .d_revalidate = ceph_snapdir_d_revalidate,
1359 .d_release = ceph_d_release,
1360 };
1361
1362 const struct dentry_operations ceph_snap_dentry_ops = {
1363 .d_release = ceph_d_release,
1364 .d_prune = ceph_d_prune,
1365 };