#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path. Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino). The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
        struct ceph_dentry_info *di;

        if (dentry->d_fsdata)
                return 0;

        if (dentry->d_parent == NULL ||   /* nfs fh_to_dentry */
            ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
                d_set_d_op(dentry, &ceph_dentry_ops);
        else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
                d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
        else
                d_set_d_op(dentry, &ceph_snap_dentry_ops);

        di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
        if (!di)
                return -ENOMEM;          /* oh well */

        spin_lock(&dentry->d_lock);
        if (dentry->d_fsdata) {
                /* lost a race */
                kmem_cache_free(ceph_dentry_cachep, di);
                goto out_unlock;
        }
        di->dentry = dentry;
        di->lease_session = NULL;
        dentry->d_fsdata = di;
        dentry->d_time = jiffies;
        ceph_dentry_lru_add(dentry);
out_unlock:
        spin_unlock(&dentry->d_lock);
        return 0;
}



/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
        return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
        return p & 0xffffffff;
}
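
/*
 * For example, the entry at offset 3 within a frag whose value is
 * 0x1000 is encoded as f_pos 0x0000100000000003: fpos_frag() then
 * recovers 0x1000 and fpos_off() recovers 3.
 */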

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache. We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * I_COMPLETE indicates we have all dentries in the dir. It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *filp,
                            void *dirent, filldir_t filldir)
{
        struct ceph_file_info *fi = filp->private_data;
        struct dentry *parent = filp->f_dentry;
        struct inode *dir = parent->d_inode;
        struct list_head *p;
        struct dentry *dentry, *last;
        struct ceph_dentry_info *di;
        int err = 0;

        /* claim ref on last dentry we returned */
        last = fi->dentry;
        fi->dentry = NULL;

        dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
             last);

        spin_lock(&parent->d_lock);

        /* start at beginning? */
        if (filp->f_pos == 2 || last == NULL ||
            filp->f_pos < ceph_dentry(last)->offset) {
                if (list_empty(&parent->d_subdirs))
                        goto out_unlock;
                p = parent->d_subdirs.prev;
                dout(" initial p %p/%p\n", p->prev, p->next);
        } else {
                p = last->d_u.d_child.prev;
        }

more:
        dentry = list_entry(p, struct dentry, d_u.d_child);
        di = ceph_dentry(dentry);
        while (1) {
                dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
                     d_unhashed(dentry) ? "!hashed" : "hashed",
                     parent->d_subdirs.prev, parent->d_subdirs.next);
                if (p == &parent->d_subdirs) {
                        fi->at_end = 1;
                        goto out_unlock;
                }
                spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
                if (!d_unhashed(dentry) && dentry->d_inode &&
                    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
                    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
                    filp->f_pos <= di->offset)
                        break;
                dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
                     dentry->d_name.len, dentry->d_name.name, di->offset,
                     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
                     !dentry->d_inode ? " null" : "");
                spin_unlock(&dentry->d_lock);
                p = p->prev;
                dentry = list_entry(p, struct dentry, d_u.d_child);
                di = ceph_dentry(dentry);
        }

        dget_dlock(dentry);
        spin_unlock(&dentry->d_lock);
        spin_unlock(&parent->d_lock);

        dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
             dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
        filp->f_pos = di->offset;
        err = filldir(dirent, dentry->d_name.name,
                      dentry->d_name.len, di->offset,
                      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
                      dentry->d_inode->i_mode >> 12);

        if (last) {
                if (err < 0) {
                        /* remember our position */
                        fi->dentry = last;
                        fi->next_offset = di->offset;
                } else {
                        dput(last);
                }
        }
        last = dentry;

        if (err < 0)
                goto out;

        filp->f_pos++;

        /* make sure a dentry wasn't dropped while we didn't have parent lock */
        if (!ceph_i_test(dir, CEPH_I_COMPLETE)) {
                dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
                err = -EAGAIN;
                goto out;
        }

        spin_lock(&parent->d_lock);
        p = p->prev;    /* advance to next dentry */
        goto more;

out_unlock:
        spin_unlock(&parent->d_lock);
out:
        if (last)
                dput(last);
        return err;
}

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
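/*
 * (The saved name is handed back to the MDS as req->r_path2 on the
 * next readdir request, so the listing resumes after that entry; see
 * ceph_readdir() below.)
 */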
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
                            int len)
{
        kfree(fi->last_name);
        fi->last_name = kmalloc(len+1, GFP_NOFS);
        if (!fi->last_name)
                return -ENOMEM;
        memcpy(fi->last_name, name, len);
        fi->last_name[len] = 0;
        dout("note_last_dentry '%s'\n", fi->last_name);
        return 0;
}

static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
        struct ceph_file_info *fi = filp->private_data;
        struct inode *inode = filp->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        unsigned frag = fpos_frag(filp->f_pos);
        int off = fpos_off(filp->f_pos);
        int err;
        u32 ftype;
        struct ceph_mds_reply_info_parsed *rinfo;
        const int max_entries = fsc->mount_options->max_readdir;
        const int max_bytes = fsc->mount_options->max_readdir_bytes;

        dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
        if (fi->at_end)
                return 0;

        /* always start with . and .. */
        if (filp->f_pos == 0) {
                /* note dir version at start of readdir so we can tell
                 * if any dentries get dropped */
                fi->dir_release_count = ci->i_release_count;

                dout("readdir off 0 -> '.'\n");
                if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
                            ceph_translate_ino(inode->i_sb, inode->i_ino),
                            inode->i_mode >> 12) < 0)
                        return 0;
                filp->f_pos = 1;
                off = 1;
        }
        if (filp->f_pos == 1) {
                ino_t ino = parent_ino(filp->f_dentry);
                dout("readdir off 1 -> '..'\n");
                if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
                            ceph_translate_ino(inode->i_sb, ino),
                            inode->i_mode >> 12) < 0)
                        return 0;
                filp->f_pos = 2;
                off = 2;
        }
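
        /*
         * Offsets 0 and 1 are reserved for "." and ".." above; entries
         * in the first fragment therefore start at offset 2 (cf. the
         * fi->next_offset handling below and in reset_readdir()).
         */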

        /* can we use the dcache? */
        spin_lock(&inode->i_lock);
        if ((filp->f_pos == 2 || fi->dentry) &&
            !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
            ceph_snap(inode) != CEPH_SNAPDIR &&
            (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
            __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
                spin_unlock(&inode->i_lock);
                err = __dcache_readdir(filp, dirent, filldir);
                if (err != -EAGAIN)
                        return err;
        } else {
                spin_unlock(&inode->i_lock);
        }
        if (fi->dentry) {
                err = note_last_dentry(fi, fi->dentry->d_name.name,
                                       fi->dentry->d_name.len);
                if (err)
                        return err;
                dput(fi->dentry);
                fi->dentry = NULL;
        }

        /* proceed with a normal readdir */

more:
        /* do we have the correct frag content buffered? */
        if (fi->frag != frag || fi->last_readdir == NULL) {
                struct ceph_mds_request *req;
                int op = ceph_snap(inode) == CEPH_SNAPDIR ?
                        CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

                /* discard old result, if any */
                if (fi->last_readdir) {
                        ceph_mdsc_put_request(fi->last_readdir);
                        fi->last_readdir = NULL;
                }

                /* requery frag tree, as the frag topology may have changed */
                frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

                dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
                     ceph_vinop(inode), frag, fi->last_name);
                req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
                if (IS_ERR(req))
                        return PTR_ERR(req);
                req->r_inode = inode;
                ihold(inode);
                req->r_dentry = dget(filp->f_dentry);
                /* hints to request -> mds selection code */
                req->r_direct_mode = USE_AUTH_MDS;
                req->r_direct_hash = ceph_frag_value(frag);
                req->r_direct_is_hash = true;
                req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
                req->r_readdir_offset = fi->next_offset;
                req->r_args.readdir.frag = cpu_to_le32(frag);
                req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
                req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
                req->r_num_caps = max_entries + 1;
                err = ceph_mdsc_do_request(mdsc, NULL, req);
                if (err < 0) {
                        ceph_mdsc_put_request(req);
                        return err;
                }
                dout("readdir got and parsed readdir result=%d"
                     " on frag %x, end=%d, complete=%d\n", err, frag,
                     (int)req->r_reply_info.dir_end,
                     (int)req->r_reply_info.dir_complete);

                if (!req->r_did_prepopulate) {
                        dout("readdir !did_prepopulate");
                        fi->dir_release_count--;    /* preclude I_COMPLETE */
                }

                /* note next offset and last dentry name */
                fi->offset = fi->next_offset;
                fi->last_readdir = req;

                if (req->r_reply_info.dir_end) {
                        kfree(fi->last_name);
                        fi->last_name = NULL;
                        if (ceph_frag_is_rightmost(frag))
                                fi->next_offset = 2;
                        else
                                fi->next_offset = 0;
                } else {
                        rinfo = &req->r_reply_info;
                        err = note_last_dentry(fi,
                                       rinfo->dir_dname[rinfo->dir_nr-1],
                                       rinfo->dir_dname_len[rinfo->dir_nr-1]);
                        if (err)
                                return err;
                        fi->next_offset += rinfo->dir_nr;
                }
        }

        rinfo = &fi->last_readdir->r_reply_info;
        dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
             rinfo->dir_nr, off, fi->offset);
        while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
                u64 pos = ceph_make_fpos(frag, off);
                struct ceph_mds_reply_inode *in =
                        rinfo->dir_in[off - fi->offset].in;
                struct ceph_vino vino;
                ino_t ino;

                dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
                     off, off - fi->offset, rinfo->dir_nr, pos,
                     rinfo->dir_dname_len[off - fi->offset],
                     rinfo->dir_dname[off - fi->offset], in);
                BUG_ON(!in);
                ftype = le32_to_cpu(in->mode) >> 12;
                vino.ino = le64_to_cpu(in->ino);
                vino.snap = le64_to_cpu(in->snapid);
                ino = ceph_vino_to_ino(vino);
                if (filldir(dirent,
                            rinfo->dir_dname[off - fi->offset],
                            rinfo->dir_dname_len[off - fi->offset],
                            pos,
                            ceph_translate_ino(inode->i_sb, ino), ftype) < 0) {
                        dout("filldir stopping us...\n");
                        return 0;
                }
                off++;
                filp->f_pos = pos + 1;
        }

        if (fi->last_name) {
                ceph_mdsc_put_request(fi->last_readdir);
                fi->last_readdir = NULL;
                goto more;
        }

        /* more frags? */
        if (!ceph_frag_is_rightmost(frag)) {
                frag = ceph_frag_next(frag);
                off = 0;
                filp->f_pos = ceph_make_fpos(frag, off);
                dout("readdir next frag is %x\n", frag);
                goto more;
        }
        fi->at_end = 1;

        /*
         * if dir_release_count still matches the dir, no dentries
         * were released during the whole readdir, and we should have
         * the complete dir contents in our cache.
         */
        spin_lock(&inode->i_lock);
        if (ci->i_release_count == fi->dir_release_count) {
                dout(" marking %p complete\n", inode);
                /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
                ci->i_max_offset = filp->f_pos;
        }
        spin_unlock(&inode->i_lock);

        dout("readdir %p filp %p done.\n", inode, filp);
        return 0;
}

static void reset_readdir(struct ceph_file_info *fi)
{
        if (fi->last_readdir) {
                ceph_mdsc_put_request(fi->last_readdir);
                fi->last_readdir = NULL;
        }
        kfree(fi->last_name);
        fi->last_name = NULL;
        fi->next_offset = 2;  /* compensate for . and .. */
        if (fi->dentry) {
                dput(fi->dentry);
                fi->dentry = NULL;
        }
        fi->at_end = 0;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
{
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file->f_mapping->host;
        loff_t old_offset = offset;
        loff_t retval;

        mutex_lock(&inode->i_mutex);
        retval = -EINVAL;
        switch (origin) {
        case SEEK_END:
                offset += inode->i_size + 2;   /* FIXME */
                break;
        case SEEK_CUR:
                offset += file->f_pos;
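                /* fall through */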
        case SEEK_SET:
                break;
        default:
                goto out;
        }

        if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
                if (offset != file->f_pos) {
                        file->f_pos = offset;
                        file->f_version = 0;
                        fi->at_end = 0;
                }
                retval = offset;

                /*
                 * discard buffered readdir content on seekdir(0), or
                 * seek to new frag, or seek prior to current chunk.
                 */
                if (offset == 0 ||
                    fpos_frag(offset) != fpos_frag(old_offset) ||
                    fpos_off(offset) < fi->offset) {
                        dout("dir_llseek dropping %p content\n", file);
                        reset_readdir(fi);
                }

                /* bump dir_release_count if we did a forward seek */
                if (offset > old_offset)
                        fi->dir_release_count--;
        }
out:
        mutex_unlock(&inode->i_mutex);
        return retval;
}

/*
 * Process result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
                                  struct dentry *dentry, int err)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
        struct inode *parent = dentry->d_parent->d_inode;

        /* .snap dir? */
        if (err == -ENOENT &&
            ceph_snap(parent) == CEPH_NOSNAP &&
            strcmp(dentry->d_name.name,
                   fsc->mount_options->snapdir_name) == 0) {
                struct inode *inode = ceph_get_snapdir(parent);
                dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
                     dentry, dentry->d_name.len, dentry->d_name.name, inode);
                BUG_ON(!d_unhashed(dentry));
                d_add(dentry, inode);
                err = 0;
        }

        if (err == -ENOENT) {
                /* no trace? */
                err = 0;
                if (!req->r_reply_info.head->is_dentry) {
                        dout("ENOENT and no trace, dentry %p inode %p\n",
                             dentry, dentry->d_inode);
                        if (dentry->d_inode) {
                                d_drop(dentry);
                                err = -ENOENT;
                        } else {
                                d_add(dentry, NULL);
                        }
                }
        }
        if (err)
                dentry = ERR_PTR(err);
        else if (dentry != req->r_dentry)
                dentry = dget(req->r_dentry);   /* we got spliced */
        else
                dentry = NULL;
        return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
        return ceph_ino(inode) == CEPH_INO_ROOT &&
                strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry. If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                                  struct nameidata *nd)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int op;
        int err;

        dout("lookup %p dentry %p '%.*s'\n",
             dir, dentry, dentry->d_name.len, dentry->d_name.name);

        if (dentry->d_name.len > NAME_MAX)
                return ERR_PTR(-ENAMETOOLONG);

        err = ceph_init_dentry(dentry);
        if (err < 0)
                return ERR_PTR(err);

        /* open (but not create!) intent? */
        if (nd &&
            (nd->flags & LOOKUP_OPEN) &&
            !(nd->intent.open.flags & O_CREAT)) {
                int mode = nd->intent.open.create_mode & ~current->fs->umask;
                return ceph_lookup_open(dir, dentry, nd, mode, 1);
        }

        /* can we conclude ENOENT locally? */
        if (dentry->d_inode == NULL) {
                struct ceph_inode_info *ci = ceph_inode(dir);
                struct ceph_dentry_info *di = ceph_dentry(dentry);

                spin_lock(&dir->i_lock);
                dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
                if (strncmp(dentry->d_name.name,
                            fsc->mount_options->snapdir_name,
                            dentry->d_name.len) &&
                    !is_root_ceph_dentry(dir, dentry) &&
                    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
                    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
                        spin_unlock(&dir->i_lock);
                        dout(" dir %p complete, -ENOENT\n", dir);
                        d_add(dentry, NULL);
                        di->lease_shared_gen = ci->i_shared_gen;
                        return NULL;
                }
                spin_unlock(&dir->i_lock);
        }

        op = ceph_snap(dir) == CEPH_SNAPDIR ?
                CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
        req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
        if (IS_ERR(req))
                return ERR_CAST(req);
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        /* we only need inode linkage */
        req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
        req->r_locked_dir = dir;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        dentry = ceph_finish_lookup(req, dentry, err);
        ceph_mdsc_put_request(req);  /* will dput(dentry) */
        dout("lookup result=%p\n", dentry);
        return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
        struct dentry *result = ceph_lookup(dir, dentry, NULL);

        if (result && !IS_ERR(result)) {
                /*
                 * We created the item, then did a lookup, and found
                 * it was already linked to another inode we already
                 * had in our cache (and thus got spliced). Link our
                 * dentry to that inode, but don't hash it, just in
                 * case the VFS wants to dereference it.
                 */
                BUG_ON(!result->d_inode);
                d_instantiate(dentry, result->d_inode);
                return 0;
        }
        return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
                      int mode, dev_t rdev)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
             dir, dentry, mode, rdev);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_args.mknod.mode = cpu_to_le32(mode);
        req->r_args.mknod.rdev = cpu_to_le32(rdev);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
        if (err)
                d_drop(dentry);
        return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
                       struct nameidata *nd)
{
        dout("create in dir %p dentry %p name '%.*s'\n",
             dir, dentry, dentry->d_name.len, dentry->d_name.name);

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        if (nd) {
                BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
                dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
                /* hrm, what should i do here if we get aliased? */
                if (IS_ERR(dentry))
                        return PTR_ERR(dentry);
                return 0;
        }

        /* fall back to mknod */
        return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
                        const char *dest)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_path2 = kstrdup(dest, GFP_NOFS);
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
        if (err)
                d_drop(dentry);
        return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err = -EROFS;
        int op;

        if (ceph_snap(dir) == CEPH_SNAPDIR) {
                /* mkdir .snap/foo is a MKSNAP */
                op = CEPH_MDS_OP_MKSNAP;
                dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
                     dentry->d_name.len, dentry->d_name.name, dentry);
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
                dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
                op = CEPH_MDS_OP_MKDIR;
        } else {
                goto out;
        }
        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_args.mkdir.mode = cpu_to_le32(mode);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
out:
        if (err < 0)
                d_drop(dentry);
        return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
                     struct dentry *dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("link in dir %p old_dentry %p dentry %p\n", dir,
             old_dentry, dentry);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (err) {
                d_drop(dentry);
        } else if (!req->r_reply_info.head->is_dentry) {
                ihold(old_dentry->d_inode);
                d_instantiate(dentry, old_dentry->d_inode);
        }
        ceph_mdsc_put_request(req);
        return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

        spin_lock(&inode->i_lock);
        if (inode->i_nlink == 1) {
                drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
                ci->i_ceph_flags |= CEPH_I_NODELAY;
        }
        spin_unlock(&inode->i_lock);
        return drop;
}

/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct inode *inode = dentry->d_inode;
        struct ceph_mds_request *req;
        int err = -EROFS;
        int op;

        if (ceph_snap(dir) == CEPH_SNAPDIR) {
                /* rmdir .snap/foo is RMSNAP */
                dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
                     dentry->d_name.name, dentry);
                op = CEPH_MDS_OP_RMSNAP;
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
                dout("unlink/rmdir dir %p dn %p inode %p\n",
                     dir, dentry, inode);
                op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
                        CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
        } else
                goto out;
        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        req->r_inode_drop = drop_caps_for_unlink(inode);
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                d_delete(dentry);
        ceph_mdsc_put_request(req);
out:
        return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
                       struct inode *new_dir, struct dentry *new_dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(old_dir) != ceph_snap(new_dir))
                return -EXDEV;
        if (ceph_snap(old_dir) != CEPH_NOSNAP ||
            ceph_snap(new_dir) != CEPH_NOSNAP)
                return -EROFS;
        dout("rename dir %p dentry %p to dir %p dentry %p\n",
             old_dir, old_dentry, new_dir, new_dentry);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->r_dentry = dget(new_dentry);
        req->r_num_caps = 2;
        req->r_old_dentry = dget(old_dentry);
        req->r_locked_dir = new_dir;
        req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        /* release LINK_RDCACHE on source inode (mds will lock it) */
        req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
        if (new_dentry->d_inode)
                req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
        err = ceph_mdsc_do_request(mdsc, old_dir, req);
        if (!err && !req->r_reply_info.head->is_dentry) {
                /*
                 * Normally d_move() is done by fill_trace (called by
                 * do_request, above). If there is no trace, we need
                 * to do it here.
                 */

                /* d_move screws up d_subdirs order */
                ceph_i_clear(new_dir, CEPH_I_COMPLETE);

                d_move(old_dentry, new_dentry);

                /* ensure target dentry is invalidated, despite
                   rehashing bug in vfs_rename_dir */
                ceph_invalidate_dentry_lease(new_dentry);
        }
        ceph_mdsc_put_request(req);
        return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
        spin_lock(&dentry->d_lock);
        dentry->d_time = jiffies;
        ceph_dentry(dentry)->lease_shared_gen = 0;
        spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid. If not, delete the lease. Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
        struct ceph_dentry_info *di;
        struct ceph_mds_session *s;
        int valid = 0;
        u32 gen;
        unsigned long ttl;
        struct ceph_mds_session *session = NULL;
        struct inode *dir = NULL;
        u32 seq = 0;

        spin_lock(&dentry->d_lock);
        di = ceph_dentry(dentry);
        if (di && di->lease_session) {
                s = di->lease_session;
                spin_lock(&s->s_cap_lock);
                gen = s->s_cap_gen;
                ttl = s->s_cap_ttl;
                spin_unlock(&s->s_cap_lock);

                if (di->lease_gen == gen &&
                    time_before(jiffies, dentry->d_time) &&
                    time_before(jiffies, ttl)) {
                        valid = 1;
                        if (di->lease_renew_after &&
                            time_after(jiffies, di->lease_renew_after)) {
                                /* we should renew */
                                dir = dentry->d_parent->d_inode;
                                session = ceph_get_mds_session(s);
                                seq = di->lease_seq;
                                di->lease_renew_after = 0;
                                di->lease_renew_from = jiffies;
                        }
                }
        }
        spin_unlock(&dentry->d_lock);

        if (session) {
                ceph_mdsc_lease_send_msg(session, dir, dentry,
                                         CEPH_MDS_LEASE_RENEW, seq);
                ceph_put_mds_session(session);
        }
        dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
        return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        int valid = 0;

        spin_lock(&dir->i_lock);
        if (ci->i_shared_gen == di->lease_shared_gen)
                valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
        spin_unlock(&dir->i_lock);
        dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
             dir, (unsigned)ci->i_shared_gen, dentry,
             (unsigned)di->lease_shared_gen, valid);
        return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
        struct inode *dir;

        if (nd && nd->flags & LOOKUP_RCU)
                return -ECHILD;

        dir = dentry->d_parent->d_inode;

        dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
             dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
             ceph_dentry(dentry)->offset);

        /* always trust cached snapped dentries, snapdir dentry */
        if (ceph_snap(dir) != CEPH_NOSNAP) {
                dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
                     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
                goto out_touch;
        }
        if (dentry->d_inode && ceph_snap(dentry->d_inode) == CEPH_SNAPDIR)
                goto out_touch;

        if (dentry_lease_is_valid(dentry) ||
            dir_lease_is_valid(dir, dentry))
                goto out_touch;

        dout("d_revalidate %p invalid\n", dentry);
        d_drop(dentry);
        return 0;
out_touch:
        ceph_dentry_lru_touch(dentry);
        return 1;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
        struct ceph_dentry_info *di = ceph_dentry(dentry);

        dout("d_release %p\n", dentry);
        if (di) {
                ceph_dentry_lru_del(dentry);
                if (di->lease_session)
                        ceph_put_mds_session(di->lease_session);
                kmem_cache_free(ceph_dentry_cachep, di);
                dentry->d_fsdata = NULL;
        }
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
                                     struct nameidata *nd)
{
        /*
         * Eventually, we'll want to revalidate snapped metadata
         * too... probably...
         */
        return 1;
}



/*
 * read() on a dir. This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
                             loff_t *ppos)
{
        struct ceph_file_info *cf = file->private_data;
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int left;
        const int bufsize = 1024;

        if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
                return -EISDIR;

        if (!cf->dir_info) {
                cf->dir_info = kmalloc(bufsize, GFP_NOFS);
                if (!cf->dir_info)
                        return -ENOMEM;
                cf->dir_info_len =
                        snprintf(cf->dir_info, bufsize,
                                 "entries: %20lld\n"
                                 " files: %20lld\n"
                                 " subdirs: %20lld\n"
                                 "rentries: %20lld\n"
                                 " rfiles: %20lld\n"
                                 " rsubdirs: %20lld\n"
                                 "rbytes: %20lld\n"
                                 "rctime: %10ld.%09ld\n",
                                 ci->i_files + ci->i_subdirs,
                                 ci->i_files,
                                 ci->i_subdirs,
                                 ci->i_rfiles + ci->i_rsubdirs,
                                 ci->i_rfiles,
                                 ci->i_rsubdirs,
                                 ci->i_rbytes,
                                 (long)ci->i_rctime.tv_sec,
                                 (long)ci->i_rctime.tv_nsec);
        }

        if (*ppos >= cf->dir_info_len)
                return 0;
        size = min_t(unsigned, size, cf->dir_info_len-*ppos);
        left = copy_to_user(buf, cf->dir_info + *ppos, size);
        if (left == size)
                return -EFAULT;
        *ppos += (size - left);
        return size - left;
}

/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, int datasync)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct list_head *head = &ci->i_unsafe_dirops;
        struct ceph_mds_request *req;
        u64 last_tid;
        int ret = 0;

        dout("dir_fsync %p\n", inode);
        spin_lock(&ci->i_unsafe_lock);
        if (list_empty(head))
                goto out;

        req = list_entry(head->prev,
                         struct ceph_mds_request, r_unsafe_dir_item);
        last_tid = req->r_tid;

        do {
                ceph_mdsc_get_request(req);
                spin_unlock(&ci->i_unsafe_lock);
                dout("dir_fsync %p wait on tid %llu (until %llu)\n",
                     inode, req->r_tid, last_tid);
                if (req->r_timeout) {
                        ret = wait_for_completion_timeout(
                                &req->r_safe_completion, req->r_timeout);
                        if (ret > 0)
                                ret = 0;
                        else if (ret == 0)
                                ret = -EIO;  /* timed out */
                } else {
                        wait_for_completion(&req->r_safe_completion);
                }
                spin_lock(&ci->i_unsafe_lock);
                ceph_mdsc_put_request(req);

                if (ret || list_empty(head))
                        break;
                req = list_entry(head->next,
                                 struct ceph_mds_request, r_unsafe_dir_item);
        } while (req->r_tid < last_tid);
out:
        spin_unlock(&ci->i_unsafe_lock);
        return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;

        dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
             dn->d_name.len, dn->d_name.name);
        if (di) {
                mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
                spin_lock(&mdsc->dentry_lru_lock);
                list_add_tail(&di->lru, &mdsc->dentry_lru);
                mdsc->num_dentry++;
                spin_unlock(&mdsc->dentry_lru_lock);
        }
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;

        dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
             dn->d_name.len, dn->d_name.name, di->offset);
        if (di) {
                mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
                spin_lock(&mdsc->dentry_lru_lock);
                list_move_tail(&di->lru, &mdsc->dentry_lru);
                spin_unlock(&mdsc->dentry_lru_lock);
        }
}

void ceph_dentry_lru_del(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;

        dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
             dn->d_name.len, dn->d_name.name);
        if (di) {
                mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
                spin_lock(&mdsc->dentry_lru_lock);
                list_del_init(&di->lru);
                mdsc->num_dentry--;
                spin_unlock(&mdsc->dentry_lru_lock);
        }
}

/*
 * Return name hash for a given dentry. This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct dentry *dn)
{
        struct inode *dir = dn->d_parent->d_inode;
        struct ceph_inode_info *dci = ceph_inode(dir);

        switch (dci->i_dir_layout.dl_dir_hash) {
        case 0: /* for backward compat */
        case CEPH_STR_HASH_LINUX:
                return dn->d_name.hash;

        default:
                return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
                                     dn->d_name.name, dn->d_name.len);
        }
}

const struct file_operations ceph_dir_fops = {
        .read = ceph_read_dir,
        .readdir = ceph_readdir,
        .llseek = ceph_dir_llseek,
        .open = ceph_open,
        .release = ceph_release,
        .unlocked_ioctl = ceph_ioctl,
        .fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
        .lookup = ceph_lookup,
        .permission = ceph_permission,
        .getattr = ceph_getattr,
        .setattr = ceph_setattr,
        .setxattr = ceph_setxattr,
        .getxattr = ceph_getxattr,
        .listxattr = ceph_listxattr,
        .removexattr = ceph_removexattr,
        .mknod = ceph_mknod,
        .symlink = ceph_symlink,
        .mkdir = ceph_mkdir,
        .link = ceph_link,
        .unlink = ceph_unlink,
        .rmdir = ceph_unlink,
        .rename = ceph_rename,
        .create = ceph_create,
};

const struct dentry_operations ceph_dentry_ops = {
        .d_revalidate = ceph_d_revalidate,
        .d_release = ceph_d_release,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
        .d_revalidate = ceph_snapdir_d_revalidate,
        .d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
        .d_release = ceph_d_release,
};