#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino). The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

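/*
 * (Illustrative example, not from the protocol docs: an fstat(2) on an
 * open file can be expressed as (ino, ""), while a lookup of "foo"
 * within that directory is (ino, "foo").)
 */
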
const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (dentry->d_parent == NULL ||   /* nfs fh_to_dentry */
	    ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}
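
/*
 * Note: the unlocked d_fsdata check at the top of ceph_init_dentry()
 * pairs with the smp_mb() above: di is fully initialized before
 * d_fsdata is published, so a caller that observes a non-NULL
 * d_fsdata can safely return early.
 */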

struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (dentry->d_parent) {
		inode = dentry->d_parent->d_inode;
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}


/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}
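
/*
 * The inverse mapping, ceph_make_fpos() (defined in super.h), packs a
 * frag and offset back into an f_pos value, presumably as
 * ((loff_t)frag << 32) | off; these two helpers must stay in sync
 * with it.
 */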

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * I_COMPLETE indicates we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *filp,
			    void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct dentry *parent = filp->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
	     last);

	spin_lock(&parent->d_lock);

	/* start at beginning? */
	if (filp->f_pos == 2 || last == NULL ||
	    filp->f_pos < ceph_dentry(last)->offset) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->flags |= CEPH_F_ATEND;
			goto out_unlock;
		}
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (!d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    filp->f_pos <= di->offset)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		spin_unlock(&dentry->d_lock);
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	filp->f_pos = di->offset;
	err = filldir(dirent, dentry->d_name.name,
		      dentry->d_name.len, di->offset,
		      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
		      dentry->d_inode->i_mode >> 12);

	if (last) {
		if (err < 0) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = di->offset;
		} else {
			dput(last);
		}
	}
	last = dentry;

	if (err < 0)
		goto out;

	filp->f_pos++;

	/* make sure a dentry wasn't dropped while we didn't have parent lock */
	if (!ceph_i_test(dir, CEPH_I_COMPLETE)) {
		dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
		err = -EAGAIN;
		goto out;
	}

	spin_lock(&parent->d_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&parent->d_lock);
out:
	if (last)
		dput(last);
	return err;
}

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}

static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(filp->f_pos);
	int off = fpos_off(filp->f_pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;
	const int max_entries = fsc->mount_options->max_readdir;
	const int max_bytes = fsc->mount_options->max_readdir_bytes;

	dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (filp->f_pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = ci->i_release_count;

		dout("readdir off 0 -> '.'\n");
		if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
			    ceph_translate_ino(inode->i_sb, inode->i_ino),
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 1;
		off = 1;
	}
	if (filp->f_pos == 1) {
		ino_t ino = parent_ino(filp->f_dentry);
		dout("readdir off 1 -> '..'\n");
		if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
			    ceph_translate_ino(inode->i_sb, ino),
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&inode->i_lock);
	if ((filp->f_pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		spin_unlock(&inode->i_lock);
		err = __dcache_readdir(filp, dirent, filldir);
		if (err != -EAGAIN)
			return err;
	} else {
		spin_unlock(&inode->i_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		/* requery frag tree, as the frag topology may have changed */
		frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(filp->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
		req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
		req->r_num_caps = max_entries + 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			fi->dir_release_count--;    /* preclude I_COMPLETE */
		}

		/* note next offset and last dentry name */
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			rinfo = &req->r_reply_info;
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		u64 pos = ceph_make_fpos(frag, off);
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (filldir(dirent,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    pos,
			    ceph_translate_ino(inode->i_sb, ino), ftype) < 0) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		filp->f_pos = pos + 1;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		filp->f_pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&inode->i_lock);
	if (ci->i_release_count == fi->dir_release_count) {
		dout(" marking %p complete\n", inode);
		/* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
		ci->i_max_offset = filp->f_pos;
	}
	spin_unlock(&inode->i_lock);

	dout("readdir %p filp %p done.\n", inode, filp);
	return 0;
}

static void reset_readdir(struct ceph_file_info *fi)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->next_offset = 2;	/* compensate for . and .. */
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->flags &= ~CEPH_F_ATEND;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = offset;
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (origin) {
	case SEEK_END:
		offset += inode->i_size + 2;	/* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
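		/* fall through */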
	case SEEK_SET:
		break;
	default:
		goto out;
	}

	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fpos_frag(old_offset) ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi);
		}

		/* bump dir_release_count if we did a forward seek */
		if (offset > old_offset)
			fi->dir_release_count--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);	/* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  struct nameidata *nd)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* open (but not create!) intent? */
	if (nd &&
	    (nd->flags & LOOKUP_OPEN) &&
	    !(nd->intent.open.flags & O_CREAT)) {
		int mode = nd->intent.open.create_mode & ~current->fs->umask;
		return ceph_lookup_open(dir, dentry, nd, mode, 1);
	}

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&dir->i_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&dir->i_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&dir->i_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, NULL);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      int mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
		       struct nameidata *nd)
{
	dout("create in dir %p dentry %p name '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	if (nd) {
		BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
		dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
		/* hrm, what should i do here if we get aliased? */
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		return 0;
	}

	/* fall back to mknod */
	return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err < 0)
		d_drop(dentry);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
	req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(old_dentry->d_inode);
		d_instantiate(dentry, old_dentry->d_inode);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&inode->i_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&inode->i_lock);
	return drop;
}
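
/*
 * (The returned mask feeds req->r_inode_drop in the unlink and rename
 * paths below; the MDS request code releases those caps along with
 * the operation.)
 */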

/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up d_subdirs order */
		ceph_i_clear(new_dir, CEPH_I_COMPLETE);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di && di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_cap_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_cap_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}
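
/*
 * (Note that the renewal message is sent only after d_lock has been
 * dropped; presumably this is to avoid holding a spinlock across
 * ceph_mdsc_lease_send_msg(), which allocates and sends a message.)
 */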

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&dir->i_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&dir->i_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	int valid = 0;
	struct inode *dir;

	if (nd && nd->flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
	     ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
		valid = 1;
	} else if (dentry->d_inode &&
		   ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid)
		ceph_dentry_lru_touch(dentry);
	else
		d_drop(dentry);
	iput(dir);
	return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	if (di) {
		ceph_dentry_lru_del(dentry);
		if (di->lease_session)
			ceph_put_mds_session(di->lease_session);
		kmem_cache_free(ceph_dentry_cachep, di);
		dentry->d_fsdata = NULL;
	}
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     struct nameidata *nd)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}



/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				"entries:   %20lld\n"
				" files:    %20lld\n"
				" subdirs:  %20lld\n"
				"rentries:  %20lld\n"
				" rfiles:   %20lld\n"
				" rsubdirs: %20lld\n"
				"rbytes:    %20lld\n"
				"rctime:    %10ld.%09ld\n",
				ci->i_files + ci->i_subdirs,
				ci->i_files,
				ci->i_subdirs,
				ci->i_rfiles + ci->i_rsubdirs,
				ci->i_rfiles,
				ci->i_rsubdirs,
				ci->i_rbytes,
				(long)ci->i_rctime.tv_sec,
				(long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}

/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);
		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;  /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		spin_lock(&ci->i_unsafe_lock);
		ceph_mdsc_put_request(req);

		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	mutex_unlock(&inode->i_mutex);

	return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_add_tail(&di->lru, &mdsc->dentry_lru);
		mdsc->num_dentry++;
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
	     dn->d_name.len, dn->d_name.name, di->offset);
	if (di) {
		mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_move_tail(&di->lru, &mdsc->dentry_lru);
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_del_init(&di->lru);
		mdsc->num_dentry--;
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.readdir = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
};