#include "ceph_debug.h"

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		dentry->d_op = &ceph_dentry_ops;
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		dentry->d_op = &ceph_snapdir_dentry_ops;
	else
		dentry->d_op = &ceph_snap_dentry_ops;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}
	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_fsdata = di;
	dentry->d_time = jiffies;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}

/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}
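
/*
 * For reference, the composing helper used throughout this file is
 * ceph_make_fpos() (defined in super.h in this tree).  A minimal
 * sketch consistent with the two decoding helpers above would be:
 *
 *	static inline loff_t ceph_make_fpos(unsigned frag, unsigned off)
 *	{
 *		return ((loff_t)frag << 32) | (loff_t)off;
 *	}
 */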

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * I_COMPLETE indicates we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *filp,
			    void *dirent, filldir_t filldir)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_file_info *fi = filp->private_data;
	struct dentry *parent = filp->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
	     last);

	spin_lock(&dcache_lock);

	/* start at beginning? */
	if (filp->f_pos == 2 || (last &&
				 filp->f_pos < ceph_dentry(last)->offset)) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p d_subdirs %p/%p\n", p->prev, p->next,
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->at_end = 1;
			goto out_unlock;
		}
		if (!d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    filp->f_pos <= di->offset)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

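	/*
	 * Take a reference on the dentry and drop both locks before
	 * calling filldir: it copies the name out to userspace and may
	 * sleep, so we cannot hold dcache_lock or i_lock across it.
	 */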
	atomic_inc(&dentry->d_count);
	spin_unlock(&dcache_lock);
	spin_unlock(&inode->i_lock);

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	filp->f_pos = di->offset;
	err = filldir(dirent, dentry->d_name.name,
		      dentry->d_name.len, di->offset,
		      dentry->d_inode->i_ino,
		      dentry->d_inode->i_mode >> 12);

	if (last) {
		if (err < 0) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = di->offset;
		} else {
			dput(last);
		}
		last = NULL;
	}

	spin_lock(&inode->i_lock);
	spin_lock(&dcache_lock);

	last = dentry;

	if (err < 0)
		goto out_unlock;

	p = p->prev;
	filp->f_pos++;

	/* make sure a dentry wasn't dropped while we didn't have dcache_lock */
	if ((ceph_inode(dir)->i_ceph_flags & CEPH_I_COMPLETE))
		goto more;
	dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
	err = -EAGAIN;

out_unlock:
	spin_unlock(&dcache_lock);

	if (last) {
		spin_unlock(&inode->i_lock);
		dput(last);
		spin_lock(&inode->i_lock);
	}

	return err;
}

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}

static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_client *client = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = &client->mdsc;
	unsigned frag = fpos_frag(filp->f_pos);
	int off = fpos_off(filp->f_pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;
	const int max_entries = client->mount_args->max_readdir;

	dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
	if (fi->at_end)
		return 0;

	/* always start with . and .. */
	if (filp->f_pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = ci->i_release_count;

		dout("readdir off 0 -> '.'\n");
		if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
			    inode->i_ino, inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 1;
		off = 1;
	}
	if (filp->f_pos == 1) {
		dout("readdir off 1 -> '..'\n");
		if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
			    filp->f_dentry->d_parent->d_inode->i_ino,
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&inode->i_lock);
	if ((filp->f_pos == 2 || fi->dentry) &&
	    !ceph_test_opt(client, NOASYNCREADDIR) &&
	    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		err = __dcache_readdir(filp, dirent, filldir);
		if (err != -EAGAIN) {
			spin_unlock(&inode->i_lock);
			return err;
		}
	}
	spin_unlock(&inode->i_lock);
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		/* requery frag tree, as the frag topology may have changed */
		frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		req->r_inode = igrab(inode);
		req->r_dentry = dget(filp->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
		req->r_num_caps = max_entries + 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			fi->dir_release_count--;    /* preclude I_COMPLETE */
		}

		/* note next offset and last dentry name */
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			fi->next_offset = 2;
		} else {
			rinfo = &req->r_reply_info;
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);
	while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
		u64 pos = ceph_make_fpos(frag, off);
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
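		/* the high mode bits give the file type: (mode & S_IFMT) >> 12
		 * lines up with the DT_* values that filldir expects */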
		ftype = le32_to_cpu(in->mode) >> 12;
		if (filldir(dirent,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    pos,
			    le64_to_cpu(in->ino),
			    ftype) < 0) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		filp->f_pos = pos + 1;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		filp->f_pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->at_end = 1;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&inode->i_lock);
	if (ci->i_release_count == fi->dir_release_count) {
		dout(" marking %p complete\n", inode);
		ci->i_ceph_flags |= CEPH_I_COMPLETE;
		ci->i_max_offset = filp->f_pos;
	}
	spin_unlock(&inode->i_lock);

	dout("readdir %p filp %p done.\n", inode, filp);
	return 0;
}

static void reset_readdir(struct ceph_file_info *fi)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;  /* avoid a dangling pointer; note_last_dentry()
				* would otherwise kfree it a second time */
	fi->next_offset = 2;  /* compensate for . and .. */
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->at_end = 0;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = file->f_pos;   /* position before this seek */
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		offset += inode->i_size + 2;   /* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->at_end = 0;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fpos_frag(old_offset) ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi);
		}

		/* bump dir_release_count if we did a forward seek */
		if (offset > old_offset)
			fi->dir_release_count--;
	}
	mutex_unlock(&inode->i_mutex);
	return retval;
}

/*
 * Process result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	struct ceph_client *client = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode;

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_vino(parent).ino != CEPH_INO_ROOT && /* no .snap in root dir */
	    strcmp(dentry->d_name.name,
		   client->mount_args->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}

	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);   /* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  struct nameidata *nd)
{
	struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* open (but not create!) intent? */
	if (nd &&
	    (nd->flags & LOOKUP_OPEN) &&
	    (nd->flags & LOOKUP_CONTINUE) == 0 && /* only open last component */
	    !(nd->intent.open.flags & O_CREAT)) {
		int mode = nd->intent.open.create_mode & ~current->fs->umask;
		return ceph_lookup_open(dir, dentry, nd, mode, 1);
	}

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&dir->i_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    client->mount_args->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			di->offset = ci->i_max_offset++;
			spin_unlock(&dir->i_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&dir->i_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_PTR(PTR_ERR(req));
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, NULL);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      int mode, dev_t rdev)
{
	struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
		       struct nameidata *nd)
{
	dout("create in dir %p dentry %p name '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	if (nd) {
		BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
		dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
		/* hrm, what should i do here if we get aliased? */
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		return 0;
	}

	/* fall back to mknod */
	return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err < 0)
		d_drop(dentry);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry); /* or inode?  hrm. */
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err)
		d_drop(dentry);
	else if (!req->r_reply_info.head->is_dentry)
		d_instantiate(dentry, igrab(old_dentry->d_inode));
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&inode->i_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&inode->i_lock);
	return drop;
}

/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_client *client = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up d_subdirs order */
		ceph_i_clear(new_dir, CEPH_I_COMPLETE);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di && di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_cap_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_cap_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&dir->i_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&dir->i_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *dir = dentry->d_parent->d_inode;

	dout("d_revalidate %p '%.*s' inode %p\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
		goto out_touch;
	}
	if (dentry->d_inode && ceph_snap(dentry->d_inode) == CEPH_SNAPDIR)
		goto out_touch;

	if (dentry_lease_is_valid(dentry) ||
	    dir_lease_is_valid(dir, dentry))
		goto out_touch;

	dout("d_revalidate %p invalid\n", dentry);
	d_drop(dentry);
	return 0;
out_touch:
	ceph_dentry_lru_touch(dentry);
	return 1;
}

/*
 * When a dentry is released, clear the dir I_COMPLETE if it was part
 * of the current dir gen.
 */
static void ceph_dentry_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	struct inode *parent_inode = dentry->d_parent->d_inode;

	if (parent_inode) {
		struct ceph_inode_info *ci = ceph_inode(parent_inode);

		spin_lock(&parent_inode->i_lock);
		if (ci->i_shared_gen == di->lease_shared_gen) {
			dout(" clearing %p complete (d_release)\n",
			     parent_inode);
			ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
			ci->i_release_count++;
		}
		spin_unlock(&parent_inode->i_lock);
	}
	if (di) {
		ceph_dentry_lru_del(dentry);
		if (di->lease_session)
			ceph_put_mds_session(di->lease_session);
		kmem_cache_free(ceph_dentry_cachep, di);
		dentry->d_fsdata = NULL;
	}
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     struct nameidata *nd)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
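/*
 * A sketch of what userspace sees (hypothetical mount point and
 * numbers, purely illustrative; the fields come from the sprintf
 * below):
 *
 *	$ cat /mnt/ceph/somedir
 *	entries:                   10
 *	 files:                     7
 *	 subdirs:                   3
 *	...
 */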
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;

	if (!ceph_test_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(1024, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			sprintf(cf->dir_info,
				"entries: %20lld\n"
				" files: %20lld\n"
				" subdirs: %20lld\n"
				"rentries: %20lld\n"
				" rfiles: %20lld\n"
				" rsubdirs: %20lld\n"
				"rbytes: %20lld\n"
				"rctime: %10ld.%09ld\n",
				ci->i_files + ci->i_subdirs,
				ci->i_files,
				ci->i_subdirs,
				ci->i_rfiles + ci->i_rsubdirs,
				ci->i_rfiles,
				ci->i_rsubdirs,
				ci->i_rbytes,
				(long)ci->i_rctime.tv_sec,
				(long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}

/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, struct dentry *dentry,
			  int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);
		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;  /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		spin_lock(&ci->i_unsafe_lock);
		ceph_mdsc_put_request(req);

		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_add_tail(&di->lru, &mdsc->dentry_lru);
		mdsc->num_dentry++;
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_move_tail(&di->lru, &mdsc->dentry_lru);
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_del_init(&di->lru);
		mdsc->num_dentry--;
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.readdir = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
};

struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_dentry_release,
};

struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
};

struct dentry_operations ceph_snap_dentry_ops = {
};